From 2bd21d9e3e270acbe5fdebe2d77dfef6c2fce369 Mon Sep 17 00:00:00 2001 From: Ibrahim Mohamed Date: Thu, 20 Nov 2025 23:27:03 -0500 Subject: [PATCH] Update to version v4.0.0 --- .gitignore | 13 + CHANGELOG.md | 16 + NOTICE.txt | 37 +- README.md | 4 +- deployment/build-s3-dist.sh | 90 + deployment/ecr/.dockerignore | 45 + .../ecr/gaab-strands-agent/.dockerignore | 23 + deployment/ecr/gaab-strands-agent/Dockerfile | 66 + deployment/ecr/gaab-strands-agent/README.md | 165 + .../ecr/gaab-strands-agent/pyproject.toml | 96 + .../scripts/build-container.sh | 286 + .../gaab-strands-agent/scripts/deploy-ecr.sh | 388 + .../scripts/run_unit_tests.sh | 401 + .../src/configurable_agent.py | 249 + deployment/ecr/gaab-strands-agent/src/main.py | 205 + .../ecr/gaab-strands-agent/test/conftest.py | 40 + .../ecr/gaab-strands-agent/test/run_tests.py | 29 + .../test/test_configurable_agent.py | 1106 +++ .../test/test_ddb_helper.py | 362 + .../test/test_integration.py | 545 ++ .../test/test_main_memory.py | 132 + .../test/test_mcp_tools_loader.py | 727 ++ .../gaab-strands-agent/test/test_models.py | 744 ++ .../test/test_runtime_mcp_discovery.py | 427 + .../test/test_strands_tools_registry.py | 265 + .../test/test_tools_manager.py | 943 ++ deployment/ecr/gaab-strands-agent/uv.lock | 3002 ++++++ deployment/ecr/gaab-strands-common/README.md | 47 + .../ecr/gaab-strands-common/pyproject.toml | 32 + deployment/ecr/gaab-strands-common/pytest.ini | 12 + .../scripts/run_unit_tests.sh | 36 + .../src/gaab_strands_common/__init__.py | 104 + .../src/gaab_strands_common/base_agent.py | 112 + .../src/gaab_strands_common/constants.py | 16 + .../custom_tools/__init__.py | 81 + .../custom_tools/s3_file_reader.py | 226 + .../custom_tools/setup/__init__.py | 22 + .../custom_tools/setup/base_tool.py | 127 + .../custom_tools/setup/decorators.py | 62 + .../custom_tools/setup/metadata.py | 46 + .../custom_tools/setup/registry.py | 184 + .../src/gaab_strands_common/ddb_helper.py | 132 + .../gaab_strands_common/mcp_tools_loader.py | 334 + .../src/gaab_strands_common/models.py | 374 + .../multimodal/__init__.py | 14 + .../multimodal/file_handler.py | 287 + .../multimodal/multimodal_processor.py | 71 + .../gaab_strands_common/runtime_streaming.py | 237 + .../strands_tools_registry.py | 245 + .../src/gaab_strands_common/tool_wrapper.py | 445 + .../src/gaab_strands_common/tools_manager.py | 445 + .../src/gaab_strands_common/utils/__init__.py | 42 + .../gaab_strands_common/utils/constants.py | 36 + .../src/gaab_strands_common/utils/helpers.py | 263 + .../ecr/gaab-strands-common/test/README.md | 113 + .../ecr/gaab-strands-common/test/__init__.py | 6 + .../ecr/gaab-strands-common/test/conftest.py | 132 + .../test/custom_tools/test_base_tool.py | 181 + .../test/custom_tools/test_decorators.py | 99 + .../test/custom_tools/test_metadata.py | 58 + .../test/custom_tools/test_registry.py | 122 + .../test/custom_tools/test_s3_file_reader.py | 368 + .../test/multimodal/test_file_handler.py | 657 ++ .../multimodal/test_multimodal_processor.py | 214 + .../gaab-strands-common/test/pytest_plugin.py | 29 + .../test/test_base_agent.py | 251 + .../gaab-strands-common/test/test_helpers.py | 117 + .../gaab-strands-common/test/test_models.py | 559 ++ .../test/test_runtime_streaming.py | 481 + .../test/test_tool_wrapper.py | 491 + .../test/utils/test_helpers.py | 234 + deployment/ecr/gaab-strands-common/uv.lock | 1265 +++ .../gaab-strands-workflow-agent/.dockerignore | 23 + .../gaab-strands-workflow-agent/Dockerfile | 66 + .../ecr/gaab-strands-workflow-agent/README.md | 75 + .../pyproject.toml | 78 + .../scripts/build-container.sh | 290 + .../scripts/deploy-ecr.sh | 388 + .../scripts/run_unit_tests.sh | 380 + .../src/agents_loader.py | 286 + .../gaab-strands-workflow-agent/src/main.py | 290 + .../src/workflow_agent.py | 459 + .../test/conftest.py | 189 + .../test/test_agentcore_integration.py | 829 ++ .../test/test_agents_loader.py | 542 ++ .../test/test_workflow_agent.py | 714 ++ .../test/test_workflow_agent_memory.py | 230 + .../ecr/gaab-strands-workflow-agent/uv.lock | 2987 ++++++ deployment/run-unit-tests.sh | 62 + source/.prettierignore | 9 - source/.prettierrc.yml | 14 - .../infrastructure/bin/gen-ai-app-builder.ts | 28 +- source/infrastructure/cdk.json | 4 +- .../lib/api/base-rest-endpoint.ts | 9 +- .../deployment-platform-rest-api-helper.ts | 369 + .../api/deployment-platform-rest-endpoint.ts | 437 +- .../api/model-schema/deploy-usecase-body.ts | 739 -- .../agents/deploy-agent-usecase-body.ts | 25 + .../agents/deploy-agent-usecase-response.ts | 14 + .../agents/params/agent-core-params.ts | 77 + .../agents/update-agent-usecase-body.ts | 24 + .../agents/update-agent-usecase-response.ts | 14 + .../deployments/base-usecase-schema.ts | 70 + .../deployments/deploy-usecase-body.ts | 139 + .../deploy-usecase-response.ts | 6 +- .../mcp/deploy-mcp-usecase-body.ts | 85 + .../mcp/deploy-mcp-usecase-response.ts | 20 + .../mcp/params/mcp-gateway-params.ts | 250 + .../mcp/update-mcp-usecase-body.ts | 84 + .../mcp/update-mcp-usecase-response.ts | 20 + .../deployments/mcp/upload-schema-body.ts | 53 + .../deployments/mcp/upload-schema-response.ts | 54 + .../deployments/update-usecase-body.ts | 108 + .../update-usecase-response.ts | 7 +- .../workflows/deploy-workflow-usecase-body.ts | 31 + .../deploy-workflow-usecase-response.ts | 20 + .../workflows/params/workflow-core-params.ts | 95 + .../workflows/update-workflow-usecase-body.ts | 29 + .../update-workflow-usecase-response.ts | 20 + .../{ => feedback}/feedback-body.ts | 10 +- .../lib/api/model-schema/index.ts | 66 + .../multimodal/files-delete-request-body.ts | 47 + .../multimodal/files-delete-response-body.ts | 60 + .../multimodal/files-get-response-body.ts | 23 + .../multimodal/files-upload-request-body.ts | 47 + .../multimodal/files-upload-response-body.ts | 74 + .../shared/agent-memory-params.ts | 17 + .../api/model-schema/shared/auth-params.ts | 51 + .../shared/knowledge-base-params.ts | 321 + .../lib/api/model-schema/shared/llm-params.ts | 506 + .../lib/api/model-schema/shared/vpc-params.ts | 147 + .../api/model-schema/update-usecase-body.ts | 578 -- .../lib/api/rest-request-processor.ts | 18 + .../lib/api/use-case-rest-endpoint-setup.ts | 25 +- .../lib/api/websocket-endpoint.ts | 37 +- .../lib/api/websocket-request-processor.ts | 38 +- .../infrastructure/lib/auth/cognito-setup.ts | 46 +- .../lib/auth/component-cognito-app-client.ts | 134 + .../infrastructure/lib/bedrock-agent-stack.ts | 58 +- .../infrastructure/lib/bedrock-chat-stack.ts | 2 +- .../lib/deployment-platform-stack.ts | 186 +- .../lib/feedback/feedback-setup-stack.ts | 2 +- .../lib/framework/application-setup.ts | 19 +- .../lib/framework/base-stack.ts | 146 +- .../lib/framework/text-use-case-stack.ts | 48 +- .../lib/framework/use-case-stack.ts | 424 +- .../infrastructure/lib/layers/runtime-libs.ts | 4 +- source/infrastructure/lib/mcp-server-stack.ts | 474 + .../lib/metrics/use-case-dashboard.ts | 332 + .../lib/multimodal/multimodal-setup.ts | 586 ++ .../lib/sagemaker-chat-stack.ts | 2 +- .../lib/storage/chat-storage-stack.ts | 4 +- .../deployment-platform-storage-setup.ts | 127 +- .../cfn-deploy-role-factory.ts | 698 ++ .../use-case-management/management-stack.ts | 1026 +- .../lib/use-case-management/setup.ts | 29 +- .../agent-core/agent-builder-stack.ts | 153 + .../agent-core/agent-core-base-stack.ts | 950 ++ .../components/agent-execution-role.ts | 445 + .../components/agent-invocation-lambda.ts | 140 + .../components/agent-memory-deployment.ts | 119 + .../components/agent-runtime-deployment.ts | 227 + .../components/ecr-pull-through-cache.ts | 144 + .../agent-core/utils/image-uri-resolver.ts | 466 + .../agent-core/workflow-stack.ts | 214 + .../lib/utils/app-registry-aspects.ts | 182 - .../infrastructure/lib/utils/common-utils.ts | 102 +- source/infrastructure/lib/utils/constants.ts | 156 +- .../lib/utils/custom-infra-setup.ts | 36 +- .../lib/utils/solution-helper.ts | 18 +- .../lib/vpc/agent-builder-vpc.ts | 55 + source/infrastructure/lib/vpc/vpc-setup.ts | 17 + source/infrastructure/package-lock.json | 116 +- source/infrastructure/package.json | 13 +- ...eployment-platform-rest-api-helper.test.ts | 461 + .../deployment-platform-rest-endpoint.test.ts | 347 +- .../model-schema/deploy-usecase-body.test.ts | 1737 ---- .../agents/deploy-agent-usecase-body.test.ts | 312 + .../agents/params/agent-core-params.test.ts | 314 + .../agents/params/agent-memory-params.test.ts | 54 + .../agents/update-agent-usecase-body.test.ts | 262 + .../deployments/deploy-usecase-body.test.ts | 431 + .../mcp/deploy-mcp-usecase-body.test.ts | 335 + .../mcp/params/mcp-gateway-params.test.ts | 1026 ++ .../mcp/update-mcp-usecase-body.test.ts | 381 + .../mcp/upload-schema-body.test.ts | 348 + .../mcp/upload-schema-response.test.ts | 362 + .../deployments/update-usecase-body.test.ts | 352 + .../deploy-workflow-usecase-body.test.ts | 802 ++ .../params/workflow-core-params.test.ts | 544 ++ .../update-workflow-usecase-body.test.ts | 426 + .../feedback/feedback-body.test.ts | 198 + .../files-delete-request-body.test.ts | 212 + .../files-delete-response-body.test.ts | 487 + .../files-get-response-body.test.ts | 192 + .../files-upload-request-body.test.ts | 330 + .../files-upload-response-body.test.ts | 698 ++ .../model-schema/shared/auth-params.test.ts | 248 + .../bedrock-params.test.ts} | 974 +- .../model-schema/shared/llm-params.test.ts | 948 ++ .../api/model-schema/{ => shared}/utils.ts | 2 +- .../model-schema/shared/vpc-params.test.ts | 278 + .../test/api/rest-request-processor.test.ts | 7 +- .../test/api/use-case-rest-endpoint.test.ts | 11 +- .../test/api/websocket-endpoint.test.ts | 1 + .../api/websocket-request-processor.test.ts | 76 +- .../auth/component-cognito-app-client.test.ts | 311 + .../deployment-platform-cognito-setup.test.ts | 41 +- .../test/bedrock-agent-stack.test.ts | 57 + .../test/bedrock-chat-stack.test.ts | 91 + .../test/deployment-platform-stack.test.ts | 302 +- .../test/framework/application-setup.test.ts | 29 +- .../test/framework/use-case-stack.test.ts | 415 +- .../test/mcp-server-stack.test.ts | 667 ++ .../test/mock-lambda-func/.gitignore | 6 +- .../node-lambda/package-lock.json | 4 +- .../mock-lambda-func/node-lambda/package.json | 2 +- .../python-lambda/pyproject.toml | 2 +- .../typescript-lambda/package-lock.json | 17 +- .../typescript-lambda/package.json | 2 +- .../test/mock-ui/package-lock.json | 4 +- .../infrastructure/test/mock-ui/package.json | 2 +- .../test/multimodal/multimodal-setup.test.ts | 779 ++ .../test/storage/chat-storage-stack.test.ts | 2 +- .../deployment-platform-storage-setup.test.ts | 162 +- .../deployment-platform-storage-stack.test.ts | 2 +- .../cfn-deploy-role-factory.test.ts | 391 + .../management-stack.test.ts | 1084 +-- .../agent-core/agent-builder-stack.test.ts | 1531 +++ .../agent-core/agent-core-base-stack.test.ts | 120 + .../components/agent-execution-role.test.ts | 1053 +++ .../agent-invocation-lambda.test.ts | 181 + .../agent-memory-deployment.test.ts | 106 + .../agent-runtime-deployment.test.ts | 383 + .../components/ecr-pull-through-cache.test.ts | 473 + .../utils/image-uri-resolver.test.ts | 433 + .../agent-core/workflow-stack.test.ts | 630 ++ .../test/utils/app-registry.test.ts | 188 - .../test/utils/common-utils.test.ts | 10 +- .../test/utils/custom-infra-setup.test.ts | 19 +- .../test/utils/solution-helper.test.ts | 31 +- .../test/vpc/custom-vpc.test.ts | 4 +- source/lambda/agentcore-invocation/handler.py | 523 + .../lambda/agentcore-invocation/poetry.lock | 1148 +++ .../agentcore-invocation/pyproject.toml | 50 + .../agentcore-invocation/test/__init__.py | 2 + .../test/test_agentcore_client.py | 587 ++ .../test/test_event_processor.py | 668 ++ .../test/test_handler_integration.py | 1111 +++ .../agentcore-invocation/test/test_helper.py | 337 + .../test/test_keep_alive_manager.py | 300 + .../agentcore-invocation/utils/__init__.py | 9 + .../utils/agentcore_client.py | 478 + .../agentcore-invocation/utils/constants.py | 58 + .../utils/event_processor.py | 172 + .../agentcore-invocation/utils/helper.py | 115 + .../utils/keep_alive_manager.py | 343 + .../utils/websocket_error_handler.py | 106 + source/lambda/chat/poetry.lock | 56 +- source/lambda/chat/pyproject.toml | 2 +- .../custom-authorizer/package-lock.json | 43 +- source/lambda/custom-authorizer/package.json | 6 +- .../custom-authorizer/rest-authorizer.ts | 3 +- .../custom-authorizer/test/authorizer.test.ts | 4 + source/lambda/custom-authorizer/tsconfig.json | 3 + .../custom-authorizer/utils/get-policy.ts | 4 +- source/lambda/custom-resource/lambda_func.py | 27 +- .../custom-resource/lambda_ops_metrics.py | 8 +- .../operations/agentcore_oauth_client.py | 149 + .../agentcore_outbound_permissions.py | 276 + .../operations/deploy_agent_core.py | 662 ++ .../operations/deploy_agent_core_memory.py | 293 + .../operations/deploy_mcp_gateway.py | 81 + .../operations/deploy_mcp_runtime.py | 73 + .../operations/gen_ecr_repo_prefix.py | 135 + .../get_arns_for_inference_profile.py | 64 +- .../operations/lambda_version_generator.py | 49 + .../multimodal_bucket_notifications.py | 62 + .../operations/operation_types.py | 13 +- .../{anonymous_metrics.py => send_metrics.py} | 83 +- .../custom-resource/operations/shared.py | 96 + .../custom-resource/operations/sleep.py | 65 + source/lambda/custom-resource/poetry.lock | 576 +- source/lambda/custom-resource/pyproject.toml | 3 +- .../fixtures/agentcore_oauth_client_events.py | 22 + .../agentcore_outbound_permissions_events.py | 21 + .../fixtures/gen_ecr_repo_prefix_events.py | 18 + .../lambda_version_generator_events.py | 19 + ...us_metrics_events.py => metrics_events.py} | 222 +- .../operations/test_agentcore_oauth_client.py | 118 + .../test_agentcore_outbound_permissions.py | 307 + .../test/operations/test_deploy_agent_core.py | 1317 +++ .../test_deploy_agent_core_memory.py | 192 + .../operations/test_deploy_mcp_gateway.py | 188 + .../operations/test_deploy_mcp_runtime.py | 186 + .../operations/test_gen_ecr_repo_prefix.py | 180 + .../test_get_arns_for_inference_profile.py | 280 +- .../test_lambda_version_generator.py | 148 + .../test_multimodal_bucket_notifications.py | 127 + ...onymous_metric.py => test_send_metrics.py} | 306 +- .../test/operations/test_shared.py | 117 +- .../custom-resource/test/test_lambda_func.py | 7 +- .../test/test_lambda_ops_metrics.py | 26 +- .../test/utils/test_agentcore_mcp.py | 111 + .../test/utils/test_auth_manager.py | 211 + .../custom-resource/test/utils/test_data.py | 197 + .../test/utils/test_gateway_mcp.py | 350 + .../test/utils/test_lambda_target_creator.py | 86 + .../test/utils/test_mcp_config_manager.py | 770 ++ .../test/utils/test_mcp_factory.py | 135 + .../test/utils/test_metrics.py | 6 +- .../test/utils/test_metrics_payload.py | 2 +- .../test/utils/test_openapi_target_creator.py | 150 + .../test/utils/test_policy_manager.py | 556 ++ .../test/utils/test_runtime_mcp.py | 256 + .../test/utils/test_smithy_target_creator.py | 55 + .../custom-resource/utils/agent_core_utils.py | 89 + .../custom-resource/utils/agentcore_mcp.py | 103 + .../custom-resource/utils/auth_manager.py | 159 + .../lambda/custom-resource/utils/constants.py | 18 +- source/lambda/custom-resource/utils/data.py | 88 +- .../custom-resource/utils/gateway_mcp.py | 323 + .../utils/lambda_target_creator.py | 44 + .../utils/mcp_config_manager.py | 267 + .../custom-resource/utils/mcp_factory.py | 107 + .../lambda/custom-resource/utils/metrics.py | 1 + .../custom-resource/utils/metrics_payload.py | 78 + .../custom-resource/utils/metrics_schema.py | 41 + .../utils/openapi_target_creator.py | 110 + .../custom-resource/utils/policy_manager.py | 306 + .../custom-resource/utils/runtime_mcp.py | 205 + .../utils/smithy_target_creator.py | 39 + .../lambda/ext-idp-group-mapper/poetry.lock | 32 +- .../ext-idp-group-mapper/pyproject.toml | 2 +- .../feedback-management/package-lock.json | 25 +- .../lambda/feedback-management/package.json | 6 +- .../conversation-retrieval-service.ts | 6 +- .../services/dynamodb-use-case-retriever.ts | 6 +- .../services/feedback-storage-service.ts | 5 +- .../feedback-management/test/index.test.ts | 7 +- .../conversation-retrieval-service.test.ts | 4 + .../dynamodb-use-case-retriever.test.ts | 4 + .../services/feedback-storage-service.test.ts | 4 + .../lambda/feedback-management/tsconfig.json | 5 +- .../utils/http-response-formatters.ts | 2 +- source/lambda/files-management/index.ts | 115 + source/lambda/files-management/jest.config.js | 26 + .../files-management/models/file-command.ts | 249 + .../files-management/models/files-factory.ts | 132 + .../lambda/files-management/models/types.ts | 122 + .../lambda/files-management/package-lock.json | 5322 +++++++++++ source/lambda/files-management/package.json | 40 + .../files-management/power-tools-init.ts | 16 + .../services/ddb-config-service.ts | 112 + .../services/ddb-metadata-service.ts | 269 + .../services/s3-presigned-url-service.ts | 169 + .../test/commands/file-command.test.ts | 1176 +++ .../test/factories/files-factory.test.ts | 499 + .../files-management/test/index.test.ts | 438 + .../test/services/ddb-config-service.test.ts | 195 + .../services/ddb-metadata-service.test.ts | 485 + .../services/s3-presigned-url-service.test.ts | 333 + .../files-management/test/utils/error.test.ts | 31 + .../utils/http-response-formatter.test.ts | 103 + .../test/utils/multimodal-cache.test.ts | 226 + .../files-management/test/utils/utils.test.ts | 554 ++ .../test/validators/file-validator.test.ts | 213 + .../validators/request-validators.test.ts | 258 + source/lambda/files-management/tsconfig.json | 85 + .../files-management/utils/constants.ts | 106 + source/lambda/files-management/utils/error.ts | 9 + .../utils/http-response-formatters.ts | 75 + .../utils/multimodal-cache.ts | 90 + source/lambda/files-management/utils/utils.ts | 275 + .../validators/file-validator.ts | 99 + .../validators/request-validators.ts | 32 + .../lambda/files-metadata-management/index.ts | 122 + .../files-metadata-management/jest.config.js | 22 + .../files-metadata-management/models/types.ts | 54 + .../package-lock.json | 5960 ++++++++++++ .../files-metadata-management/package.json | 43 + .../power-tools-init.ts | 16 + .../test/index.test.ts | 632 ++ .../test/power-tools-init.test.ts | 55 + .../test/utils/error.test.ts | 31 + .../test/utils/eventbridge-processor.test.ts | 501 + .../test/utils/file-validator.test.ts | 771 ++ .../utils/http-response-formatter.test.ts | 103 + .../test/utils/metadata-validator.test.ts | 416 + .../test/utils/utils.test.ts | 263 + .../files-metadata-management/tsconfig.json | 76 + .../utils/constants.ts | 104 + .../files-metadata-management/utils/error.ts | 12 + .../utils/eventbridge-processor.ts | 215 + .../utils/file-validator.ts | 242 + .../utils/http-response-formatters.ts | 57 + .../utils/metadata-validator.ts | 124 + .../files-metadata-management/utils/utils.ts | 240 + source/lambda/invoke-agent/poetry.lock | 32 +- source/lambda/invoke-agent/pyproject.toml | 2 +- .../aws-node-user-agent-config/.gitignore | 2 - .../aws-node-user-agent-config/jest.config.js | 26 + .../package-lock.json | 43 +- .../aws-node-user-agent-config/package.json | 6 +- .../aws-sdk-lib/aws-client-manager.test.ts | 76 + .../layers/aws-sdk-lib/aws-client-manager.ts | 50 + source/lambda/layers/aws-sdk-lib/index.ts | 4 + .../lambda/layers/aws-sdk-lib/jest.config.js | 9 + .../layers/aws-sdk-lib/package-lock.json | 8385 +++++++++++++---- source/lambda/layers/aws-sdk-lib/package.json | 42 +- .../lambda/layers/aws-sdk-lib/tsconfig.json | 22 + source/lambda/layers/aws_boto3/poetry.lock | 26 +- source/lambda/layers/aws_boto3/pyproject.toml | 6 +- .../layers/custom_boto3_init/poetry.lock | 30 +- .../layers/custom_boto3_init/pyproject.toml | 2 +- source/lambda/layers/langchain/poetry.lock | 24 +- source/lambda/layers/langchain/pyproject.toml | 4 +- source/lambda/model-info/index.ts | 4 +- source/lambda/model-info/models/types.ts | 11 + source/lambda/model-info/package-lock.json | 25 +- source/lambda/model-info/package.json | 6 +- source/lambda/model-info/tsconfig.json | 3 + .../model-info/utils/model-info-retriever.ts | 3 +- source/lambda/use-case-details/index.ts | 8 +- .../lambda/use-case-details/package-lock.json | 25 +- source/lambda/use-case-details/package.json | 6 +- .../use-case-details/test/index.test.ts | 325 +- .../use-case-details/test/utils/utils.test.ts | 139 + source/lambda/use-case-details/tsconfig.json | 3 + .../use-case-details/utils/constants.ts | 5 + source/lambda/use-case-details/utils/utils.ts | 8 +- .../use-case-management/agents-handler.ts | 121 + .../cfn/stack-management.ts | 9 +- .../cfn/stack-operation-builder.ts | 7 +- .../ddb/storage-management.ts | 4 +- .../ddb/storage-operation-builder.ts | 40 +- .../ddb/use-case-config-management.ts | 35 +- .../ddb/use-case-config-operation-builder.ts | 34 +- .../lambda/use-case-management/jest.config.js | 10 +- .../lambda/use-case-management/mcp-handler.ts | 85 + .../agent-builder-use-case-adapter.ts | 280 + .../{ => adapters}/agent-use-case-adapter.ts | 24 +- .../{ => adapters}/chat-use-case-adapter.ts | 22 +- .../model/adapters/mcp-adapter.ts | 201 + .../adapters/workflow-use-case-adapter.ts | 284 + .../model/commands/agent-builder-command.ts | 24 + .../model/commands/case-command.ts | 15 + .../model/commands/mcp-command.ts | 435 + .../commands/use-case-command.ts} | 60 +- .../model/commands/workflow-command.ts | 24 + .../use-case-management/model/get-use-case.ts | 24 +- .../model/list-use-cases.ts | 1 + .../lambda/use-case-management/model/types.ts | 150 +- .../model/use-case-validator.ts | 484 - .../use-case-management/model/use-case.ts | 54 +- .../validators/agent-builder-validator.ts | 156 + .../model/validators/agent-validator.ts | 71 + .../model/validators/base-validator.ts | 47 + .../model/validators/config-merge-utils.ts | 150 + .../model/validators/index.ts | 28 + .../model/validators/mcp-validator.ts | 665 ++ .../model/validators/text-validator.ts | 106 + .../model/validators/validation-utils.ts | 224 + .../model/validators/validator-factory.ts | 50 + .../model/validators/workflow-validator.ts | 208 + .../use-case-management/package-lock.json | 1496 ++- .../lambda/use-case-management/package.json | 6 +- .../use-case-management/power-tools-init.ts | 2 +- .../use-case-management/s3/s3-management.ts | 172 + .../test/agents-handler.test.ts | 898 ++ .../test/cfn/stack-operation-builder.test.ts | 7 +- .../test/ddb/builder.test.ts | 2 +- .../ddb/use-case-config-management.test.ts | 29 +- .../use-case-config-operation-builder.test.ts | 1 - .../test/event-test-data.ts | 203 + .../test/mcp-handler.test.ts | 274 + .../agent-builder-use-case-adapter.test.ts | 834 ++ .../agent-use-case-adapter.test.ts | 137 +- .../chat-use-case-adapter.test.ts | 13 +- .../test/model/adapters/mcp-adapter.test.ts | 343 + .../workflow-use-case-adapter.test.ts | 876 ++ .../command/agent-builder-command.test.ts | 149 + .../model/command/workflow-command.test.ts | 149 + .../test/model/commands/mcp-command.test.ts | 1929 ++++ .../test/model/get-use-case.test.ts | 82 +- .../test/model/use-case-validator.test.ts | 2153 ----- .../agent-builder-validator.test.ts | 472 + .../model/validators/agent-validator.test.ts | 241 + .../validators/config-merge-utils.test.ts | 693 ++ .../model/validators/mcp-validator.test.ts | 1250 +++ .../model/validators/text-validator.test.ts | 506 + .../validators/validator-factory.test.ts | 135 + .../validators/workflow-validator.test.ts | 402 + .../test/s3/s3-management.test.ts | 377 + ...mmand.test.ts => use-case-command.test.ts} | 3 +- ...index.test.ts => use-case-handler.test.ts} | 68 +- .../test/utils/utils.test.ts | 338 + .../test/workflows-handler.test.ts | 481 + .../lambda/use-case-management/tsconfig.json | 11 +- .../{index.ts => use-case-handler.ts} | 106 +- .../use-case-management/utils/check-env.ts | 22 - .../use-case-management/utils/constants.ts | 152 +- .../utils/http-response-formatters.ts | 4 +- .../lambda/use-case-management/utils/utils.ts | 219 +- .../use-case-management/workflows-handler.ts | 121 + .../websocket-connectors/package-lock.json | 43 +- .../lambda/websocket-connectors/package.json | 6 +- .../chat-bedrock-ai21.jamba-1-5-large-v1.json | 23 - .../chat-bedrock-ai21.jamba-1-5-mini-v1.json | 23 - .../chat-bedrock-amazon-nova-lite-v1.json | 23 - .../chat-bedrock-amazon-nova-micro-v1.json | 23 - .../chat-bedrock-amazon-nova-pro-v1.json | 23 - ...-bedrock-anthropic-claude-v3-haiku-v1.json | 23 - ...drock-anthropic-claude-v3.5-sonnet-v1.json | 23 - ...chat-bedrock-cohere-command-r-plus-v1.json | 23 - .../chat-bedrock-cohere-command-r-v1.json | 23 - .../chat-bedrock-llama3-70b-instruct-v1.json | 23 - .../chat-bedrock-llama3-8b-instruct-v1.json | 23 - ...gchat-bedrock-ai21.jamba-1-5-large-v1.json | 24 - ...agchat-bedrock-ai21.jamba-1-5-mini-v1.json | 24 - .../ragchat-bedrock-amazon-nova-lite-v1.json | 24 - .../ragchat-bedrock-amazon-nova-micro-v1.json | 24 - .../ragchat-bedrock-amazon-nova-pro-v1.json | 24 - ...-bedrock-anthropic-claude-v3-haiku-v1.json | 24 - ...drock-anthropic-claude-v3.5-sonnet-v1.json | 24 - ...chat-bedrock-cohere-command-r-plus-v1.json | 24 - .../ragchat-bedrock-cohere-command-r-v1.json | 24 - ...agchat-bedrock-llama3-70b-instruct-v1.json | 24 - ...ragchat-bedrock-llama3-8b-instruct-v1.json | 24 - source/pre-build-ecr-images.sh | 504 + source/scripts/v2_migration/pyproject.toml | 2 +- source/stage-assets.sh | 671 +- source/ui-chat/package-lock.json | 826 +- source/ui-chat/package.json | 10 +- .../common/common-components.test.tsx | 22 +- .../external-link-warning-modal.test.tsx | 155 + .../markdown/MarkdownContent.test.tsx | 159 +- .../thinking/ThinkingIndicator.test.tsx | 74 + .../hooks/use-chat-messages.test.tsx | 341 +- .../src/__tests__/models/response.test.ts | 154 + .../__tests__/pages/chat/ChatPage.test.tsx | 24 +- .../chat/components/input/ChatInput.test.tsx | 519 +- .../messages/IncomingMessage.test.tsx | 231 + .../messages/OutgoingMessage.test.tsx | 106 +- .../src/__tests__/pages/chat/types.test.ts | 72 + .../__tests__/reducers/chat-reducer.test.ts | 236 +- .../utils/construct-api-payload.test.ts | 402 +- .../src/__tests__/utils/file-upload.test.ts | 339 + .../src/__tests__/utils/validation.test.ts | 36 + .../components/common/common-components.tsx | 1 + .../common/external-link-warning-modal.tsx | 54 + .../components/markdown/MarkdownContent.tsx | 66 +- .../src/components/multimodal/FileDisplay.tsx | 121 + .../components/multimodal/FileTokenGroup.tsx | 126 + .../multimodal/__tests__/FileDisplay.test.tsx | 256 + .../thinking/ExpandableContent.scss | 29 + .../components/thinking/ExpandableContent.tsx | 39 + .../thinking/ThinkingIndicator.scss | 118 + .../components/thinking/ThinkingIndicator.tsx | 114 + .../ui-chat/src/components/thinking/index.ts | 5 + .../components/tool-usage/ToolUsageList.scss | 9 + .../components/tool-usage/ToolUsageList.tsx | 43 + .../src/components/tool-usage/index.ts | 5 + .../src/components/tools/HelpPanelContent.tsx | 37 + .../components/tools/ToolUsageIndicator.scss | 182 + .../components/tools/ToolUsageIndicator.tsx | 96 + .../src/components/tools/ToolsContent.tsx | 9 +- source/ui-chat/src/components/tools/index.ts | 5 + .../hooks/__tests__/use-file-upload.test.ts | 238 + source/ui-chat/src/hooks/use-chat-message.ts | 164 +- source/ui-chat/src/hooks/use-file-upload.ts | 262 + source/ui-chat/src/mocks/handlers.ts | 68 +- source/ui-chat/src/models/api/response.ts | 53 + source/ui-chat/src/models/message.ts | 19 +- source/ui-chat/src/models/runtime-config.ts | 4 +- source/ui-chat/src/models/use-case-config.ts | 26 +- source/ui-chat/src/pages/chat/ChatPage.tsx | 74 +- .../pages/chat/components/input/ChatInput.tsx | 745 +- .../chat/components/messages/ChatMessage.tsx | 19 +- .../messages/ChatMessagesContainer.tsx | 39 +- .../components/messages/IncomingMessage.tsx | 32 +- .../chat/components/messages/Messages.tsx | 32 +- .../components/messages/OutgoingMessage.tsx | 3 + .../pages/chat/components/messages/types.ts | 4 + source/ui-chat/src/pages/chat/types.ts | 37 +- source/ui-chat/src/reducers/chat-reducer.ts | 237 +- .../__tests__/fileUploadService.test.ts | 280 + .../ui-chat/src/services/fileUploadService.ts | 549 ++ source/ui-chat/src/store/configSlice.ts | 66 +- source/ui-chat/src/store/solutionApi.ts | 35 +- source/ui-chat/src/types/file-upload.ts | 91 + source/ui-chat/src/utils/API.adapter.ts | 47 +- source/ui-chat/src/utils/constants.ts | 32 +- .../src/utils/construct-api-payload.ts | 105 +- .../src/utils/extract-thinking-content.ts | 112 + source/ui-chat/src/utils/file-upload.ts | 334 + source/ui-chat/src/utils/validation.ts | 17 +- source/ui-deployment/package-lock.json | 59 +- source/ui-deployment/package.json | 6 +- source/ui-deployment/src/App.jsx | 33 +- .../__mocks__/deployment-steps-form-data.js | 3 +- .../__mocks__/mock-text-deployment.js | 1 + .../dashboard/DashboardNavigation.test.tsx | 118 + .../dashboard/DashboardView.test.tsx | 23 +- .../useCaseDetails/Gateway.test.tsx | 62 + .../useCaseDetails/UseCaseView.test.tsx | 2 +- .../__snapshots__/Gateway.test.tsx.snap | 7 + .../__snapshots__/UseCaseView.test.tsx.snap | 2 + .../snapshot_tests/wizard/WizardView.test.tsx | 1 + .../useCaseDetails/UseCaseView.test.tsx | 28 +- .../useCaseDetails/gateway/Gateway.test.tsx | 102 + .../gateway/GatewayDetails.test.tsx | 93 + .../useCaseDetails/mcps/MCPItem.test.tsx | 154 + .../useCaseDetails/mcps/MCPs.test.tsx | 112 + .../useCaseDetails/mcps/MCPsList.test.tsx | 188 + .../useCaseDetails/mcps/ToolItem.test.tsx | 129 + .../useCaseDetails/mcps/ToolsList.test.tsx | 184 + .../memory/MemoryDetails.test.tsx | 78 + .../model/BedrockDetails.test.tsx | 28 +- .../systemPrompt/SystemPromptDetails.test.tsx | 83 + .../workflowDetails/WorkflowDetails.test.tsx | 115 + .../WorkflowOrchestration.test.tsx | 190 + .../__tests__/wizard/WizardView.test.tsx | 110 +- .../wizard/Workflow/AddAgentModal.test.tsx | 404 + .../__tests__/wizard/utils.test.jsx | 1194 ++- .../src/components/common/Notifications.tsx | 46 + .../src/components/common/index.ts | 27 + .../commons/__tests__/notifications.test.tsx | 142 + .../commons/deploy-confirmation-modal.jsx | 9 +- .../components/commons/full-page-header.tsx | 9 +- .../src/components/commons/notifications.tsx | 51 +- .../components/dashboard/DashboardView.jsx | 3 +- .../components/useCaseDetails/UseCaseView.jsx | 141 +- .../agentDetails/AgentDetails.tsx | 53 + .../useCaseDetails/agentDetails/index.ts | 4 + .../useCaseDetails/gateway/Gateway.tsx | 35 + .../useCaseDetails/gateway/GatewayDetails.tsx | 28 + .../useCaseDetails/gateway/index.ts | 5 + .../useCaseDetails/general/GeneralConfig.tsx | 19 +- .../useCaseDetails/mcps/MCPItem.tsx | 43 + .../components/useCaseDetails/mcps/MCPs.tsx | 53 + .../useCaseDetails/mcps/MCPsList.tsx | 31 + .../useCaseDetails/mcps/ToolItem.tsx | 21 + .../useCaseDetails/mcps/ToolsList.tsx | 33 + .../components/useCaseDetails/mcps/index.ts | 7 + .../useCaseDetails/memory/Memory.tsx | 35 + .../useCaseDetails/memory/MemoryDetails.tsx | 30 + .../components/useCaseDetails/memory/index.ts | 5 + .../useCaseDetails/model/BedrockDetails.tsx | 7 +- .../useCaseDetails/model/ModelDetails.tsx | 8 +- .../useCaseDetails/runtime/Runtime.tsx | 35 + .../useCaseDetails/runtime/RuntimeDetails.tsx | 37 + .../useCaseDetails/runtime/index.ts | 5 + .../systemPrompt/SystemPrompt.tsx | 35 + .../systemPrompt/SystemPromptDetails.tsx | 30 + .../useCaseDetails/systemPrompt/index.ts | 5 + .../useCaseDetails/targets/TargetItem.tsx | 27 + .../useCaseDetails/targets/Targets.tsx | 31 + .../useCaseDetails/targets/TargetsList.tsx | 28 + .../useCaseDetails/targets/index.ts | 6 + .../workflowDetails/WorkflowDetails.jsx | 58 + .../useCaseDetails/workflowDetails/index.js | 4 + .../WorkflowOrchestration.jsx | 85 + .../workflowOrchestration/index.js | 4 + .../wizard/AgentBuilder/AgentBuilder.tsx | 77 + .../components/wizard/AgentBuilder/Memory.tsx | 101 + .../wizard/AgentBuilder/SystemPrompt.tsx | 165 + .../components/wizard/AgentBuilder/Tools.tsx | 422 + .../__tests__/AgentBuilder.test.tsx | 374 + .../AgentBuilder/__tests__/Memory.test.tsx | 145 + .../__tests__/SystemPrompt.test.tsx | 191 + .../AgentBuilder/__tests__/Tools.test.tsx | 1235 +++ .../components/wizard/AgentBuilder/index.ts | 4 + .../MCPServer/AdditionalConfigurations.tsx | 431 + .../wizard/MCPServer/LambdaTarget.tsx | 83 + .../MCPServer/MCPGatewayConfiguration.tsx | 109 + .../MCPServer/MCPRuntimeConfiguration.tsx | 289 + .../components/wizard/MCPServer/MCPServer.tsx | 199 + .../MCPServer/MCPServerConfiguration.tsx | 34 + .../MCPServer/MCPTargetConfiguration.tsx | 177 + .../wizard/MCPServer/OpenAPITarget.tsx | 63 + .../wizard/MCPServer/OutboundAuth.tsx | 153 + .../wizard/MCPServer/SchemaUpload.tsx | 157 + .../wizard/MCPServer/SmithyTarget.tsx | 31 + .../wizard/MCPServer/TargetBasicInfo.tsx | 163 + .../wizard/MCPServer/TargetTypeSelector.tsx | 32 + .../AdditionalConfigurations.test.tsx | 814 ++ .../MCPGatewayConfiguration.test.tsx | 217 + .../MCPRuntimeConfiguration.test.tsx | 213 + .../MCPServer/__tests__/MCPServer.test.tsx | 186 + .../__tests__/MCPServerConfiguration.test.tsx | 158 + .../__tests__/MCPTargetConfiguration.test.tsx | 537 ++ .../MCPServer/__tests__/SchemaUpload.test.tsx | 584 ++ .../MCPServer/__tests__/helpers.test.tsx | 142 + .../components/wizard/MCPServer/helpers.tsx | 301 + .../src/components/wizard/MCPServer/index.ts | 15 + .../src/components/wizard/Model/Model.tsx | 104 +- .../wizard/Model/ModelAdditionalSettings.tsx | 1 - .../wizard/Model/MultimodalInputSupport.tsx | 82 + .../wizard/Model/__tests__/Model.test.tsx | 1 + .../__tests__/MultimodalInputSupport.test.tsx | 157 + .../common/BedrockInferenceTypeRadio.tsx | 33 +- .../wizard/Model/common/ModelNameDropdown.tsx | 171 - .../wizard/Model/common/ModelProvider.tsx | 5 +- .../BedrockInferenceTypeRadio.test.tsx | 10 +- .../__tests__/ModelNameDropdown.test.tsx | 239 - .../components/wizard/Model/common/helpers.ts | 42 +- .../src/components/wizard/Model/helpers.ts | 2 - .../wizard/Model/providers/Bedrock.tsx | 7 +- .../providers/__tests__/Bedrock.test.tsx | 27 +- .../wizard/Review/AgentBuilderFlowReview.tsx | 58 + .../wizard/Review/AgentBuilderReview.tsx | 108 + .../components/wizard/Review/MCPReview.tsx | 229 + .../components/wizard/Review/ModelReview.tsx | 48 +- .../components/wizard/Review/PromptReview.tsx | 7 +- .../src/components/wizard/Review/Review.tsx | 3 + .../wizard/Review/UseCaseReview.tsx | 25 +- .../__tests__/AgentBuilderFlowReview.test.tsx | 220 + .../__tests__/AgentBuilderReview.test.tsx | 570 ++ .../Review/__tests__/MCPReview.test.tsx | 553 ++ .../Review/__tests__/ModelReview.test.tsx | 161 +- .../Review/__tests__/UseCaseReview.test.tsx | 34 + .../UseCase/EnableProvisionedConcurrency.tsx | 146 + .../src/components/wizard/UseCase/UseCase.tsx | 272 +- .../wizard/UseCase/UseCaseDescription.tsx | 16 +- .../EnableProvisionedConcurrency.test.tsx | 137 + .../wizard/UseCase/__tests__/UseCase.test.tsx | 720 +- .../__tests__/UseCaseDescription.test.tsx | 115 +- .../components/wizard/UseCaseSelection.tsx | 52 +- .../src/components/wizard/WizardView.tsx | 132 +- .../wizard/Workflow/AddAgentModal.tsx | 246 + .../wizard/Workflow/AgentSelection.tsx | 147 + .../components/wizard/Workflow/Workflow.tsx | 108 + .../wizard/Workflow/WorkflowConfigReview.tsx | 105 + .../wizard/Workflow/WorkflowReview.tsx | 42 + .../Workflow/__tests__/Workflow.test.tsx | 876 ++ .../__tests__/WorkflowConfigReview.test.tsx | 681 ++ .../__tests__/WorkflowReview.test.tsx | 247 + .../src/components/wizard/Workflow/index.ts | 6 + .../wizard/__tests__/WizardView.test.tsx | 66 + .../__tests__/agentBuilderUtils.test.js | 467 + .../wizard/__tests__/utils.test.jsx | 64 +- .../components/wizard/interfaces/Steps.tsx | 15 + .../Steps/AgentBuilderReviewStep.tsx | 40 + .../interfaces/Steps/AgentBuilderStep.tsx | 75 + .../interfaces/Steps/BaseWizardStep.tsx | 9 +- .../wizard/interfaces/Steps/MCPReviewStep.tsx | 36 + .../wizard/interfaces/Steps/MCPServerStep.tsx | 287 + .../wizard/interfaces/Steps/ModelStep.tsx | 26 +- .../wizard/interfaces/Steps/UseCaseStep.tsx | 9 +- .../interfaces/Steps/WorkflowReviewStep.tsx | 36 + .../wizard/interfaces/Steps/WorkflowStep.tsx | 73 + .../__tests__/AgentBuilderReviewStep.test.tsx | 116 + .../Steps/__tests__/AgentBuilderStep.test.tsx | 154 + .../__tests__/AgentFlowReviewStep.test.tsx | 56 + .../Steps/__tests__/AgentStep.test.tsx | 127 + .../__tests__/KnowledgeBaseStep.test.tsx | 138 + .../Steps/__tests__/MCPReviewStep.test.tsx | 67 + .../Steps/__tests__/MCPServerStep.test.tsx | 334 + .../Steps/__tests__/ModelStep.test.tsx | 197 + .../Steps/__tests__/PromptStep.test.tsx | 118 + .../Steps/__tests__/ReviewStep.test.tsx | 56 + .../Steps/__tests__/UseCaseStep.test.tsx | 57 + .../Steps/__tests__/VpcStep.test.tsx | 131 + .../interfaces/Steps/__tests__/test-utils.ts | 81 + .../wizard/interfaces/UseCaseTypes/Agent.tsx | 6 +- .../interfaces/UseCaseTypes/AgentBuilder.tsx | 42 + .../interfaces/UseCaseTypes/MCPHost.tsx | 34 + .../wizard/interfaces/UseCaseTypes/Text.tsx | 13 +- .../interfaces/UseCaseTypes/UseCaseType.tsx | 1 + .../interfaces/UseCaseTypes/Workflow.tsx | 42 + .../__tests__/UseCaseType.test.tsx | 188 + .../src/components/wizard/interfaces/index.ts | 1 + .../src/components/wizard/params-builder.jsx | 930 ++ .../src/components/wizard/steps-config.jsx | 59 +- .../src/components/wizard/utils.jsx | 782 +- .../utils/__tests__/mcpSchemaUpload.test.ts | 277 + .../wizard/utils/mcpSchemaUpload.ts | 164 + .../components/wizard/wizard-components.jsx | 7 +- .../src/hooks/__tests__/useQueries.test.tsx | 623 +- .../src/hooks/useNotifications.ts | 138 + source/ui-deployment/src/hooks/useQueries.ts | 139 +- .../services/__tests__/fetchMcpData.test.ts | 492 + .../__tests__/fetchSchemaUpload.test.ts | 372 + .../src/services/fetchAgentData.ts | 60 + .../src/services/fetchMcpData.ts | 152 + .../src/services/fetchSchemaUpload.ts | 186 + source/ui-deployment/src/services/index.ts | 3 + .../src/utils/KeyValueDisplay.tsx | 83 + .../utils/__tests__/KeyValueDisplay.test.tsx | 379 + .../src/utils/__tests__/utils.test.ts | 79 +- source/ui-deployment/src/utils/constants.ts | 314 +- source/ui-deployment/src/utils/index.ts | 2 + .../src/utils/notificationHelpers.ts | 138 + source/ui-deployment/src/utils/utils.tsx | 47 +- 795 files changed, 154789 insertions(+), 15368 deletions(-) create mode 100644 deployment/ecr/.dockerignore create mode 100644 deployment/ecr/gaab-strands-agent/.dockerignore create mode 100644 deployment/ecr/gaab-strands-agent/Dockerfile create mode 100644 deployment/ecr/gaab-strands-agent/README.md create mode 100644 deployment/ecr/gaab-strands-agent/pyproject.toml create mode 100755 deployment/ecr/gaab-strands-agent/scripts/build-container.sh create mode 100755 deployment/ecr/gaab-strands-agent/scripts/deploy-ecr.sh create mode 100755 deployment/ecr/gaab-strands-agent/scripts/run_unit_tests.sh create mode 100644 deployment/ecr/gaab-strands-agent/src/configurable_agent.py create mode 100644 deployment/ecr/gaab-strands-agent/src/main.py create mode 100644 deployment/ecr/gaab-strands-agent/test/conftest.py create mode 100644 deployment/ecr/gaab-strands-agent/test/run_tests.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_configurable_agent.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_ddb_helper.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_integration.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_main_memory.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_mcp_tools_loader.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_models.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_runtime_mcp_discovery.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_strands_tools_registry.py create mode 100644 deployment/ecr/gaab-strands-agent/test/test_tools_manager.py create mode 100644 deployment/ecr/gaab-strands-agent/uv.lock create mode 100644 deployment/ecr/gaab-strands-common/README.md create mode 100644 deployment/ecr/gaab-strands-common/pyproject.toml create mode 100644 deployment/ecr/gaab-strands-common/pytest.ini create mode 100755 deployment/ecr/gaab-strands-common/scripts/run_unit_tests.sh create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/__init__.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/base_agent.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/constants.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/__init__.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/s3_file_reader.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/__init__.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/base_tool.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/decorators.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/metadata.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/registry.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/ddb_helper.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/mcp_tools_loader.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/models.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/__init__.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/file_handler.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/multimodal_processor.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/runtime_streaming.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/strands_tools_registry.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/tool_wrapper.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/tools_manager.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/__init__.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/constants.py create mode 100644 deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/helpers.py create mode 100644 deployment/ecr/gaab-strands-common/test/README.md create mode 100644 deployment/ecr/gaab-strands-common/test/__init__.py create mode 100644 deployment/ecr/gaab-strands-common/test/conftest.py create mode 100644 deployment/ecr/gaab-strands-common/test/custom_tools/test_base_tool.py create mode 100644 deployment/ecr/gaab-strands-common/test/custom_tools/test_decorators.py create mode 100644 deployment/ecr/gaab-strands-common/test/custom_tools/test_metadata.py create mode 100644 deployment/ecr/gaab-strands-common/test/custom_tools/test_registry.py create mode 100644 deployment/ecr/gaab-strands-common/test/custom_tools/test_s3_file_reader.py create mode 100644 deployment/ecr/gaab-strands-common/test/multimodal/test_file_handler.py create mode 100644 deployment/ecr/gaab-strands-common/test/multimodal/test_multimodal_processor.py create mode 100644 deployment/ecr/gaab-strands-common/test/pytest_plugin.py create mode 100644 deployment/ecr/gaab-strands-common/test/test_base_agent.py create mode 100644 deployment/ecr/gaab-strands-common/test/test_helpers.py create mode 100644 deployment/ecr/gaab-strands-common/test/test_models.py create mode 100644 deployment/ecr/gaab-strands-common/test/test_runtime_streaming.py create mode 100644 deployment/ecr/gaab-strands-common/test/test_tool_wrapper.py create mode 100644 deployment/ecr/gaab-strands-common/test/utils/test_helpers.py create mode 100644 deployment/ecr/gaab-strands-common/uv.lock create mode 100644 deployment/ecr/gaab-strands-workflow-agent/.dockerignore create mode 100644 deployment/ecr/gaab-strands-workflow-agent/Dockerfile create mode 100644 deployment/ecr/gaab-strands-workflow-agent/README.md create mode 100644 deployment/ecr/gaab-strands-workflow-agent/pyproject.toml create mode 100755 deployment/ecr/gaab-strands-workflow-agent/scripts/build-container.sh create mode 100755 deployment/ecr/gaab-strands-workflow-agent/scripts/deploy-ecr.sh create mode 100755 deployment/ecr/gaab-strands-workflow-agent/scripts/run_unit_tests.sh create mode 100644 deployment/ecr/gaab-strands-workflow-agent/src/agents_loader.py create mode 100644 deployment/ecr/gaab-strands-workflow-agent/src/main.py create mode 100644 deployment/ecr/gaab-strands-workflow-agent/src/workflow_agent.py create mode 100644 deployment/ecr/gaab-strands-workflow-agent/test/conftest.py create mode 100644 deployment/ecr/gaab-strands-workflow-agent/test/test_agentcore_integration.py create mode 100644 deployment/ecr/gaab-strands-workflow-agent/test/test_agents_loader.py create mode 100644 deployment/ecr/gaab-strands-workflow-agent/test/test_workflow_agent.py create mode 100644 deployment/ecr/gaab-strands-workflow-agent/test/test_workflow_agent_memory.py create mode 100644 deployment/ecr/gaab-strands-workflow-agent/uv.lock delete mode 100644 source/.prettierignore delete mode 100644 source/.prettierrc.yml create mode 100644 source/infrastructure/lib/api/deployment-platform-rest-api-helper.ts delete mode 100644 source/infrastructure/lib/api/model-schema/deploy-usecase-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/agents/deploy-agent-usecase-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/agents/deploy-agent-usecase-response.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/agents/params/agent-core-params.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/agents/update-agent-usecase-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/agents/update-agent-usecase-response.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/base-usecase-schema.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/deploy-usecase-body.ts rename source/infrastructure/lib/api/model-schema/{ => deployments}/deploy-usecase-response.ts (72%) create mode 100644 source/infrastructure/lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-response.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/mcp/params/mcp-gateway-params.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/mcp/update-mcp-usecase-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/mcp/update-mcp-usecase-response.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/mcp/upload-schema-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/mcp/upload-schema-response.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/update-usecase-body.ts rename source/infrastructure/lib/api/model-schema/{ => deployments}/update-usecase-response.ts (70%) create mode 100644 source/infrastructure/lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-response.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/workflows/params/workflow-core-params.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/workflows/update-workflow-usecase-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/deployments/workflows/update-workflow-usecase-response.ts rename source/infrastructure/lib/api/model-schema/{ => feedback}/feedback-body.ts (90%) create mode 100644 source/infrastructure/lib/api/model-schema/index.ts create mode 100644 source/infrastructure/lib/api/model-schema/multimodal/files-delete-request-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/multimodal/files-delete-response-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/multimodal/files-get-response-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/multimodal/files-upload-request-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/multimodal/files-upload-response-body.ts create mode 100644 source/infrastructure/lib/api/model-schema/shared/agent-memory-params.ts create mode 100644 source/infrastructure/lib/api/model-schema/shared/auth-params.ts create mode 100644 source/infrastructure/lib/api/model-schema/shared/knowledge-base-params.ts create mode 100644 source/infrastructure/lib/api/model-schema/shared/llm-params.ts create mode 100644 source/infrastructure/lib/api/model-schema/shared/vpc-params.ts delete mode 100644 source/infrastructure/lib/api/model-schema/update-usecase-body.ts create mode 100644 source/infrastructure/lib/auth/component-cognito-app-client.ts create mode 100644 source/infrastructure/lib/mcp-server-stack.ts create mode 100644 source/infrastructure/lib/multimodal/multimodal-setup.ts create mode 100644 source/infrastructure/lib/use-case-management/cfn-deploy-role-factory.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/agent-builder-stack.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/agent-core-base-stack.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/components/agent-execution-role.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/components/agent-invocation-lambda.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/components/agent-memory-deployment.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/components/agent-runtime-deployment.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/components/ecr-pull-through-cache.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/utils/image-uri-resolver.ts create mode 100644 source/infrastructure/lib/use-case-stacks/agent-core/workflow-stack.ts delete mode 100644 source/infrastructure/lib/utils/app-registry-aspects.ts create mode 100644 source/infrastructure/lib/vpc/agent-builder-vpc.ts create mode 100644 source/infrastructure/test/api/deployment-platform-rest-api-helper.test.ts delete mode 100644 source/infrastructure/test/api/model-schema/deploy-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/agents/deploy-agent-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/agents/params/agent-core-params.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/agents/params/agent-memory-params.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/agents/update-agent-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/deploy-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/mcp/deploy-mcp-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/mcp/params/mcp-gateway-params.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/mcp/update-mcp-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/mcp/upload-schema-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/mcp/upload-schema-response.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/update-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/workflows/deploy-workflow-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/workflows/params/workflow-core-params.test.ts create mode 100644 source/infrastructure/test/api/model-schema/deployments/workflows/update-workflow-usecase-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/feedback/feedback-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/multimodal/files-delete-request-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/multimodal/files-delete-response-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/multimodal/files-get-response-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/multimodal/files-upload-request-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/multimodal/files-upload-response-body.test.ts create mode 100644 source/infrastructure/test/api/model-schema/shared/auth-params.test.ts rename source/infrastructure/test/api/model-schema/{update-usecase-body.test.ts => shared/bedrock-params.test.ts} (54%) create mode 100644 source/infrastructure/test/api/model-schema/shared/llm-params.test.ts rename source/infrastructure/test/api/model-schema/{ => shared}/utils.ts (99%) create mode 100644 source/infrastructure/test/api/model-schema/shared/vpc-params.test.ts create mode 100644 source/infrastructure/test/auth/component-cognito-app-client.test.ts create mode 100644 source/infrastructure/test/mcp-server-stack.test.ts create mode 100644 source/infrastructure/test/multimodal/multimodal-setup.test.ts create mode 100644 source/infrastructure/test/use-case-management/cfn-deploy-role-factory.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/agent-builder-stack.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/agent-core-base-stack.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/components/agent-execution-role.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/components/agent-invocation-lambda.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/components/agent-memory-deployment.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/components/agent-runtime-deployment.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/components/ecr-pull-through-cache.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/utils/image-uri-resolver.test.ts create mode 100644 source/infrastructure/test/use-case-stacks/agent-core/workflow-stack.test.ts delete mode 100644 source/infrastructure/test/utils/app-registry.test.ts create mode 100644 source/lambda/agentcore-invocation/handler.py create mode 100644 source/lambda/agentcore-invocation/poetry.lock create mode 100644 source/lambda/agentcore-invocation/pyproject.toml create mode 100644 source/lambda/agentcore-invocation/test/__init__.py create mode 100644 source/lambda/agentcore-invocation/test/test_agentcore_client.py create mode 100644 source/lambda/agentcore-invocation/test/test_event_processor.py create mode 100644 source/lambda/agentcore-invocation/test/test_handler_integration.py create mode 100644 source/lambda/agentcore-invocation/test/test_helper.py create mode 100644 source/lambda/agentcore-invocation/test/test_keep_alive_manager.py create mode 100644 source/lambda/agentcore-invocation/utils/__init__.py create mode 100644 source/lambda/agentcore-invocation/utils/agentcore_client.py create mode 100644 source/lambda/agentcore-invocation/utils/constants.py create mode 100644 source/lambda/agentcore-invocation/utils/event_processor.py create mode 100644 source/lambda/agentcore-invocation/utils/helper.py create mode 100644 source/lambda/agentcore-invocation/utils/keep_alive_manager.py create mode 100644 source/lambda/agentcore-invocation/utils/websocket_error_handler.py create mode 100644 source/lambda/custom-resource/operations/agentcore_oauth_client.py create mode 100644 source/lambda/custom-resource/operations/agentcore_outbound_permissions.py create mode 100644 source/lambda/custom-resource/operations/deploy_agent_core.py create mode 100644 source/lambda/custom-resource/operations/deploy_agent_core_memory.py create mode 100644 source/lambda/custom-resource/operations/deploy_mcp_gateway.py create mode 100644 source/lambda/custom-resource/operations/deploy_mcp_runtime.py create mode 100644 source/lambda/custom-resource/operations/gen_ecr_repo_prefix.py create mode 100644 source/lambda/custom-resource/operations/lambda_version_generator.py create mode 100644 source/lambda/custom-resource/operations/multimodal_bucket_notifications.py rename source/lambda/custom-resource/operations/{anonymous_metrics.py => send_metrics.py} (70%) create mode 100644 source/lambda/custom-resource/operations/sleep.py create mode 100644 source/lambda/custom-resource/test/fixtures/agentcore_oauth_client_events.py create mode 100644 source/lambda/custom-resource/test/fixtures/agentcore_outbound_permissions_events.py create mode 100644 source/lambda/custom-resource/test/fixtures/gen_ecr_repo_prefix_events.py create mode 100644 source/lambda/custom-resource/test/fixtures/lambda_version_generator_events.py rename source/lambda/custom-resource/test/fixtures/{anonymous_metrics_events.py => metrics_events.py} (50%) create mode 100644 source/lambda/custom-resource/test/operations/test_agentcore_oauth_client.py create mode 100644 source/lambda/custom-resource/test/operations/test_agentcore_outbound_permissions.py create mode 100644 source/lambda/custom-resource/test/operations/test_deploy_agent_core.py create mode 100644 source/lambda/custom-resource/test/operations/test_deploy_agent_core_memory.py create mode 100644 source/lambda/custom-resource/test/operations/test_deploy_mcp_gateway.py create mode 100644 source/lambda/custom-resource/test/operations/test_deploy_mcp_runtime.py create mode 100644 source/lambda/custom-resource/test/operations/test_gen_ecr_repo_prefix.py create mode 100644 source/lambda/custom-resource/test/operations/test_lambda_version_generator.py create mode 100644 source/lambda/custom-resource/test/operations/test_multimodal_bucket_notifications.py rename source/lambda/custom-resource/test/operations/{test_anonymous_metric.py => test_send_metrics.py} (50%) create mode 100644 source/lambda/custom-resource/test/utils/test_agentcore_mcp.py create mode 100644 source/lambda/custom-resource/test/utils/test_auth_manager.py create mode 100644 source/lambda/custom-resource/test/utils/test_data.py create mode 100644 source/lambda/custom-resource/test/utils/test_gateway_mcp.py create mode 100644 source/lambda/custom-resource/test/utils/test_lambda_target_creator.py create mode 100644 source/lambda/custom-resource/test/utils/test_mcp_config_manager.py create mode 100644 source/lambda/custom-resource/test/utils/test_mcp_factory.py create mode 100644 source/lambda/custom-resource/test/utils/test_openapi_target_creator.py create mode 100644 source/lambda/custom-resource/test/utils/test_policy_manager.py create mode 100644 source/lambda/custom-resource/test/utils/test_runtime_mcp.py create mode 100644 source/lambda/custom-resource/test/utils/test_smithy_target_creator.py create mode 100644 source/lambda/custom-resource/utils/agent_core_utils.py create mode 100644 source/lambda/custom-resource/utils/agentcore_mcp.py create mode 100644 source/lambda/custom-resource/utils/auth_manager.py create mode 100644 source/lambda/custom-resource/utils/gateway_mcp.py create mode 100644 source/lambda/custom-resource/utils/lambda_target_creator.py create mode 100644 source/lambda/custom-resource/utils/mcp_config_manager.py create mode 100644 source/lambda/custom-resource/utils/mcp_factory.py create mode 100644 source/lambda/custom-resource/utils/openapi_target_creator.py create mode 100644 source/lambda/custom-resource/utils/policy_manager.py create mode 100644 source/lambda/custom-resource/utils/runtime_mcp.py create mode 100644 source/lambda/custom-resource/utils/smithy_target_creator.py create mode 100644 source/lambda/files-management/index.ts create mode 100644 source/lambda/files-management/jest.config.js create mode 100644 source/lambda/files-management/models/file-command.ts create mode 100644 source/lambda/files-management/models/files-factory.ts create mode 100644 source/lambda/files-management/models/types.ts create mode 100644 source/lambda/files-management/package-lock.json create mode 100644 source/lambda/files-management/package.json create mode 100644 source/lambda/files-management/power-tools-init.ts create mode 100644 source/lambda/files-management/services/ddb-config-service.ts create mode 100644 source/lambda/files-management/services/ddb-metadata-service.ts create mode 100644 source/lambda/files-management/services/s3-presigned-url-service.ts create mode 100644 source/lambda/files-management/test/commands/file-command.test.ts create mode 100644 source/lambda/files-management/test/factories/files-factory.test.ts create mode 100644 source/lambda/files-management/test/index.test.ts create mode 100644 source/lambda/files-management/test/services/ddb-config-service.test.ts create mode 100644 source/lambda/files-management/test/services/ddb-metadata-service.test.ts create mode 100644 source/lambda/files-management/test/services/s3-presigned-url-service.test.ts create mode 100644 source/lambda/files-management/test/utils/error.test.ts create mode 100644 source/lambda/files-management/test/utils/http-response-formatter.test.ts create mode 100644 source/lambda/files-management/test/utils/multimodal-cache.test.ts create mode 100644 source/lambda/files-management/test/utils/utils.test.ts create mode 100644 source/lambda/files-management/test/validators/file-validator.test.ts create mode 100644 source/lambda/files-management/test/validators/request-validators.test.ts create mode 100644 source/lambda/files-management/tsconfig.json create mode 100644 source/lambda/files-management/utils/constants.ts create mode 100644 source/lambda/files-management/utils/error.ts create mode 100644 source/lambda/files-management/utils/http-response-formatters.ts create mode 100644 source/lambda/files-management/utils/multimodal-cache.ts create mode 100644 source/lambda/files-management/utils/utils.ts create mode 100644 source/lambda/files-management/validators/file-validator.ts create mode 100644 source/lambda/files-management/validators/request-validators.ts create mode 100644 source/lambda/files-metadata-management/index.ts create mode 100644 source/lambda/files-metadata-management/jest.config.js create mode 100644 source/lambda/files-metadata-management/models/types.ts create mode 100644 source/lambda/files-metadata-management/package-lock.json create mode 100644 source/lambda/files-metadata-management/package.json create mode 100644 source/lambda/files-metadata-management/power-tools-init.ts create mode 100644 source/lambda/files-metadata-management/test/index.test.ts create mode 100644 source/lambda/files-metadata-management/test/power-tools-init.test.ts create mode 100644 source/lambda/files-metadata-management/test/utils/error.test.ts create mode 100644 source/lambda/files-metadata-management/test/utils/eventbridge-processor.test.ts create mode 100644 source/lambda/files-metadata-management/test/utils/file-validator.test.ts create mode 100644 source/lambda/files-metadata-management/test/utils/http-response-formatter.test.ts create mode 100644 source/lambda/files-metadata-management/test/utils/metadata-validator.test.ts create mode 100644 source/lambda/files-metadata-management/test/utils/utils.test.ts create mode 100644 source/lambda/files-metadata-management/tsconfig.json create mode 100644 source/lambda/files-metadata-management/utils/constants.ts create mode 100644 source/lambda/files-metadata-management/utils/error.ts create mode 100644 source/lambda/files-metadata-management/utils/eventbridge-processor.ts create mode 100644 source/lambda/files-metadata-management/utils/file-validator.ts create mode 100644 source/lambda/files-metadata-management/utils/http-response-formatters.ts create mode 100644 source/lambda/files-metadata-management/utils/metadata-validator.ts create mode 100644 source/lambda/files-metadata-management/utils/utils.ts delete mode 100644 source/lambda/layers/aws-node-user-agent-config/.gitignore create mode 100644 source/lambda/layers/aws-node-user-agent-config/jest.config.js create mode 100644 source/lambda/layers/aws-sdk-lib/aws-client-manager.test.ts create mode 100644 source/lambda/layers/aws-sdk-lib/aws-client-manager.ts create mode 100644 source/lambda/layers/aws-sdk-lib/index.ts create mode 100644 source/lambda/layers/aws-sdk-lib/jest.config.js create mode 100644 source/lambda/layers/aws-sdk-lib/tsconfig.json create mode 100644 source/lambda/model-info/models/types.ts create mode 100644 source/lambda/use-case-management/agents-handler.ts create mode 100644 source/lambda/use-case-management/mcp-handler.ts create mode 100644 source/lambda/use-case-management/model/adapters/agent-builder-use-case-adapter.ts rename source/lambda/use-case-management/model/{ => adapters}/agent-use-case-adapter.ts (91%) rename source/lambda/use-case-management/model/{ => adapters}/chat-use-case-adapter.ts (94%) create mode 100644 source/lambda/use-case-management/model/adapters/mcp-adapter.ts create mode 100644 source/lambda/use-case-management/model/adapters/workflow-use-case-adapter.ts create mode 100644 source/lambda/use-case-management/model/commands/agent-builder-command.ts create mode 100644 source/lambda/use-case-management/model/commands/case-command.ts create mode 100644 source/lambda/use-case-management/model/commands/mcp-command.ts rename source/lambda/use-case-management/{command.ts => model/commands/use-case-command.ts} (91%) create mode 100644 source/lambda/use-case-management/model/commands/workflow-command.ts delete mode 100644 source/lambda/use-case-management/model/use-case-validator.ts create mode 100644 source/lambda/use-case-management/model/validators/agent-builder-validator.ts create mode 100644 source/lambda/use-case-management/model/validators/agent-validator.ts create mode 100644 source/lambda/use-case-management/model/validators/base-validator.ts create mode 100644 source/lambda/use-case-management/model/validators/config-merge-utils.ts create mode 100644 source/lambda/use-case-management/model/validators/index.ts create mode 100644 source/lambda/use-case-management/model/validators/mcp-validator.ts create mode 100644 source/lambda/use-case-management/model/validators/text-validator.ts create mode 100644 source/lambda/use-case-management/model/validators/validation-utils.ts create mode 100644 source/lambda/use-case-management/model/validators/validator-factory.ts create mode 100644 source/lambda/use-case-management/model/validators/workflow-validator.ts create mode 100644 source/lambda/use-case-management/s3/s3-management.ts create mode 100644 source/lambda/use-case-management/test/agents-handler.test.ts create mode 100644 source/lambda/use-case-management/test/mcp-handler.test.ts create mode 100644 source/lambda/use-case-management/test/model/adapters/agent-builder-use-case-adapter.test.ts rename source/lambda/use-case-management/test/model/{ => adapters}/agent-use-case-adapter.test.ts (59%) rename source/lambda/use-case-management/test/model/{ => adapters}/chat-use-case-adapter.test.ts (96%) create mode 100644 source/lambda/use-case-management/test/model/adapters/mcp-adapter.test.ts create mode 100644 source/lambda/use-case-management/test/model/adapters/workflow-use-case-adapter.test.ts create mode 100644 source/lambda/use-case-management/test/model/command/agent-builder-command.test.ts create mode 100644 source/lambda/use-case-management/test/model/command/workflow-command.test.ts create mode 100644 source/lambda/use-case-management/test/model/commands/mcp-command.test.ts delete mode 100644 source/lambda/use-case-management/test/model/use-case-validator.test.ts create mode 100644 source/lambda/use-case-management/test/model/validators/agent-builder-validator.test.ts create mode 100644 source/lambda/use-case-management/test/model/validators/agent-validator.test.ts create mode 100644 source/lambda/use-case-management/test/model/validators/config-merge-utils.test.ts create mode 100644 source/lambda/use-case-management/test/model/validators/mcp-validator.test.ts create mode 100644 source/lambda/use-case-management/test/model/validators/text-validator.test.ts create mode 100644 source/lambda/use-case-management/test/model/validators/validator-factory.test.ts create mode 100644 source/lambda/use-case-management/test/model/validators/workflow-validator.test.ts create mode 100644 source/lambda/use-case-management/test/s3/s3-management.test.ts rename source/lambda/use-case-management/test/{command.test.ts => use-case-command.test.ts} (99%) rename source/lambda/use-case-management/test/{index.test.ts => use-case-handler.test.ts} (91%) create mode 100644 source/lambda/use-case-management/test/utils/utils.test.ts create mode 100644 source/lambda/use-case-management/test/workflows-handler.test.ts rename source/lambda/use-case-management/{index.ts => use-case-handler.ts} (50%) delete mode 100644 source/lambda/use-case-management/utils/check-env.ts create mode 100644 source/lambda/use-case-management/workflows-handler.ts delete mode 100644 source/model-info/chat-bedrock-ai21.jamba-1-5-large-v1.json delete mode 100644 source/model-info/chat-bedrock-ai21.jamba-1-5-mini-v1.json delete mode 100644 source/model-info/chat-bedrock-amazon-nova-lite-v1.json delete mode 100644 source/model-info/chat-bedrock-amazon-nova-micro-v1.json delete mode 100644 source/model-info/chat-bedrock-amazon-nova-pro-v1.json delete mode 100644 source/model-info/chat-bedrock-anthropic-claude-v3-haiku-v1.json delete mode 100644 source/model-info/chat-bedrock-anthropic-claude-v3.5-sonnet-v1.json delete mode 100644 source/model-info/chat-bedrock-cohere-command-r-plus-v1.json delete mode 100644 source/model-info/chat-bedrock-cohere-command-r-v1.json delete mode 100644 source/model-info/chat-bedrock-llama3-70b-instruct-v1.json delete mode 100644 source/model-info/chat-bedrock-llama3-8b-instruct-v1.json delete mode 100644 source/model-info/ragchat-bedrock-ai21.jamba-1-5-large-v1.json delete mode 100644 source/model-info/ragchat-bedrock-ai21.jamba-1-5-mini-v1.json delete mode 100644 source/model-info/ragchat-bedrock-amazon-nova-lite-v1.json delete mode 100644 source/model-info/ragchat-bedrock-amazon-nova-micro-v1.json delete mode 100644 source/model-info/ragchat-bedrock-amazon-nova-pro-v1.json delete mode 100644 source/model-info/ragchat-bedrock-anthropic-claude-v3-haiku-v1.json delete mode 100644 source/model-info/ragchat-bedrock-anthropic-claude-v3.5-sonnet-v1.json delete mode 100644 source/model-info/ragchat-bedrock-cohere-command-r-plus-v1.json delete mode 100644 source/model-info/ragchat-bedrock-cohere-command-r-v1.json delete mode 100644 source/model-info/ragchat-bedrock-llama3-70b-instruct-v1.json delete mode 100644 source/model-info/ragchat-bedrock-llama3-8b-instruct-v1.json create mode 100755 source/pre-build-ecr-images.sh create mode 100644 source/ui-chat/src/__tests__/components/common/external-link-warning-modal.test.tsx create mode 100644 source/ui-chat/src/__tests__/components/thinking/ThinkingIndicator.test.tsx create mode 100644 source/ui-chat/src/__tests__/models/response.test.ts create mode 100644 source/ui-chat/src/__tests__/pages/chat/types.test.ts create mode 100644 source/ui-chat/src/__tests__/utils/file-upload.test.ts create mode 100644 source/ui-chat/src/components/common/external-link-warning-modal.tsx create mode 100644 source/ui-chat/src/components/multimodal/FileDisplay.tsx create mode 100644 source/ui-chat/src/components/multimodal/FileTokenGroup.tsx create mode 100644 source/ui-chat/src/components/multimodal/__tests__/FileDisplay.test.tsx create mode 100644 source/ui-chat/src/components/thinking/ExpandableContent.scss create mode 100644 source/ui-chat/src/components/thinking/ExpandableContent.tsx create mode 100644 source/ui-chat/src/components/thinking/ThinkingIndicator.scss create mode 100644 source/ui-chat/src/components/thinking/ThinkingIndicator.tsx create mode 100644 source/ui-chat/src/components/thinking/index.ts create mode 100644 source/ui-chat/src/components/tool-usage/ToolUsageList.scss create mode 100644 source/ui-chat/src/components/tool-usage/ToolUsageList.tsx create mode 100644 source/ui-chat/src/components/tool-usage/index.ts create mode 100644 source/ui-chat/src/components/tools/ToolUsageIndicator.scss create mode 100644 source/ui-chat/src/components/tools/ToolUsageIndicator.tsx create mode 100644 source/ui-chat/src/components/tools/index.ts create mode 100644 source/ui-chat/src/hooks/__tests__/use-file-upload.test.ts create mode 100644 source/ui-chat/src/hooks/use-file-upload.ts create mode 100644 source/ui-chat/src/services/__tests__/fileUploadService.test.ts create mode 100644 source/ui-chat/src/services/fileUploadService.ts create mode 100644 source/ui-chat/src/types/file-upload.ts create mode 100644 source/ui-chat/src/utils/extract-thinking-content.ts create mode 100644 source/ui-chat/src/utils/file-upload.ts create mode 100644 source/ui-deployment/src/components/__tests__/dashboard/DashboardNavigation.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/Gateway.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/__snapshots__/Gateway.test.tsx.snap create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/gateway/Gateway.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/gateway/GatewayDetails.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPItem.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPs.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPsList.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/ToolItem.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/ToolsList.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/memory/MemoryDetails.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/systemPrompt/SystemPromptDetails.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/workflowDetails/WorkflowDetails.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/useCaseDetails/workflowOrchestration/WorkflowOrchestration.test.tsx create mode 100644 source/ui-deployment/src/components/__tests__/wizard/Workflow/AddAgentModal.test.tsx create mode 100644 source/ui-deployment/src/components/common/Notifications.tsx create mode 100644 source/ui-deployment/src/components/common/index.ts create mode 100644 source/ui-deployment/src/components/commons/__tests__/notifications.test.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/agentDetails/AgentDetails.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/agentDetails/index.ts create mode 100644 source/ui-deployment/src/components/useCaseDetails/gateway/Gateway.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/gateway/GatewayDetails.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/gateway/index.ts create mode 100644 source/ui-deployment/src/components/useCaseDetails/mcps/MCPItem.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/mcps/MCPs.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/mcps/MCPsList.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/mcps/ToolItem.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/mcps/ToolsList.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/mcps/index.ts create mode 100644 source/ui-deployment/src/components/useCaseDetails/memory/Memory.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/memory/MemoryDetails.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/memory/index.ts create mode 100644 source/ui-deployment/src/components/useCaseDetails/runtime/Runtime.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/runtime/RuntimeDetails.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/runtime/index.ts create mode 100644 source/ui-deployment/src/components/useCaseDetails/systemPrompt/SystemPrompt.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/systemPrompt/SystemPromptDetails.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/systemPrompt/index.ts create mode 100644 source/ui-deployment/src/components/useCaseDetails/targets/TargetItem.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/targets/Targets.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/targets/TargetsList.tsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/targets/index.ts create mode 100644 source/ui-deployment/src/components/useCaseDetails/workflowDetails/WorkflowDetails.jsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/workflowDetails/index.js create mode 100644 source/ui-deployment/src/components/useCaseDetails/workflowOrchestration/WorkflowOrchestration.jsx create mode 100644 source/ui-deployment/src/components/useCaseDetails/workflowOrchestration/index.js create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/AgentBuilder.tsx create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/Memory.tsx create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/SystemPrompt.tsx create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/Tools.tsx create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/__tests__/AgentBuilder.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/__tests__/Memory.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/__tests__/SystemPrompt.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/__tests__/Tools.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/AgentBuilder/index.ts create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/AdditionalConfigurations.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/LambdaTarget.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/MCPGatewayConfiguration.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/MCPRuntimeConfiguration.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/MCPServer.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/MCPServerConfiguration.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/MCPTargetConfiguration.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/OpenAPITarget.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/OutboundAuth.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/SchemaUpload.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/SmithyTarget.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/TargetBasicInfo.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/TargetTypeSelector.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/__tests__/AdditionalConfigurations.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/__tests__/MCPGatewayConfiguration.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/__tests__/MCPRuntimeConfiguration.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/__tests__/MCPServer.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/__tests__/MCPServerConfiguration.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/__tests__/MCPTargetConfiguration.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/__tests__/SchemaUpload.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/__tests__/helpers.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/helpers.tsx create mode 100644 source/ui-deployment/src/components/wizard/MCPServer/index.ts create mode 100644 source/ui-deployment/src/components/wizard/Model/MultimodalInputSupport.tsx create mode 100644 source/ui-deployment/src/components/wizard/Model/__tests__/MultimodalInputSupport.test.tsx delete mode 100644 source/ui-deployment/src/components/wizard/Model/common/ModelNameDropdown.tsx delete mode 100644 source/ui-deployment/src/components/wizard/Model/common/__tests__/ModelNameDropdown.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/Review/AgentBuilderFlowReview.tsx create mode 100644 source/ui-deployment/src/components/wizard/Review/AgentBuilderReview.tsx create mode 100644 source/ui-deployment/src/components/wizard/Review/MCPReview.tsx create mode 100644 source/ui-deployment/src/components/wizard/Review/__tests__/AgentBuilderFlowReview.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/Review/__tests__/AgentBuilderReview.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/Review/__tests__/MCPReview.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/UseCase/EnableProvisionedConcurrency.tsx create mode 100644 source/ui-deployment/src/components/wizard/UseCase/__tests__/EnableProvisionedConcurrency.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/AddAgentModal.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/AgentSelection.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/Workflow.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/WorkflowConfigReview.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/WorkflowReview.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/__tests__/Workflow.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/__tests__/WorkflowConfigReview.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/__tests__/WorkflowReview.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/Workflow/index.ts create mode 100644 source/ui-deployment/src/components/wizard/__tests__/WizardView.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/__tests__/agentBuilderUtils.test.js create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/AgentBuilderReviewStep.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/AgentBuilderStep.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/MCPReviewStep.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/MCPServerStep.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/WorkflowReviewStep.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/WorkflowStep.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/AgentBuilderReviewStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/AgentBuilderStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/AgentFlowReviewStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/AgentStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/KnowledgeBaseStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/MCPReviewStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/MCPServerStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/ModelStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/PromptStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/ReviewStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/UseCaseStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/VpcStep.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/Steps/__tests__/test-utils.ts create mode 100644 source/ui-deployment/src/components/wizard/interfaces/UseCaseTypes/AgentBuilder.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/UseCaseTypes/MCPHost.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/UseCaseTypes/Workflow.tsx create mode 100644 source/ui-deployment/src/components/wizard/interfaces/UseCaseTypes/__tests__/UseCaseType.test.tsx create mode 100644 source/ui-deployment/src/components/wizard/params-builder.jsx create mode 100644 source/ui-deployment/src/components/wizard/utils/__tests__/mcpSchemaUpload.test.ts create mode 100644 source/ui-deployment/src/components/wizard/utils/mcpSchemaUpload.ts create mode 100644 source/ui-deployment/src/hooks/useNotifications.ts create mode 100644 source/ui-deployment/src/services/__tests__/fetchMcpData.test.ts create mode 100644 source/ui-deployment/src/services/__tests__/fetchSchemaUpload.test.ts create mode 100644 source/ui-deployment/src/services/fetchAgentData.ts create mode 100644 source/ui-deployment/src/services/fetchMcpData.ts create mode 100644 source/ui-deployment/src/services/fetchSchemaUpload.ts create mode 100644 source/ui-deployment/src/utils/KeyValueDisplay.tsx create mode 100644 source/ui-deployment/src/utils/__tests__/KeyValueDisplay.test.tsx create mode 100644 source/ui-deployment/src/utils/notificationHelpers.ts diff --git a/.gitignore b/.gitignore index 9037ac66..0c1003f4 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,8 @@ git-info **/coverage/ **/coverage-reports/ .coverage* +coverage.xml +htmlcov node_modules/ @@ -65,3 +67,14 @@ bom.xml .python-version **/build/ .aider* + +mise.toml + +# Bedrock AgentCore authentication files +.agentcore.yaml +**/.agentcore.yaml + +# Copied gaab-strands-common in agent directories (build artifacts) +# These are copied during build process and should not be tracked +deployment/ecr/gaab-strands-agent/gaab-strands-common/ +deployment/ecr/gaab-strands-workflow-agent/gaab-strands-common/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 0313076e..5f46b6cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,22 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [4.0.0] - 2025-11-20 + +### Added + +- Multi-Agent workflow orchestration to address complex tasks with multiple coordinated agents. +- Agent Builder use case for configuring, deploying, and managing AI Agents from the Management Dashboard. +- MCP Server deployment using images, Lambda functions, OpenAPI specs, or Smitty files. +- Multimodal input capabilities for Agent Builder and workflow use cases. +- AWS Lambda provisioned concurrency support for text and bedrock agent use cases to improve performance and reduce cold starts. + +### Security + +- Upgraded js-yaml to `3.14.2` and `4.1.1` to mitigate CVE-2025-64718 +- Upgraded glob to `10.5.0` to mitigate CVE-2025-64756 +- Upgraded langchain-core to `0.3.80` to mitigate CVE-2025-65106 + ## [3.0.7] - 2025-11-12 ### Security diff --git a/NOTICE.txt b/NOTICE.txt index aba601f1..41eac261 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -40,7 +40,6 @@ This software includes third party software subject to the following copyrights: @aws-cdk/asset-awscli-v1 under the Apache-2.0 license. @aws-cdk/asset-kubectl-v20 under the Apache-2.0 license. @aws-cdk/asset-node-proxy-agent-v6 under the Apache-2.0 license. -@aws-cdk/aws-servicecatalogappregistry-alpha under the Apache-2.0 license. @aws-cdk/cloud-assembly-schema under the Apache-2.0 license. @aws-cdk/integ-tests-alpha under the Apache-2.0 license. @aws-crypto/crc32 under the Apache-2.0 license. @@ -57,6 +56,7 @@ This software includes third party software subject to the following copyrights: @aws-lambda-powertools/tracer under the MIT-0 license. @aws-sdk/abort-controller under the Apache-2.0 license. @aws-sdk/client-api-gateway under the Apache-2.0 license. +@aws-sdk/client-bedrock-agentcore-control under the Apache-2.0 license. @aws-sdk/client-cloudformation under the Apache-2.0 license. @aws-sdk/client-cloudwatch-logs under the Apache-2.0 license. @aws-sdk/client-cognito-identity-provider under the Apache-2.0 license. @@ -130,6 +130,8 @@ This software includes third party software subject to the following copyrights: @aws-sdk/querystring-builder under the Apache-2.0 license. @aws-sdk/querystring-parser under the Apache-2.0 license. @aws-sdk/region-config-resolver under the Apache-2.0 license. +@aws-sdk/s3-presigned-post under the Apache-2.0 license. +@aws-sdk/s3-request-presigner under the Apache-2.0 license. @aws-sdk/service-error-classification under the Apache-2.0 license. @aws-sdk/shared-ini-file-loader under the Apache-2.0 license. @aws-sdk/signature-v4 under the Apache-2.0 license. @@ -150,6 +152,7 @@ This software includes third party software subject to the following copyrights: @aws-sdk/util-defaults-mode-node under the Apache-2.0 license. @aws-sdk/util-dynamodb under the Apache-2.0 license. @aws-sdk/util-endpoints under the Apache-2.0 license. +@aws-sdk/util-format-url under the Apache-2.0 license. @aws-sdk/util-hex-encoding under the Apache-2.0 license. @aws-sdk/util-locate-window under the Apache-2.0 license. @aws-sdk/util-middleware under the Apache-2.0 license. @@ -163,11 +166,13 @@ This software includes third party software subject to the following copyrights: @aws-solutions-constructs/aws-apigateway-lambda under the Apache-2.0 license. @aws-solutions-constructs/aws-apigatewayv2websocket-sqs under the Apache-2.0 license. @aws-solutions-constructs/aws-cloudfront-s3 under the Apache-2.0 license. +@aws-solutions-constructs/aws-constructs-factories under the Apache-2.0 license. @aws-solutions-constructs/aws-lambda-dynamodb under the Apache-2.0 license. @aws-solutions-constructs/aws-sqs-lambda under the Apache-2.0 license. @aws-solutions-constructs/aws-wafwebacl-apigateway under the Apache-2.0 license. @aws-solutions-constructs/core under the Apache-2.0 license. @aws-solutions-constructs/resources under the Apache-2.0 license. +@aws/lambda-invoke-store under the Apache-2.0 license. @babel/code-frame under the MIT license. @babel/compat-data under the MIT license. @babel/core under the MIT license. @@ -180,6 +185,7 @@ This software includes third party software subject to the following copyrights: @babel/helper-define-polyfill-provider under the MIT license. @babel/helper-environment-visitor under the MIT license. @babel/helper-function-name under the MIT license. +@babel/helper-globals under the MIT license. @babel/helper-hoist-variables under the MIT license. @babel/helper-member-expression-to-functions under the MIT license. @babel/helper-module-imports under the MIT license. @@ -385,11 +391,14 @@ This software includes third party software subject to the following copyrights: @jest/console under the MIT license. @jest/core under the MIT license. @jest/create-cache-key-function under the MIT license. +@jest/diff-sequences under the MIT license. @jest/environment under the MIT license. @jest/expect under the MIT license. @jest/expect-utils under the MIT license. @jest/fake-timers under the MIT license. +@jest/get-type under the MIT license. @jest/globals under the MIT license. +@jest/pattern under the MIT license. @jest/reporters under the MIT license. @jest/schemas under the MIT license. @jest/source-map under the MIT license. @@ -398,6 +407,7 @@ This software includes third party software subject to the following copyrights: @jest/transform under the MIT license. @jest/types under the MIT license. @jridgewell/gen-mapping under the MIT license. +@jridgewell/remapping under the MIT license. @jridgewell/resolve-uri under the MIT license. @jridgewell/set-array under the MIT license. @jridgewell/source-map under the MIT license. @@ -573,6 +583,8 @@ This software includes third party software subject to the following copyrights: @smithy/util-uri-escape under the Apache-2.0 license. @smithy/util-utf8 under the Apache-2.0 license. @smithy/util-waiter under the Apache-2.0 license. +@smithy/uuid under the Apache-2.0 license. +@standard-schema/spec under the MIT license. @swc/core under the Apache-2.0 license. @swc/core-darwin-arm64 under the Apache-2.0 AND MIT licenses. @swc/core-darwin-x64 under the Apache-2.0 AND MIT licenses. @@ -601,6 +613,7 @@ This software includes third party software subject to the following copyrights: @testing-library/react under the MIT license. @testing-library/user-event under MIT license. @testing-library/user-event under the MIT license. +@tokenizer/token under the MIT license. @tsconfig/node10 under the MIT license. @tsconfig/node12 under the MIT license. @tsconfig/node14 under the MIT license. @@ -614,10 +627,12 @@ This software includes third party software subject to the following copyrights: @types/babel__generator under the MIT license. @types/babel__template under the MIT license. @types/babel__traverse under the MIT license. +@types/chai under the MIT license. @types/cls-hooked under the MIT license. @types/cookie under the MIT license. @types/cypress under the MIT license. @types/debug under the MIT license. +@types/deep-eql under the MIT license. @types/eslint under the MIT license. @types/eslint-scope under the MIT license. @types/estree under the MIT license. @@ -661,7 +676,9 @@ This software includes third party software subject to the following copyrights: @types/yauzl under the MIT license. @typescript-eslint/eslint-plugin under the MIT license. @typescript-eslint/parser under the BSD-2-Clause license. +@typescript-eslint/project-service under the MIT license. @typescript-eslint/scope-manager under the MIT license. +@typescript-eslint/tsconfig-utils under the MIT license. @typescript-eslint/type-utils under the MIT license. @typescript-eslint/types under the MIT license. @typescript-eslint/typescript-estree under the BSD-2-Clause license. @@ -748,6 +765,7 @@ asn1 under the MIT license. assert-plus under the MIT license. assertion-error under the MIT license. ast-types under the MIT license. +ast-v8-to-istanbul under the MIT license. astral-regex under the MIT license. async under the MIT license. async-function under the MIT license. @@ -763,6 +781,7 @@ aws-cdk under the Apache-2.0 license. aws-cdk-lib under the Apache-2.0 license. aws-jwt-verify under the Apache-2.0 license. aws-lambda-powertools under the MIT license. +aws-opentelemetry-distro under the Apache-2.0 license. aws-sdk under the Apache-2.0 license. aws-sdk-client-mock under the MIT license. aws-sdk-client-mock-jest under the MIT license. @@ -785,6 +804,7 @@ bail under the MIT license. balanced-match under the MIT license. base-64 under the MIT license. base64-js under the MIT license. +baseline-browser-mapping under the Apache-2.0 license. bcrypt-pbkdf under the BSD-3-Clause license. big.js under the MIT license. binary-extensions under the MIT license. @@ -1040,6 +1060,7 @@ fflate under the MIT license. figures under the MIT license. file-entry-cache under the MIT license. file-loader under the MIT license. +file-type under the MIT license. filelist under the Apache-2.0 license. fill-range under the MIT license. finalhandler under the MIT license. @@ -1093,6 +1114,7 @@ graphemer under the MIT license. graphql under the MIT license. greenlet under the MIT license. h11 under the MIT license. +handlebars under the MIT license. harmony-reflect under the Apache-2.0 license. has under the MIT license. has-bigints under the MIT license. @@ -1482,6 +1504,7 @@ path-to-regexp under the MIT license. path-type under the MIT license. pathe under the MIT license. pathval under the MIT license. +peek-readable under the MIT license. pend under the MIT license. performance-now under the MIT license. picocolors under the ISC license. @@ -1526,9 +1549,10 @@ pyasn1 under the BSD-2-Clause license. pycparser under the 0BSD license. pydantic under the MIT license. pydantic-core under the MIT license. -pyjwt under the MIT license. +PyJWT under the MIT license. PyJWT under the MIT license. pytest under the MIT license. +pytest-asyncio under the Apache-2.0 license. pytest-cov under the MIT license. pytest-env under the MIT license. python-dateutil under the Dual License license(s). @@ -1575,6 +1599,7 @@ react-use-websocket under MIT license. react-use-websocket under the MIT license. read-cache under the MIT license. readable-stream under the MIT license. +readable-web-to-node-stream under the MIT license. readdirp under the MIT license. readline under the BSD license. recast under the MIT license. @@ -1685,13 +1710,15 @@ stackframe under the MIT license. stacktrace-parser under the MIT license. statuses under the MIT license. std-env under the MIT license. +strands-agents under the Apache-2.0 License. +strands-agents-tools under the Apache-2.0 License. strict-event-emitter under the MIT license. -string_decoder under the MIT license. string-length under the MIT license. string-width under the MIT license. string.prototype.trim under the MIT license. string.prototype.trimend under the MIT license. string.prototype.trimstart under the MIT license. +string_decoder under the MIT license. stringify-entities under the MIT license. strip-ansi under the MIT license. strip-bom under the MIT license. @@ -1700,6 +1727,7 @@ strip-indent under the MIT license. strip-json-comments under the MIT license. strip-literal under the MIT license. strnum under the MIT license. +strtok3 under the MIT license. style-dictionary under the Apache-2.0 license. style-to-object under the MIT license. sucrase under the MIT license. @@ -1737,6 +1765,7 @@ tmpl under the BSD-3-Clause license. to-fast-properties under the MIT license. to-regex-range under the MIT license. toidentifier under the MIT license. +token-types under the MIT license. tough-cookie under the BSD-3-Clause license. tr46 under the MIT license. traverse under the MIT license. @@ -1765,6 +1794,7 @@ typescript under the Apache-2.0 license. typing-extensions under the PSF-2.0 license. typing-inspection under the MIT license. ufo under the MIT license. +uglify-js under the BSD-2-Clause license. ulid under the MIT license. unbox-primitive under the MIT license. uncontrollable under the MIT license. @@ -1839,6 +1869,7 @@ which-typed-array under the MIT license. why-is-node-running under the MIT license. wide-align under the ISC license. word-wrap under the MIT license. +wordwrap under the MIT license. wrap-ansi under the MIT license. wrappy under the ISC license. wrapt under the BSD-2-Clause license. diff --git a/README.md b/README.md index 69e7260f..bfe65cae 100644 --- a/README.md +++ b/README.md @@ -381,9 +381,9 @@ Follow the standard React development workflow to make changes to the code, and When you're ready to deploy your customized UI projects, follow the instructions in the main README file for building and deploying the solution using the AWS CDK. -## Anonymized data collection +## Data collection -This solution collects anonymized operational metrics to help AWS improve the quality and features of the solution. For more information, including how to disable this capability, please see the [implementation guide](https://docs.aws.amazon.com/solutions/latest/generative-ai-application-builder-on-aws/anonymized-data-collection.html). +This solution sends operational metrics to AWS (the “Data”) about the use of this solution. We use this Data to better understand how customers use this solution and related services and products. AWS’s collection of this Data is subject to the [AWS Privacy Notice](https://aws.amazon.com/privacy/). --- diff --git a/deployment/build-s3-dist.sh b/deployment/build-s3-dist.sh index ba5d6a45..90d83964 100755 --- a/deployment/build-s3-dist.sh +++ b/deployment/build-s3-dist.sh @@ -60,6 +60,96 @@ echo "-------------------------------------------------------------------------- cd $template_dir/cdk-solution-helper npm ci --omit=dev +echo "------------------------------------------------------------------------------" +echo "[Prep] Copying gaab-strands-common to agent source directories" +echo "------------------------------------------------------------------------------" + +copy_gaab_strands_common() { + local source_common="$template_dir/../deployment/ecr/gaab-strands-common" + local ecr_dir="$template_dir/../deployment/ecr" + + # Validate source exists + if [ ! -d "$source_common" ]; then + echo "ERROR: gaab-strands-common source directory not found at $source_common" + exit 1 + fi + + echo "Source: $source_common" + + # Known agent directories + local agent_dirs=("gaab-strands-agent" "gaab-strands-workflow-agent") + local copied_count=0 + + # Copy to each agent directory + for agent in "${agent_dirs[@]}"; do + local agent_dir="$ecr_dir/$agent" + + if [ ! -d "$agent_dir" ]; then + echo "WARNING: Agent directory not found: $agent_dir" + continue + fi + + echo "Processing $agent..." + local dest="$agent_dir/gaab-strands-common" + + # Remove existing copy if present + if [ -d "$dest" ]; then + echo " Removing existing gaab-strands-common" + rm -rf "$dest" + fi + + # Copy with exclusions using rsync + echo " Copying gaab-strands-common..." + rsync -a \ + --exclude='.venv' \ + --exclude='__pycache__' \ + --exclude='.pytest_cache' \ + --exclude='htmlcov' \ + --exclude='.coverage' \ + --exclude='*.pyc' \ + --exclude='*.pyo' \ + --exclude='.git' \ + "$source_common/" "$dest/" + + if [ $? -eq 0 ]; then + echo " ✓ Successfully copied to $agent" + copied_count=$((copied_count + 1)) + else + echo "ERROR: Failed to copy gaab-strands-common to $dest" + exit 1 + fi + done + + if [ $copied_count -eq 0 ]; then + echo "ERROR: No agent directories found or all copies failed" + exit 1 + else + echo "Successfully copied gaab-strands-common to $copied_count agent directory(ies)" + fi + + # Delete source directory in CI/CD pipeline only + # DIST_OUTPUT_BUCKET is only set in the CI/CD pipeline + # This prevents CodeBuild Stage 2 from scanning gaab-strands-common (which has no Dockerfile) + if [ -n "$DIST_OUTPUT_BUCKET" ]; then + echo "CI/CD pipeline detected (DIST_OUTPUT_BUCKET is set)" + echo "Deleting source gaab-strands-common directory..." + + if rm -rf "$source_common"; then + echo "✓ Successfully deleted $source_common" + else + echo "ERROR: Failed to delete source gaab-strands-common directory at $source_common" + echo "This is required in CI/CD to prevent CodeBuild from scanning directories without Dockerfiles" + exit 1 + fi + else + echo "Local build detected (DIST_OUTPUT_BUCKET not set)" + echo "Keeping source gaab-strands-common directory for local development" + fi +} + +# Execute the copy function +copy_gaab_strands_common + echo "------------------------------------------------------------------------------" echo "[Synth] CDK Project" echo "------------------------------------------------------------------------------" diff --git a/deployment/ecr/.dockerignore b/deployment/ecr/.dockerignore new file mode 100644 index 00000000..d8630d82 --- /dev/null +++ b/deployment/ecr/.dockerignore @@ -0,0 +1,45 @@ +# Docker ignore file for gaab-strands-agent build +# Build context is deployment/ecr/ + +# Ignore Python cache and compiled files +__pycache__ +*.pyc +*.pyo +*.pyd +.Python +*.so +*.egg +*.egg-info +dist +build + +# Ignore test artifacts +.pytest_cache +.coverage +.tox + +# Ignore version control +.git +.gitignore + +# Ignore test directories +test +tests + +# Ignore IDE files +.vscode +.idea +*.swp +*.swo +*~ + +# Ignore CI/CD +.github +.gitlab-ci.yml +Jenkinsfile + +# Ignore docs +docs + +# Ignore MCP directory +mcp \ No newline at end of file diff --git a/deployment/ecr/gaab-strands-agent/.dockerignore b/deployment/ecr/gaab-strands-agent/.dockerignore new file mode 100644 index 00000000..d39378e1 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/.dockerignore @@ -0,0 +1,23 @@ +# Ignore virtual environments +.venv +venv +__pycache__ +*.pyc +*.pyo +*.pyd + +# Ignore test artifacts +.pytest_cache +.coverage +htmlcov + +# Ignore IDE files +.vscode +.idea +*.swp + +# Ignore gaab-strands-common's venv and test artifacts +gaab-strands-common/.venv +gaab-strands-common/.pytest_cache +gaab-strands-common/htmlcov +gaab-strands-common/__pycache__ diff --git a/deployment/ecr/gaab-strands-agent/Dockerfile b/deployment/ecr/gaab-strands-agent/Dockerfile new file mode 100644 index 00000000..07b8af6d --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/Dockerfile @@ -0,0 +1,66 @@ +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/python:3.13-slim + +# Install system dependencies with security updates +RUN apt-get update && apt-get upgrade -y \ + && apt-get install -y --no-install-recommends \ + curl \ + openssl \ + && rm -rf /var/lib/apt/lists/* \ + && apt-get clean + +WORKDIR /app + +# Install pip and UV for dependency management +RUN pip install --no-cache-dir --upgrade "pip>=25.3" \ + && pip install --no-cache-dir "uv>=0.5.0" + +# Debug: Show build context structure +RUN echo "=== Build context contents ===" && ls -la . 2>/dev/null || true + +# Copy shared library - must be present in build context +COPY gaab-strands-common /tmp/gaab-strands-common + +# Install shared library using UV +RUN uv pip install --system -e /tmp/gaab-strands-common + +# Debug: Verify shared library was copied +RUN echo "=== Verifying gaab-strands-common ===" && ls -la /tmp/gaab-strands-common + +# Copy dependency files and README from current directory +COPY pyproject.toml uv.lock README.md ./ + +# Install dependencies from pyproject.toml (excluding gaab-strands-common which is already installed) +# Extract non-local dependencies and install them +RUN python3 -c "import tomllib; \ + data = tomllib.load(open('pyproject.toml', 'rb')); \ + deps = [d for d in data['project']['dependencies'] if 'gaab-strands-common' not in d]; \ + print('\n'.join(deps))" > /tmp/deps.txt && \ + uv pip install --system -r /tmp/deps.txt + +# Debug: Show current directory before copying src +RUN echo "=== Current directory contents ===" && ls -la . + +# Copy source code from current directory +COPY src/ ./src/ + +# Install the package itself (no dependencies, they're already installed) +RUN uv pip install --system --no-deps -e . + +# Set Python path +ENV PYTHONPATH=/app/src + +# Create non-root user for security +RUN groupadd -r appuser && useradd -r -g appuser appuser \ + && chown -R appuser:appuser /app + +USER appuser + +# Expose port for AgentCore Runtime +EXPOSE 8080 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ + CMD python -c "import sys; sys.exit(0)" || exit 1 + +# Run the application with OpenTelemetry instrumentation +CMD ["opentelemetry-instrument", "python", "src/main.py"] diff --git a/deployment/ecr/gaab-strands-agent/README.md b/deployment/ecr/gaab-strands-agent/README.md new file mode 100644 index 00000000..471bc026 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/README.md @@ -0,0 +1,165 @@ +# Configurable Strands Agent + +Configurable AI agent built with Strands framework for AWS Bedrock Agent Core Runtime. + +## Quick Start + +```bash +# Build container +./scripts/build-container.sh + +# Deploy to ECR +AWS_ACCOUNT_ID=123456789012 ./scripts/deploy-ecr.sh + +# Run tests +./scripts/run_unit_tests.sh +``` + +## Configuration + +### Environment Variables + +- `AGENT_CONFIG_TABLE` (optional): DynamoDB table name +- `AGENT_CONFIG_KEY` (optional): Configuration key +- `AWS_REGION` (default: us-east-1) +- `LOG_LEVEL` (default: INFO) + +### AgentCore Runtime Payload + +```json +{ + "input": "Your message here" +} +``` + +## Project Structure + +``` +src/ +├── main.py # AgentCore entrypoint +├── configurable_agent.py # Main agent class +├── ddb_helper.py # DynamoDB operations +├── models.py # Data models +└── tools_manager.py # Tool loading and management +test/ # Unit tests +scripts/ # Build/deploy scripts +``` + +## Build Commands + +### Basic Build and Deploy + +```bash +# Build with default settings +./scripts/build-container.sh + +# Deploy to your AWS account +./scripts/deploy-ecr.sh +``` + +### Custom Build Options + +```bash +# Build with custom tag +TAG=v1.0.0 ./scripts/build-container.sh + +# Build for specific platform (e.g., ARM64) +PLATFORM=linux/arm64 ./scripts/build-container.sh + +# Build without cache +NO_CACHE=true ./scripts/build-container.sh + +# Deploy to custom repository +ECR_REPOSITORY=my-custom-repo ./scripts/deploy-ecr.sh +``` + +### Testing + +```bash +# Run tests with coverage (requires UV) +./scripts/run_unit_tests.sh + +# Run tests with HTML coverage report +./scripts/run_unit_tests.sh --coverage-html + +# Run tests without coverage +./scripts/run_unit_tests.sh --no-coverage + +# Run tests directly with UV (after uv sync and editable install) +uv run pytest + +# Run tests with coverage using UV +uv run pytest --cov=src --cov-report=html + +# Or use the all-in-one script (recommended) +./scripts/run_unit_tests.sh +``` + +## Development + +### Prerequisites + +- Python 3.13+ +- UV package manager (required) + +### Installing UV + +```bash +# Using pip (recommended for corporate environments) +pip install uv>=0.5.0 + +# Using pipx (isolated installation) +pipx install uv>=0.5.0 + +# Using Homebrew (macOS) +brew install uv + +# For more installation options, visit: +# https://docs.astral.sh/uv/getting-started/installation/ +``` + +### Setup Development Environment + +**Option 1: Use the test script (recommended, handles everything automatically)** + +```bash +./scripts/run_unit_tests.sh +``` + +**Option 2: Manual setup with UV** + +```bash +# Sync dependencies (creates .venv and installs everything from uv.lock) +uv sync + +# Install the package in editable mode (required for tests to find modules) +uv pip install -e ".[dev,test]" + +# Run tests +uv run pytest +``` + +**Note:** +- The `uv sync` command automatically installs `gaab-strands-common` from the local path as configured in `pyproject.toml`. +- The editable install (`uv pip install -e`) is required for pytest to properly import modules from the `src/` directory. +- Use `./scripts/run_unit_tests.sh` for the most reliable test execution as it handles all environment setup automatically. + +## Key Environment Variables + +### Build Configuration + +- `IMAGE_TAG` - Custom image tag (default: latest) +- `BUILD_ARGS` - Additional Docker build arguments +- `PLATFORM` - Target platform (linux/arm64, linux/amd64) + +### ECR Configuration + +- `AWS_REGION` - AWS region (default: us-east-1) +- `AWS_ACCOUNT_ID` - AWS account ID (auto-detected if not set) +- `ECR_REPOSITORY` - ECR repository name (default: gaab-strands-agent) + +### CI/CD Integration + +- `VERSION` - Image version tag for CI/CD pipelines +- `PUBLIC_ECR_REGISTRY` - Custom ECR registry URL +- `PUBLIC_ECR_TAG` - Pipeline tag override diff --git a/deployment/ecr/gaab-strands-agent/pyproject.toml b/deployment/ecr/gaab-strands-agent/pyproject.toml new file mode 100644 index 00000000..a5580f81 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/pyproject.toml @@ -0,0 +1,96 @@ +[project] +name = "gaab-strands-agent" +version = "4.0.0" +description = "GAAB Strands Agent Runtime for Amazon Bedrock AgentCore" +readme = "README.md" +requires-python = ">=3.13" +license = { text = "Apache-2.0" } +authors = [{ name = "Amazon Web Services" }] +classifiers = [ + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.13", +] + +dependencies = [ + "setuptools>=70.0.0", + "pip>=25.0", + "wheel>=0.42.0", + + # AWS SDK + "boto3>=1.35.0", + + # Strands SDK with OpenTelemetry support + "strands-agents[otel]>=1.10.0", + "strands-agents-tools>=0.2.9", + + # bedrock-agentcore + "bedrock-agentcore>=0.1.5", + + # OpenTelemetry for observability + "aws-opentelemetry-distro>=0.12.1", + + # Pydantic for data validation and parsing + "pydantic>=2.0.0", + + # Shared library + "gaab-strands-common", +] + +[tool.uv.sources] +gaab-strands-common = { path = "../gaab-strands-common", editable = true } + +[project.optional-dependencies] +dev = [ + "pytest>=8.0.0", + "pytest-cov>=5.0.0", + "black>=24.0.0", + "isort>=5.12.0", + "mypy>=1.8.0", +] + +test = [ + "pytest>=8.0.0", + "pytest-cov>=5.0.0", + "moto>=5.0.0", + "pytest-mock>=3.12.0", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src"] + +[tool.uv] +dev-dependencies = [ + "pytest>=8.0.0", + "pytest-cov>=5.0.0", + "black>=24.0.0", + "isort>=5.12.0", + "mypy>=1.8.0", + "moto>=5.0.0", + "pytest-mock>=3.12.0", +] + +[tool.black] +line-length = 100 +target-version = ['py313'] + +[tool.isort] +profile = "black" +line_length = 100 + +[tool.mypy] +python_version = "3.13" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true + +[tool.pytest.ini_options] +testpaths = ["test"] +pythonpath = ["."] +python_files = ["test_*.py", "*_test.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +addopts = "--import-mode=importlib" diff --git a/deployment/ecr/gaab-strands-agent/scripts/build-container.sh b/deployment/ecr/gaab-strands-agent/scripts/build-container.sh new file mode 100755 index 00000000..f626a3da --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/scripts/build-container.sh @@ -0,0 +1,286 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/bin/bash + +set -e # Exit on any error +set -u # Exit on undefined variables + +# Enable debug mode if DEBUG environment variable is set +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi + +echo "=== Building Configurable Strands Agent Container ===" + +# Navigate to agent directory (parent of scripts/) +cd "$(dirname "$0")/.." + +# Logging functions for consistent output +log_info() { + echo "ℹ️ $1" +} + +log_success() { + echo "✅ $1" +} + +log_error() { + echo "❌ $1" >&2 +} + +log_warning() { + echo "⚠️ $1" +} + +# Enhanced configuration with environment variable support +IMAGE_NAME="${IMAGE_NAME:-gaab-strands-agent}" +TAG="${TAG:-latest}" + +# Build options configuration +BUILD_ARGS="${BUILD_ARGS:-}" +NO_CACHE="${NO_CACHE:-false}" +PLATFORM="${PLATFORM:-}" + +# Validation functions +validate_docker() { + log_info "Validating Docker environment..." + + # Check if Docker is available + if ! command -v docker &> /dev/null; then + log_error "Docker is not installed or not in PATH" + log_error "Please install Docker and ensure it's running" + exit 1 + fi + + # Check if Docker daemon is running + if ! docker info &> /dev/null; then + log_error "Docker daemon is not running" + log_error "Please start Docker and try again" + exit 1 + fi + + # Check Docker version for compatibility + local docker_version + docker_version=$(docker version --format '{{.Server.Version}}' 2>/dev/null || echo "unknown") + log_info "Docker version: $docker_version" + + log_success "Docker environment validated" +} + +validate_build_context() { + log_info "Validating build context..." + + # Check if Dockerfile exists + if [ ! -f "Dockerfile" ]; then + log_error "Dockerfile not found in current directory" + log_error "Current directory: $(pwd)" + log_error "Please ensure you're in the correct directory" + exit 1 + fi + + # Check if pyproject.toml exists for UV workflow + if [ ! -f "pyproject.toml" ]; then + log_warning "pyproject.toml not found - UV workflow may not be available" + fi + + # Check if required source files exist + if [ ! -d "src" ]; then + log_warning "Source directory 'src' not found - this may be expected" + fi + + # Verify gaab-strands-common package exists (required dependency) + if [ ! -d "../gaab-strands-common" ]; then + log_error "gaab-strands-common package not found at ../gaab-strands-common" + log_error "The shared library is required for building this agent" + log_error "Expected structure:" + log_error " deployment/ecr/" + log_error " ├── gaab-strands-agent/ (current)" + log_error " └── gaab-strands-common/ (required)" + exit 1 + fi + + log_success "Build context validated (including gaab-strands-common)" +} + +# UV detection - check if UV is available in the environment +check_uv_available() { + log_info "Checking for UV availability..." + + if ! command -v uv &> /dev/null; then + log_error "UV is not installed or not in PATH" + log_error "" + log_error "UV is required for building this container." + log_error "" + log_error "Please install UV using one of these methods:" + log_error "" + log_error " 1. Using pip (recommended for corporate environments):" + log_error " pip install uv>=0.5.0" + log_error "" + log_error " 2. Using pipx (isolated installation):" + log_error " pipx install uv>=0.5.0" + log_error "" + log_error " 3. Using your system package manager:" + log_error " - macOS: brew install uv" + log_error " - Linux: Check your distribution's package manager" + log_error "" + log_error " 4. For more installation options, visit:" + log_error " https://docs.astral.sh/uv/getting-started/installation/" + log_error "" + log_error "After installation, ensure UV is in your PATH and try again." + exit 1 + fi + + # Verify UV is functional + local uv_version + uv_version=$(uv --version 2>/dev/null | cut -d' ' -f2 || echo "unknown") + + if [ "$uv_version" = "unknown" ]; then + log_error "UV found but version could not be determined" + log_error "UV may not be properly installed or configured" + exit 1 + fi + + log_success "UV detected (version: $uv_version)" +} + + + +# Enhanced build function with better error handling +build_docker_image() { + log_info "Starting Docker image build..." + + log_info "Configuration:" + echo " 📦 Image Name: $IMAGE_NAME" + echo " 🏷️ Tag: $TAG" + echo " 📁 Build Context: $(pwd)" + echo " 🔧 Package Manager: UV" + echo " 📚 Shared Library: gaab-strands-common (../gaab-strands-common)" + + if [ -n "$PLATFORM" ]; then + echo " 🏗️ Platform: $PLATFORM" + fi + + if [ "$NO_CACHE" = "true" ]; then + echo " 🚫 Cache: Disabled" + fi + + if [ -n "$BUILD_ARGS" ]; then + echo " ⚙️ Build Args: $BUILD_ARGS" + fi + + echo "" + + # Construct build command + local build_cmd="docker build" + + # Add no-cache flag if requested + if [ "$NO_CACHE" = "true" ]; then + build_cmd="$build_cmd --no-cache" + fi + + # Add platform if specified + if [ -n "$PLATFORM" ]; then + build_cmd="$build_cmd --platform $PLATFORM" + fi + + # Add build args if specified + if [ -n "$BUILD_ARGS" ]; then + build_cmd="$build_cmd $BUILD_ARGS" + fi + + # Add tag and context (build from current directory) + build_cmd="$build_cmd -t $IMAGE_NAME:$TAG ." + + log_info "Build command: $build_cmd" + log_info "Tests will run during build process..." + echo "" + + # Execute build with error handling + if eval "$build_cmd"; then + log_success "Docker image built successfully!" + log_success "Tests passed during build!" + + # Display image information + echo "" + log_info "Image Details:" + docker images --format "table {{.Repository}}:{{.Tag}}\t{{.Size}}\t{{.CreatedAt}}" | head -1 + docker images --format "table {{.Repository}}:{{.Tag}}\t{{.Size}}\t{{.CreatedAt}}" | grep "^$IMAGE_NAME:$TAG" + + # Get image ID and size + local image_id + local image_size + image_id=$(docker images --format "{{.ID}}" "$IMAGE_NAME:$TAG" | head -1) + image_size=$(docker images --format "{{.Size}}" "$IMAGE_NAME:$TAG" | head -1) + + echo "" + log_info "Build Summary:" + echo " 🆔 Image ID: $image_id" + echo " 📏 Image Size: $image_size" + echo " 🏷️ Full Tag: $IMAGE_NAME:$TAG" + + else + log_error "Docker build failed!" + log_error "" + log_error "Common causes of build failures:" + log_error " - Missing dependencies in Dockerfile" + log_error " - Test failures during build" + log_error " - Network connectivity issues" + log_error " - Insufficient disk space" + log_error " - Invalid Dockerfile syntax" + log_error " - UV/pip dependency resolution conflicts" + log_error "" + log_error "Troubleshooting steps:" + log_error " 1. Check Docker logs above for specific errors" + log_error " 2. Verify Dockerfile syntax" + log_error " 3. Ensure all required files are present" + log_error " 4. Check available disk space: df -h" + log_error " 5. Try building with --no-cache: NO_CACHE=true ./scripts/build-container.sh" + log_error " 6. Try fallback mode: USE_UV=false ./scripts/build-container.sh" + exit 1 + fi +} + +# Display usage information +display_usage() { + echo "" + log_info "Environment Variables:" + echo " IMAGE_NAME - Docker image name (default: gaab-strands-agent)" + echo " TAG - Docker image tag (default: latest)" + echo " BUILD_ARGS - Additional build arguments" + echo " NO_CACHE - Disable build cache (true/false, default: false)" + echo " PLATFORM - Target platform (e.g., linux/amd64, linux/arm64)" + echo " DEBUG - Enable debug output (true/false, default: false)" + echo "" + log_info "Prerequisites:" + echo " - gaab-strands-common package must exist at ../gaab-strands-common" + echo " - UV must be installed (pip install uv>=0.5.0)" + echo "" + log_info "Examples:" + echo " # Basic UV build" + echo " ./scripts/build-container.sh" + echo "" + echo " # Build with custom tag" + echo " TAG=v1.0.0 ./scripts/build-container.sh" + echo "" + echo " # Build without cache" + echo " NO_CACHE=true ./scripts/build-container.sh" + echo "" + echo " # Build for specific platform (AgentCore ARM64)" + echo " PLATFORM=linux/arm64 ./scripts/build-container.sh" + echo "" + echo " # Debug mode with verbose output" + echo " DEBUG=true ./scripts/build-container.sh" + echo "" +} + +# Main execution +main() { + validate_docker + validate_build_context + check_uv_available + build_docker_image +} + +# Run main function +main \ No newline at end of file diff --git a/deployment/ecr/gaab-strands-agent/scripts/deploy-ecr.sh b/deployment/ecr/gaab-strands-agent/scripts/deploy-ecr.sh new file mode 100755 index 00000000..21dd38b1 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/scripts/deploy-ecr.sh @@ -0,0 +1,388 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/bin/bash + +set -e # Exit on any error +set -u # Exit on undefined variables + +# Enable debug mode if DEBUG environment variable is set +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi + +echo "=== Deploying Configurable Strands Agent to ECR ===" + +# Navigate to agent directory (parent of scripts/) +cd "$(dirname "$0")/.." + +# Verify we're in the correct directory +if [ ! -f "Dockerfile" ]; then + log_error "Dockerfile not found. Current directory: $(pwd)" + log_error "This script must be run from deployment/ecr/gaab-strands-agent/scripts/" + exit 1 +fi + +# Logging function for consistent output +log_info() { + echo "ℹ️ $1" +} + +log_success() { + echo "✅ $1" +} + +log_error() { + echo "❌ $1" >&2 +} + +log_warning() { + echo "⚠️ $1" +} + +# Validation function for required environment variables +validate_required_vars() { + local missing_vars=() + + # Check for required variables based on context + if [ -z "${AWS_REGION:-}" ] && [ -z "${AWS_DEFAULT_REGION:-}" ]; then + missing_vars+=("AWS_REGION or AWS_DEFAULT_REGION") + fi + + if [ ${#missing_vars[@]} -gt 0 ]; then + log_error "Missing required environment variables:" + for var in "${missing_vars[@]}"; do + log_error " - $var" + done + log_error "Please set the required variables and try again." + exit 1 + fi +} + +# Enhanced configuration with environment variable support +# Core AWS configuration +AWS_REGION="${AWS_REGION:-${AWS_DEFAULT_REGION:-us-east-1}}" + +# ECR repository configuration with enhanced customization +ECR_REPOSITORY="${ECR_REPOSITORY:-gaab-strands-agent}" +IMAGE_NAME="${IMAGE_NAME:-gaab-strands-agent}" + +# Image tag resolution with CI/CD support +if [ -n "${VERSION:-}" ]; then + # Use VERSION environment variable (CI/CD context) + # Remove double 'v' prefix if present (e.g., vv4.0.0 -> v4.0.0) + RESOLVED_VERSION=$(echo "$VERSION" | sed 's/^vv/v/') + IMAGE_TAG="${IMAGE_TAG:-$RESOLVED_VERSION}" +elif [ -n "${PUBLIC_ECR_TAG:-}" ]; then + # Use PUBLIC_ECR_TAG for CI/CD pipeline overrides + IMAGE_TAG="${IMAGE_TAG:-$PUBLIC_ECR_TAG}" +else + # Default to latest for local development + IMAGE_TAG="${IMAGE_TAG:-latest}" +fi + +# Registry configuration with CI/CD override support +if [ -n "${PUBLIC_ECR_REGISTRY:-}" ]; then + # CI/CD context with custom registry + ECR_REGISTRY_URL="$PUBLIC_ECR_REGISTRY" + log_info "Using custom ECR registry from PUBLIC_ECR_REGISTRY: $ECR_REGISTRY_URL" +else + # Local development or standard AWS ECR + ECR_REGISTRY_URL="" # Will be constructed with AWS account ID +fi + +# Validate required variables +validate_required_vars + +log_info "Starting ECR deployment process..." +log_info "Configuration validation passed" + +# Enhanced AWS Account ID resolution with better error handling +resolve_aws_account_id() { + if [ -n "${AWS_ACCOUNT_ID:-}" ]; then + log_info "Using provided AWS Account ID: $AWS_ACCOUNT_ID" + return 0 + fi + + log_info "AWS_ACCOUNT_ID not provided, auto-detecting from current AWS credentials..." + + # Try to get account ID with timeout and better error handling + if ! AWS_ACCOUNT_ID=$(timeout 30 aws sts get-caller-identity --query Account --output text 2>/dev/null); then + log_error "Failed to auto-detect AWS Account ID" + log_error "This could be due to:" + log_error " - AWS credentials not configured" + log_error " - Network connectivity issues" + log_error " - Insufficient permissions" + log_error "" + log_error "Solutions:" + log_error " 1. Configure AWS credentials: aws configure" + log_error " 2. Set AWS_ACCOUNT_ID manually: export AWS_ACCOUNT_ID=123456789012" + log_error " 3. Check network connectivity to AWS" + exit 1 + fi + + if [ -z "$AWS_ACCOUNT_ID" ]; then + log_error "AWS Account ID is empty after auto-detection" + exit 1 + fi + + log_success "Auto-detected AWS Account ID: $AWS_ACCOUNT_ID" +} + +# Enhanced ECR URI construction with registry override support +construct_ecr_uri() { + if [ -n "${ECR_REGISTRY_URL:-}" ]; then + # Custom registry (CI/CD context) + ECR_URI="$ECR_REGISTRY_URL/$ECR_REPOSITORY" + else + # Standard AWS ECR - use ECR_REPOSITORY as the full repository name + ECR_URI="$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ECR_REPOSITORY" + fi +} + +# Resolve AWS Account ID +resolve_aws_account_id + +# Construct ECR URI +construct_ecr_uri + +# Enhanced configuration display +display_configuration() { + log_info "Deployment Configuration:" + echo " AWS Region: $AWS_REGION" + echo " AWS Account: $AWS_ACCOUNT_ID" + echo " ECR Repository: $ECR_REPOSITORY" + echo " Image Name: $IMAGE_NAME" + echo " Image Tag: $IMAGE_TAG" + echo " ECR URI: $ECR_URI:$IMAGE_TAG" + + if [ -n "${ECR_REGISTRY_URL:-}" ]; then + echo " Custom Registry: $ECR_REGISTRY_URL" + fi + + if [ -n "${VERSION:-}" ]; then + echo " Version Source: VERSION environment variable" + elif [ -n "${PUBLIC_ECR_TAG:-}" ]; then + echo " Version Source: PUBLIC_ECR_TAG environment variable" + else + echo " Version Source: Default (latest)" + fi + + echo "" +} + +# Enhanced Docker image validation +validate_docker_image() { + log_info "Validating local Docker image..." + + # Check if Docker is available + if ! command -v docker &> /dev/null; then + log_error "Docker is not installed or not in PATH" + log_error "Please install Docker and ensure it's running" + exit 1 + fi + + # Check if Docker daemon is running + if ! docker info &> /dev/null; then + log_error "Docker daemon is not running" + log_error "Please start Docker and try again" + exit 1 + fi + + # Check if the specific image exists + if ! docker images --format "table {{.Repository}}:{{.Tag}}" | grep -q "^$IMAGE_NAME:latest$"; then + log_error "Local Docker image '$IMAGE_NAME:latest' not found" + log_error "Available images:" + docker images --format "table {{.Repository}}:{{.Tag}}" | head -10 + log_error "" + log_error "Please run './scripts/build-container.sh' first to build the image" + exit 1 + fi + + log_success "Docker image '$IMAGE_NAME:latest' found locally" +} + +# Display configuration +display_configuration + +# Validate Docker image +validate_docker_image + +# Enhanced ECR login with better error handling +ecr_login() { + echo "" + log_info "Step 1: Logging into ECR..." + + local login_registry + if [ -n "${ECR_REGISTRY_URL:-}" ]; then + # Custom registry login + login_registry="$ECR_REGISTRY_URL" + log_info "Logging into custom registry: $login_registry" + else + # Standard AWS ECR login + login_registry="$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com" + log_info "Logging into AWS ECR: $login_registry" + fi + + # Attempt ECR login with timeout and better error handling + if ! timeout 60 aws ecr get-login-password --region "$AWS_REGION" | docker login --username AWS --password-stdin "$login_registry" 2>/dev/null; then + log_error "ECR login failed" + log_error "This could be due to:" + log_error " - Invalid AWS credentials" + log_error " - Insufficient ECR permissions" + log_error " - Network connectivity issues" + log_error " - Invalid region: $AWS_REGION" + log_error "" + log_error "Required permissions:" + log_error " - ecr:GetAuthorizationToken" + log_error " - ecr:BatchCheckLayerAvailability" + log_error " - ecr:GetDownloadUrlForLayer" + log_error " - ecr:BatchGetImage" + exit 1 + fi + + log_success "Successfully logged into ECR" +} + +# Perform ECR login +ecr_login + +# Enhanced ECR repository management +manage_ecr_repository() { + echo "" + log_info "Step 2: Managing ECR repository..." + + local full_repo_name="$ECR_REPOSITORY" + + # Skip repository creation for custom registries (CI/CD context) + if [ -n "${ECR_REGISTRY_URL:-}" ]; then + log_info "Using custom registry - skipping repository creation" + log_info "Repository: $full_repo_name" + return 0 + fi + + log_info "Checking repository: $full_repo_name" + + # Check if repository exists with better error handling + if aws ecr describe-repositories --repository-names "$full_repo_name" --region "$AWS_REGION" &>/dev/null; then + log_success "ECR repository exists: $full_repo_name" + else + log_info "Repository does not exist, creating: $full_repo_name" + + # Create repository with enhanced error handling + if ! aws ecr create-repository \ + --repository-name "$full_repo_name" \ + --region "$AWS_REGION" \ + --image-scanning-configuration scanOnPush=true \ + --encryption-configuration encryptionType=AES256 \ + &>/dev/null; then + + log_error "Failed to create ECR repository: $full_repo_name" + log_error "This could be due to:" + log_error " - Insufficient permissions (ecr:CreateRepository)" + log_error " - Repository name conflicts" + log_error " - Region-specific issues" + exit 1 + fi + + log_success "ECR repository created: $full_repo_name" + log_info "Repository features enabled:" + log_info " - Image scanning on push" + log_info " - AES256 encryption" + fi +} + +# Manage ECR repository +manage_ecr_repository + +# Enhanced image tagging with validation +tag_docker_image() { + echo "" + log_info "Step 3: Tagging Docker image..." + + local source_image="$IMAGE_NAME:latest" + local target_image="$ECR_URI:$IMAGE_TAG" + + log_info "Tagging: $source_image -> $target_image" + + if ! docker tag "$source_image" "$target_image"; then + log_error "Failed to tag Docker image" + log_error "Source: $source_image" + log_error "Target: $target_image" + exit 1 + fi + + log_success "Docker image tagged successfully" +} + +# Enhanced image pushing +push_docker_image() { + echo "" + log_info "Step 4: Pushing image to ECR..." + + local target_image="$ECR_URI:$IMAGE_TAG" + + log_info "Pushing: $target_image" + + # Standard docker push (image already built and tagged) + if ! docker push "$target_image"; then + log_error "Failed to push image to ECR" + log_error "Target: $target_image" + log_error "" + log_error "This could be due to:" + log_error " - Network connectivity issues" + log_error " - ECR repository permissions" + log_error " - Image size limits" + log_error " - Image not properly tagged" + exit 1 + fi + + log_success "Image pushed successfully to ECR" +} + +# Tag and push the image +tag_docker_image +push_docker_image + +# Enhanced deployment summary with usage instructions +deployment_summary() { + echo "" + echo "🎉 ==================================" + log_success "ECR Deployment Completed Successfully!" + echo "🎉 ==================================" + echo "" + + log_info "Deployment Summary:" + echo " 📦 Image URI: $ECR_URI:$IMAGE_TAG" + echo " 🏷️ Image Tag: $IMAGE_TAG" + echo " 🌍 AWS Region: $AWS_REGION" + echo " 🏢 AWS Account: $AWS_ACCOUNT_ID" + echo " 📁 Repository: $ECR_REPOSITORY/$IMAGE_NAME" + + if [ -n "${VERSION:-}" ]; then + echo " 🔖 Version Source: VERSION environment variable ($VERSION)" + elif [ -n "${PUBLIC_ECR_TAG:-}" ]; then + echo " 🔖 Version Source: PUBLIC_ECR_TAG environment variable ($PUBLIC_ECR_TAG)" + fi + + echo "" + log_info "Usage Instructions:" + echo " 🔧 In CDK/CloudFormation:" + echo " Use image URI: $ECR_URI:$IMAGE_TAG" + echo "" + echo " 🚀 In AgentCore Runtime:" + echo " Set container image to: $ECR_URI:$IMAGE_TAG" + echo "" + echo " 📋 For custom deployments:" + echo " export CUSTOM_AGENT_IMAGE_URI=\"$ECR_URI:$IMAGE_TAG\"" + echo "" + + log_info "Next Steps:" + echo " 1. Update your CDK stack parameters with the new image URI" + echo " 2. Deploy your infrastructure: cdk deploy" + echo " 3. Verify the deployment in AWS Console" + echo "" +} + +# Display deployment summary +deployment_summary \ No newline at end of file diff --git a/deployment/ecr/gaab-strands-agent/scripts/run_unit_tests.sh b/deployment/ecr/gaab-strands-agent/scripts/run_unit_tests.sh new file mode 100755 index 00000000..8ee4cfea --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/scripts/run_unit_tests.sh @@ -0,0 +1,401 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/bin/bash +# Script to run unit tests for the Configurable Strands Agent + +set -e # Exit on any error +set -u # Exit on undefined variables + +# Enable debug mode if DEBUG environment variable is set +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi + +# Change to the project root directory (one level up from scripts/) +cd "$(dirname "$0")/.." + +# Logging functions for consistent output +log_info() { + echo "ℹ️ $1" +} + +log_success() { + echo "✅ $1" +} + +log_error() { + echo "❌ $1" >&2 +} + +log_warning() { + echo "⚠️ $1" +} + +# Configuration +PYTHON_CMD="${PYTHON_CMD:-python3}" +TEST_RUNNER="${TEST_RUNNER:-test/run_tests.py}" +COVERAGE="${COVERAGE:-true}" +VENV_DIR="${VENV_DIR:-.venv}" +SKIP_VENV="${SKIP_VENV:-false}" + +# Validation functions +validate_python() { + log_info "Validating Python environment..." + + if ! command -v "$PYTHON_CMD" &> /dev/null; then + log_error "Python command '$PYTHON_CMD' not found" + log_error "Please install Python 3 or set PYTHON_CMD environment variable" + exit 1 + fi + + local python_version + python_version=$($PYTHON_CMD --version 2>&1) + log_info "Using: $python_version" + + # Check Python version (require 3.8+) + local version_check + version_check=$($PYTHON_CMD -c "import sys; print(sys.version_info >= (3, 8))") + if [ "$version_check" != "True" ]; then + log_error "Python 3.8 or higher is required" + exit 1 + fi + + log_success "Python environment validated" +} + +# UV detection +check_uv_available() { + log_info "Checking for UV availability..." + + if ! command -v uv &> /dev/null; then + log_error "UV is not installed or not in PATH" + log_error "" + log_error "UV is required for running tests." + log_error "" + log_error "Please install UV using one of these methods:" + log_error "" + log_error " 1. Using pip (recommended for corporate environments):" + log_error " pip install uv>=0.5.0" + log_error "" + log_error " 2. Using pipx (isolated installation):" + log_error " pipx install uv>=0.5.0" + log_error "" + log_error " 3. Using your system package manager:" + log_error " - macOS: brew install uv" + log_error " - Linux: Check your distribution's package manager" + log_error "" + log_error " 4. For more installation options, visit:" + log_error " https://docs.astral.sh/uv/getting-started/installation/" + log_error "" + log_error "After installation, ensure UV is in your PATH and try again." + exit 1 + fi + + # Verify UV is functional + local uv_version + uv_version=$(uv --version 2>/dev/null | cut -d' ' -f2 || echo "unknown") + + if [ "$uv_version" = "unknown" ]; then + log_error "UV found but version could not be determined" + log_error "UV may not be properly installed or configured" + exit 1 + fi + + log_success "UV detected (version: $uv_version)" +} + +# Virtual environment management with UV +setup_virtual_environment() { + if [ "$SKIP_VENV" = "true" ]; then + log_info "Skipping virtual environment setup (SKIP_VENV=true)" + return 0 + fi + + log_info "Setting up UV-managed virtual environment..." + setup_uv_environment +} + +setup_uv_environment() { + # Check if pyproject.toml exists for UV workflow + if [ ! -f "pyproject.toml" ]; then + log_error "pyproject.toml not found" + log_error "UV requires pyproject.toml for dependency management" + exit 1 + fi + + # Create virtual environment using UV + if [ ! -d "$VENV_DIR" ]; then + log_info "Creating UV virtual environment at $VENV_DIR..." + uv venv "$VENV_DIR" + log_success "UV virtual environment created" + else + log_info "Using existing virtual environment at $VENV_DIR" + fi + + # Activate virtual environment + log_info "Activating virtual environment..." + # shellcheck source=/dev/null + source "$VENV_DIR/bin/activate" + + # Install gaab-strands-common first (local dependency) + local common_lib_path="../gaab-strands-common" + if [ -d "$common_lib_path" ]; then + log_info "Installing gaab-strands-common from local directory..." + uv pip install -e "$common_lib_path" + log_success "gaab-strands-common installed" + else + log_error "gaab-strands-common directory not found at $common_lib_path" + log_error "The shared library is required for running tests" + exit 1 + fi + + # Install dependencies using UV sync (installs both dependencies and dev-dependencies) + log_info "Installing dependencies using UV sync..." + uv sync + + log_success "UV virtual environment setup completed" +} + +cleanup_virtual_environment() { + if [ "$SKIP_VENV" = "true" ]; then + return 0 + fi + + # Only show deactivation message if we actually have an active virtual environment + if [ -n "${VIRTUAL_ENV:-}" ]; then + log_info "Deactivating virtual environment..." + + # Try to deactivate and capture the result + if command -v deactivate >/dev/null 2>&1; then + deactivate 2>/dev/null || { + log_warning "Virtual environment deactivation failed, but continuing..." + } + else + # If deactivate function isn't available, just unset the environment variable + unset VIRTUAL_ENV + log_info "Virtual environment variables cleared" + fi + + # Verify deactivation + if [ -z "${VIRTUAL_ENV:-}" ]; then + log_success "Virtual environment deactivated successfully" + fi + fi +} + +validate_test_environment() { + log_info "Validating test environment..." + + if [ ! -f "$TEST_RUNNER" ]; then + log_error "Test runner not found: $TEST_RUNNER" + log_error "Current directory: $(pwd)" + log_error "Please ensure you're in the correct directory" + exit 1 + fi + + if [ ! -d "test" ]; then + log_error "Test directory not found" + exit 1 + fi + + log_success "Test environment validated" +} + +# Enhanced test execution with better error handling +run_tests() { + log_info "Starting unit test execution..." + log_info "Configuration:" + echo " 🐍 Python Command: $PYTHON_CMD" + echo " 🧪 Test Runner: $TEST_RUNNER" + echo " 📁 Working Directory: $(pwd)" + echo " 🔧 Virtual Environment: $VENV_DIR" + + if [ "$COVERAGE" = "true" ]; then + echo " 📊 Coverage: Enabled" + fi + + if [ "$SKIP_VENV" = "true" ]; then + echo " ⚠️ Virtual Environment: Skipped" + fi + + echo "" + + # Determine which Python command to use + local python_exec + if [ "$SKIP_VENV" = "true" ]; then + python_exec="$PYTHON_CMD" + else + python_exec="python" # Use the activated venv python + fi + + # Run the test runner with enhanced error handling + if [ "$COVERAGE" = "true" ]; then + log_info "Running tests with coverage..." + if ! $python_exec -m coverage run "$TEST_RUNNER"; then + log_error "Unit tests failed!" + exit 1 + fi + + # Generate coverage report + log_info "Generating coverage report..." + $python_exec -m coverage report + + # Generate XML coverage report if requested + if [ "${COVERAGE_XML:-true}" = "true" ]; then + log_info "Generating XML coverage report..." + $python_exec -m coverage xml + log_info "XML coverage report generated at " + fi + + # Generate HTML coverage report if requested + if [ "${COVERAGE_HTML:-false}" = "true" ]; then + log_info "Generating HTML coverage report..." + $python_exec -m coverage html + log_info "HTML coverage report generated in htmlcov/" + fi + + else + log_info "Running tests..." + if ! $python_exec "$TEST_RUNNER"; then + log_error "Unit tests failed!" + log_error "" + log_error "Troubleshooting steps:" + log_error " 1. Check test output above for specific failures" + log_error " 2. Verify all dependencies are installed" + log_error " 3. Check Python version compatibility" + log_error " 4. Run with DEBUG=true for more verbose output" + log_error " 5. Try recreating virtual environment: rm -rf $VENV_DIR" + exit 1 + fi + fi + + log_success "All unit tests passed!" +} + +# Display usage information +display_usage() { + echo "Usage: $0 [options]" + log_info "Environment Variables:" + echo " PYTHON_CMD - Python command to use (default: python3)" + echo " TEST_RUNNER - Test runner script (default: test/run_tests.py)" + echo " COVERAGE - Enable coverage reporting (true/false, default: true)" + echo " COVERAGE_XML - Generate XML coverage report (true/false, default: true)" + echo " COVERAGE_HTML - Generate HTML coverage report (true/false, default: false)" + echo " DEBUG - Enable debug output (true/false, default: false)" + echo " VENV_DIR - Virtual environment directory (default: .venv)" + echo " SKIP_VENV - Skip virtual environment setup (true/false, default: false)" + echo "" + log_info "Command Line Options:" + echo " -h, --help Show this help message" + echo " -c, --coverage Enable coverage reporting (default: enabled)" + echo " --no-coverage Disable coverage reporting" + echo " --coverage-html Enable coverage reporting with HTML output" + echo "" + log_info "Examples:" + echo " # Basic test run with coverage (default behavior)" + echo " ./scripts/run_unit_tests.sh" + echo "" + echo " # Run without coverage report" + echo " ./scripts/run_unit_tests.sh --no-coverage" + echo "" + echo " # Run with coverage and HTML report" + echo " ./scripts/run_unit_tests.sh --coverage-html" + echo "" + echo " # Use environment variables to disable coverage" + echo " COVERAGE=false ./scripts/run_unit_tests.sh" + echo "" + echo " # Use specific Python version" + echo " PYTHON_CMD=python3.11 ./scripts/run_unit_tests.sh" + echo "" + echo " # Skip virtual environment (use system Python)" + echo " SKIP_VENV=true ./scripts/run_unit_tests.sh" + echo "" + echo " # Use custom virtual environment directory" + echo " VENV_DIR=test-env ./scripts/run_unit_tests.sh" + echo "" + echo " # Clean virtual environment and recreate" + echo " rm -rf .venv && ./scripts/run_unit_tests.sh" + echo "" +} + +# Cleanup function for trap +cleanup_on_exit() { + local exit_code=$? + + # Only cleanup if we're not skipping venv and there's an active virtual environment + if [ "$SKIP_VENV" != "true" ] && [ -n "${VIRTUAL_ENV:-}" ]; then + cleanup_virtual_environment + fi + + if [ $exit_code -ne 0 ]; then + log_error "Script exited with error code $exit_code" + fi + + exit $exit_code +} + +# Set trap for cleanup +trap cleanup_on_exit EXIT INT TERM + +# Parse command line arguments +parse_arguments() { + while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + display_usage + exit 0 + ;; + -c|--coverage) + COVERAGE="true" + log_info "Coverage reporting enabled via command line" + ;; + --no-coverage) + COVERAGE="false" + log_info "Coverage reporting disabled via command line" + ;; + --coverage-xml) + COVERAGE="true" + COVERAGE_XML="true" + log_info "Coverage reporting with XML output enabled via command line" + ;; + --no-coverage-xml) + COVERAGE_XML="false" + log_info "XML coverage reporting disabled via command line" + ;; + --coverage-html) + COVERAGE="true" + COVERAGE_HTML="true" + log_info "Coverage reporting with HTML output enabled via command line" + ;; + *) + log_error "Unknown option: $1" + display_usage + exit 1 + ;; + esac + shift + done +} + +# Main execution +main() { + echo "🧪 Running unit tests for Configurable Strands Agent..." + echo "================================================" + validate_python + check_uv_available + validate_test_environment + setup_virtual_environment + run_tests +} + +echo "================================================" + +# Parse arguments first +parse_arguments "$@" + +# Run main function +main + +echo "================================================" +log_success "Unit test execution completed!" \ No newline at end of file diff --git a/deployment/ecr/gaab-strands-agent/src/configurable_agent.py b/deployment/ecr/gaab-strands-agent/src/configurable_agent.py new file mode 100644 index 00000000..0c572ef7 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/src/configurable_agent.py @@ -0,0 +1,249 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Configurable Strands Agent - Main agent orchestration class +""" + +import logging +import os +from typing import Any, List, Optional + +from gaab_strands_common import DynamoDBHelper, ToolsManager, UseCaseConfig +from gaab_strands_common.utils.helpers import build_guardrail_config, create_boto_config +from strands import Agent +from strands.models import BedrockModel +from strands.session import SessionManager + +logger = logging.getLogger(__name__) + + +class ConfigurableAgent: + """Configurable Strands Agent that loads configuration from DynamoDB""" + + def __init__( + self, + table_name: str, + config_key: str, + region: str, + session_manager: SessionManager = None, + ): + """ + Initialize ConfigurableAgent + """ + self.table_name = table_name + self.config_key = config_key + self.region = region + self.session_manager = session_manager + + self.config: Optional[UseCaseConfig] = None + self.agent: Optional[Agent] = None + self.ddb_helper: Optional[DynamoDBHelper] = None + self.tools_manager: Optional[ToolsManager] = None + self.loaded_tools: List[Any] = [] + + logger.info(f"Initializing agent for table: {self.table_name}, key: {self.config_key}") + # Load configuration and create agent + self._initialize() + + def _initialize(self): + """Initialize the agent with configuration from DynamoDB""" + # Create DynamoDB helper + self.ddb_helper = DynamoDBHelper(self.table_name, self.region) + + # Load configuration + config_dict = self.ddb_helper.get_config(self.config_key) + + # Validate use case type + use_case_type = config_dict.get("UseCaseType") + if use_case_type != "AgentBuilder": + raise ValueError(f"Expected AgentBuilder, got {use_case_type}") + + # Create agent config + self.config = UseCaseConfig.from_ddb_config(config_dict) + logger.info(f"Loaded config for agent: {self.config.use_case_name}") + + # Set tool environment variables before loading tools + self._set_tool_environment_variables() + + self.tools_manager = ToolsManager(self.region, config=self.config) + + strands_tool_ids = self.config.agent_builder_params.get_tool_ids() + mcp_servers = self.config.agent_builder_params.get_mcp_servers() + custom_tool_ids = self.config.agent_builder_params.get_custom_tool_ids() + + logger.info( + "Tool configuration: %d built-in tool(s), %d custom tool(s), %d MCP server(s)", + len(strands_tool_ids), + len(custom_tool_ids), + len(mcp_servers), + ) + + try: + self.loaded_tools = self.tools_manager.load_all_tools( + mcp_servers=mcp_servers, + strands_tool_ids=strands_tool_ids, + custom_tool_ids=custom_tool_ids, + ) + logger.info(f"Successfully loaded {len(self.loaded_tools)} tool(s)") + except Exception as e: + logger.error(f"Error loading tools: {e}", exc_info=True) + logger.warning("Agent will initialize without tools") + self.loaded_tools = [] + + # Create agent + self._create_agent() + logger.info("Agent initialization completed") + + def _normalize_tool_names(self, tool_ids: List[str]) -> List[str]: + """Normalize tool IDs by replacing spaces/hyphens with underscores and converting to uppercase""" + return [tool_id.replace(" ", "_").replace("-", "_").upper() for tool_id in tool_ids] + + def _extract_env_var_name( + self, key_without_prefix: str, normalized_tool_names: List[str] + ) -> Optional[str]: + """Extract environment variable name from key by matching against known tool names""" + # Try to match against known tool names + for tool_name in normalized_tool_names: + if key_without_prefix.startswith(tool_name + "_"): + return key_without_prefix[len(tool_name) + 1 :] + + # Fallback: simple split for single-word tool names + parts = key_without_prefix.split("_", 1) + return parts[1] if len(parts) >= 2 else None + + def _set_env_var_from_param(self, env_var_name: str, param_obj: Any, key: str) -> bool: + """Set environment variable from parameter object, returns True if successful""" + if isinstance(param_obj, dict) and "Value" in param_obj: + os.environ[env_var_name] = str(param_obj["Value"]) + logger.debug(f"Set environment variable {env_var_name} from ModelParams") + return True + + if not isinstance(param_obj, dict): + os.environ[env_var_name] = str(param_obj) + logger.debug( + f"Set environment variable {env_var_name} from ModelParams (legacy format)" + ) + return True + + logger.warning(f"ModelParams key {key} has ENV_ prefix but missing 'Value' field") + return False + + def _set_tool_environment_variables(self): + """ + Extract and set environment variables from LlmParams.ModelParams for tools. + + ModelParams keys with pattern ENV__ are + extracted and set as environment variables with name . + ModelParams values are objects with 'Value' and 'Type' fields. + + Tool names can contain underscores (e.g., CURRENT_TIME), so we need to + match against known tool IDs to properly extract the environment variable name. + """ + if not self.config or not self.config.llm_params: + return + + model_params = self.config.llm_params.model_params or {} + tool_ids = self.config.agent_builder_params.get_tool_ids() + normalized_tool_names = self._normalize_tool_names(tool_ids) + + env_vars_set = 0 + for key, param_obj in model_params.items(): + if not key.startswith("ENV_"): + continue + + key_without_prefix = key[4:] + env_var_name = self._extract_env_var_name(key_without_prefix, normalized_tool_names) + + if env_var_name and self._set_env_var_from_param(env_var_name, param_obj, key): + env_vars_set += 1 + + if env_vars_set > 0: + logger.info(f"Set {env_vars_set} tool environment variable(s) from ModelParams") + + def _create_model(self) -> BedrockModel: + """Create Bedrock model from configuration""" + if not self.config: + raise ValueError("No configuration loaded") + + bedrock_params = self.config.llm_params.bedrock_llm_params + + # Log environment and configuration for debugging + logger.info(f"Environment AWS_REGION: {os.getenv('AWS_REGION')}") + logger.info(f"Configured region: {self.region}") + logger.info(f"Inference type: {bedrock_params.bedrock_inference_type}") + logger.info(f"Model identifier from config: {bedrock_params.model_identifier}") + + # Check if this is a cross-region inference profile + is_cross_region_profile = bedrock_params.model_identifier.startswith("us.") + if is_cross_region_profile: + logger.info( + f"Detected cross-region inference profile: {bedrock_params.model_identifier}" + ) + + # Create Botocore Config with retry settings and user agent + boto_config = create_boto_config(self.region) + + # Build guardrail configuration if available + guardrail_config = build_guardrail_config(bedrock_params) + + model_config = { + "model_id": bedrock_params.model_identifier, + "region_name": self.region, + "temperature": self.config.llm_params.temperature, + "streaming": self.config.llm_params.streaming, + "boto_client_config": boto_config, + **guardrail_config, + } + + return BedrockModel(**model_config) + + def _create_agent(self): + """Create Strands agent with loaded configuration""" + if not self.config: + raise ValueError("No configuration loaded") + + # Create model + model = self._create_model() + + if self.loaded_tools: + logger.info(f"Creating agent with {len(self.loaded_tools)} tool(s)") + if self.tools_manager: + tool_sources = self.tools_manager.get_tool_sources() + logger.debug(f"Tool sources: {tool_sources}") + else: + logger.info("Creating agent without tools") + + additional_params = {} + if ( + self.config.agent_builder_params.memory_config + and self.config.agent_builder_params.memory_config.long_term_enabled + and self.session_manager + ): + logger.info("Long-term memory enabled - adding session manager to agent") + additional_params["session_manager"] = self.session_manager + else: + logger.info("Long-term memory disabled or session manager not available") + logger.info(f"Session manager exists: {self.session_manager is not None}") + + # Create agent with configuration and loaded tools + self.agent = Agent( + name=self.config.use_case_name, + system_prompt=self.config.agent_builder_params.system_prompt, + tools=self.loaded_tools, + model=model, + **additional_params, + ) + + def get_agent(self) -> Agent: + """Get the configured Strands agent""" + if not self.agent: + raise ValueError("Agent not initialized") + return self.agent + + def get_config(self) -> UseCaseConfig: + """Get the agent configuration""" + if not self.config: + raise ValueError("Configuration not loaded") + return self.config diff --git a/deployment/ecr/gaab-strands-agent/src/main.py b/deployment/ecr/gaab-strands-agent/src/main.py new file mode 100644 index 00000000..2a8b48f2 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/src/main.py @@ -0,0 +1,205 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Configurable Strands Agent - Main Entry Point +AgentCore Runtime Integration + +TODO: Future enhancements to consider: +- Support for multiple model providers (Anthropic, Gemini, Ollama, LlamaAPI, etc.) +- Enhanced BedrockModel configuration (top_p, stop_sequences) +- Tool configuration and management +- Advanced conversation management and memory +""" + +import logging +import os +import sys +from typing import Any, Dict, Optional + +from bedrock_agentcore.memory.integrations.strands.config import ( + AgentCoreMemoryConfig, + RetrievalConfig, +) +from bedrock_agentcore.memory.integrations.strands.session_manager import ( + AgentCoreMemorySessionManager, +) +from bedrock_agentcore.runtime import BedrockAgentCoreApp +from configurable_agent import ConfigurableAgent +from gaab_strands_common import ( + ENV_AWS_REGION, + ENV_MEMORY_ID, + ENV_MEMORY_STRATEGY_ID, + ENV_USE_CASE_CONFIG_KEY, + ENV_USE_CASE_TABLE_NAME, + RuntimeStreaming, +) +from gaab_strands_common.multimodal.multimodal_processor import MultimodalRequestProcessor +from gaab_strands_common.utils.helpers import extract_user_message + + +# Suppress OpenTelemetry context warnings +logging.getLogger("opentelemetry.context").setLevel(logging.ERROR) + +logger = logging.getLogger(__name__) + +# Initialize the AgentCore app +app = BedrockAgentCoreApp() + +# Module-level private agent instance (singleton pattern) +_configurable_agent: Optional[ConfigurableAgent] = None + + +def validate_environment() -> tuple[str, str, str, str]: + """Validate required environment variables and return them""" + required_vars = { + ENV_USE_CASE_TABLE_NAME: os.getenv(ENV_USE_CASE_TABLE_NAME), + ENV_USE_CASE_CONFIG_KEY: os.getenv(ENV_USE_CASE_CONFIG_KEY), + ENV_AWS_REGION: os.getenv(ENV_AWS_REGION), + ENV_MEMORY_ID: os.getenv(ENV_MEMORY_ID), + } + + missing_vars = [name for name, value in required_vars.items() if not value] + if missing_vars: + raise ValueError(f"Missing required environment variables: {', '.join(missing_vars)}") + + strategy_id = os.getenv(ENV_MEMORY_STRATEGY_ID, "") + + table_name, config_key, region, memory_id = required_vars.values() + logger.info( + f"Environment validated - Table: {table_name}, Key: {config_key}, Region: {region}, Memory ID: {memory_id}" + ) + return table_name, config_key, region, memory_id, strategy_id + + +def get_agent_instance(session_id: str = None, actor_id=None) -> ConfigurableAgent: + """Get or create the singleton agent instance""" + global _configurable_agent + + if _configurable_agent is None: + logger.info("Initializing Configurable Strands Agent") + logger.info("Testing longterm memory") + # Validate environment variables first + table_name, config_key, region, memory_id, strategy_id = validate_environment() + + # Create session manager only if strategy_id exists + session_manager = None + if strategy_id: + agentcore_memory_config = AgentCoreMemoryConfig( + memory_id=memory_id, + session_id=session_id, + actor_id=actor_id, + retrieval_config={ + "/strategies/{memoryStrategyId}/actors/{actorId}": RetrievalConfig( + top_k=5, relevance_score=0.7, strategy_id=strategy_id + ) + }, + ) + session_manager = AgentCoreMemorySessionManager( + agentcore_memory_config=agentcore_memory_config, region_name=region + ) + + # Create agent with validated parameters + _configurable_agent = ConfigurableAgent( + table_name=table_name, + config_key=config_key, + region=region, + session_manager=session_manager, + ) + logger.info("Agent initialized successfully") + + return _configurable_agent + + +@app.entrypoint +def invoke(payload: Dict[str, Any]): + """AgentCore Runtime entrypoint function""" + try: + # Extract session ID and create memory client if needed + session_id = payload.get("conversationId") + actor_id = payload.get("userId") + logger.info(f"Session ID: {session_id}") + logger.info(f"Actor ID: {actor_id}") + + # Get agent instance with session context + agent_instance = get_agent_instance(session_id=session_id, actor_id=actor_id) + strands_agent = agent_instance.get_agent() + config = agent_instance.get_config() + + region = os.getenv(ENV_AWS_REGION) + multimodal_processor = MultimodalRequestProcessor(region) + has_files = multimodal_processor.has_files(payload) + multimodal_enabled = multimodal_processor.is_multimodal_enabled(config) + logger.debug(f"Multimodal enabled: {multimodal_enabled}") + logger.debug(f"Has files: {has_files}") + + # Determine processing mode and handle accordingly + if has_files and multimodal_enabled: + logger.debug("Multimodal request detected - processing files") + user_message = multimodal_processor.process_multimodal_request(payload) + elif has_files and not multimodal_enabled: + logger.warning("FILES IGNORED: User sent files but multimodal is disabled. Enable multimodal in configuration to process files. ") + user_message = extract_user_message(payload) + else: + # No files present - process as text-only regardless of multimodal setting + if multimodal_enabled: + logger.debug("Text-only request (multimodal enabled but no files provided)") + else: + logger.debug("Text-only request (multimodal disabled)") + user_message = extract_user_message(payload) + + logger.debug(f"User message: {user_message[:100]}...") + + if config.llm_params.streaming: + logger.debug("Using streaming mode") + return RuntimeStreaming.stream_response(strands_agent, user_message, config) + + # Non-streaming response + response = strands_agent(user_message) + + return { + "result": str(response), + "agent_name": config.use_case_name, + "model_id": config.llm_params.bedrock_llm_params.model_id, + } + + except ValueError as e: + # Configuration or validation errors + logger.error(f"Validation error: {e}") + return {"type": "error", "error": "Invalid configuration or request", "message": str(e)} + + except RuntimeError as e: + # Agent execution errors + logger.error(f"Runtime error: {e}") + return {"type": "error", "error": "Agent execution failed", "message": str(e)} + + except Exception as e: + # Unexpected errors + logger.error(f"Unexpected error processing request: {e}", exc_info=True) + return {"type": "error", "error": "Request processing failed", "message": str(e)} + + +def main(): + """ + Main entry point for the application. + + This function is called when the Lambda container starts. It: + 1. Initializes the workflow agent (validates environment and loads config) + 2. Starts the AgentCore Runtime application + 3. Handles startup errors gracefully + + Exits: + 1: If initialization fails + """ + logger.info("Starting the Agent") + + try: + app.run() + except Exception as e: + logger.error(f"Failed to start workflow agent: {e}", exc_info=True) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/deployment/ecr/gaab-strands-agent/test/conftest.py b/deployment/ecr/gaab-strands-agent/test/conftest.py new file mode 100644 index 00000000..907bf18f --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/conftest.py @@ -0,0 +1,40 @@ +""" +Pytest configuration for test suite +""" + +import os +import pytest +from unittest.mock import patch + + +def mock_requires_access_token(*args, **kwargs): + """Mock decorator that just returns the function unchanged""" + + def decorator(func): + return func + + return decorator + + +# Apply the patch at module level before any imports +patcher = patch("bedrock_agentcore.identity.auth.requires_access_token", mock_requires_access_token) +patcher.start() + + +def pytest_sessionfinish(session, exitstatus): + """Called after whole test run finished""" + patcher.stop() + + +@pytest.fixture(autouse=True) +def mock_environment(): + """Mock environment variables for all tests""" + with patch.dict( + os.environ, + { + "AWS_REGION": "us-east-1", + "AWS_SDK_USER_AGENT": '{"user_agent_extra": "test-agent"}', + }, + clear=False, + ): + yield diff --git a/deployment/ecr/gaab-strands-agent/test/run_tests.py b/deployment/ecr/gaab-strands-agent/test/run_tests.py new file mode 100644 index 00000000..6ee51eb2 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/run_tests.py @@ -0,0 +1,29 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Test runner for all unit tests +""" + +import os +import sys +import unittest + + +def run_all_tests(): + """Run all unit tests""" + # Discover and run all tests in the test directory + loader = unittest.TestLoader() + start_dir = os.path.dirname(__file__) + suite = loader.discover(start_dir, pattern="test_*.py") + + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(suite) + + return result.wasSuccessful() + + +if __name__ == "__main__": + success = run_all_tests() + sys.exit(0 if success else 1) diff --git a/deployment/ecr/gaab-strands-agent/test/test_configurable_agent.py b/deployment/ecr/gaab-strands-agent/test/test_configurable_agent.py new file mode 100644 index 00000000..f37cb6f1 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_configurable_agent.py @@ -0,0 +1,1106 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Unit tests for configurable_agent.py +""" + +import os +import sys +import unittest +from unittest.mock import MagicMock, patch + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +from configurable_agent import ConfigurableAgent + + +class TestConfigurableAgent(unittest.TestCase): + """Test ConfigurableAgent class""" + + def setUp(self): + """Set up test data""" + self.test_ddb_config = { + "UseCaseName": "Test Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are a test assistant.", + "Tools": [{"ToolId": "HTTP Request"}, {"ToolId": "File Operations"}], + "MemoryConfig": {"LongTermEnabled": "Yes"}, + }, + "LlmParams": { + "BedrockLlmParams": { + "ModelId": "amazon.nova-lite-v1:0", + }, + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "ModelParams": {}, + }, + } + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_initialization_success( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test successful agent initialization""" + # Mock DDB helper + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = self.test_ddb_config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock ToolsManager + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + # Mock BedrockModel + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + # Mock Agent + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify initialization + self.assertEqual(agent.table_name, "test-table") + self.assertEqual(agent.config_key, "test-key") + self.assertEqual(agent.region, "us-east-1") + self.assertIsNotNone(agent.config) + self.assertIsNotNone(agent.agent) + self.assertIsNotNone(agent.tools_manager) + + # Verify config parsing + self.assertEqual(agent.config.use_case_name, "Test Agent") + self.assertEqual( + agent.config.llm_params.bedrock_llm_params.model_identifier, "amazon.nova-lite-v1:0" + ) + + # Verify ToolsManager was called + mock_tools_manager_instance.load_all_tools.assert_called_once() + + @patch("configurable_agent.DynamoDBHelper") + def test_invalid_use_case_type(self, mock_ddb_helper): + """Test error handling for invalid use case type""" + # Mock DDB helper with wrong use case type + invalid_config = self.test_ddb_config.copy() + invalid_config["UseCaseType"] = "Workflow" + + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = invalid_config + mock_ddb_helper.return_value = mock_ddb_instance + + with self.assertRaises(ValueError) as context: + ConfigurableAgent("test-table", "test-key", "us-east-1") + + self.assertIn("Expected AgentBuilder, got Workflow", str(context.exception)) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_bedrock_model_creation( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test BedrockModel creation with correct parameters""" + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = self.test_ddb_config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock ToolsManager + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify BedrockModel was called with correct parameters + mock_bedrock_model.assert_called_once() + call_args = mock_bedrock_model.call_args[1] + + self.assertEqual(call_args["model_id"], "amazon.nova-lite-v1:0") + self.assertEqual(call_args["region_name"], "us-east-1") + self.assertEqual(call_args["temperature"], 0.7) + self.assertTrue(call_args["streaming"]) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_agent_creation( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test Agent creation with correct parameters""" + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = self.test_ddb_config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock ToolsManager with some tools + mock_tool1 = MagicMock() + mock_tool2 = MagicMock() + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [mock_tool1, mock_tool2] + mock_tools_manager_instance.get_tool_sources.return_value = { + "tool1": "Strands", + "tool2": "Strands", + } + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify Agent was called with correct parameters + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + + self.assertEqual(call_args["name"], "Test Agent") + self.assertEqual(call_args["system_prompt"], "You are a test assistant.") + self.assertEqual(call_args["model"], mock_model_instance) + self.assertEqual(call_args["tools"], [mock_tool1, mock_tool2]) # Now includes loaded tools + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_get_methods(self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager): + """Test get_agent and get_config methods""" + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = self.test_ddb_config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock ToolsManager + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Test get_agent + returned_agent = agent.get_agent() + self.assertEqual(returned_agent, mock_agent_instance) + + # Test get_config + returned_config = agent.get_config() + self.assertEqual(returned_config.use_case_name, "Test Agent") + + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_inference_profile_model_creation( + self, mock_bedrock_model, mock_agent, mock_ddb_helper + ): + """Test BedrockModel creation with inference profile""" + # Create inference profile config + inference_profile_config = { + "UseCaseName": "Inference Profile Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are a helpful AI assistant.", + "MemoryConfig": {"LongTermEnabled": True}, + }, + "LlmParams": { + "BedrockLlmParams": { + "BedrockInferenceType": "INFERENCE_PROFILE", + "InferenceProfileId": "us.anthropic.claude-3-7-sonnet-20250219-v1:0", + }, + "ModelProvider": "Bedrock", + "Temperature": 0.5, + "Streaming": True, + "Verbose": False, + "ModelParams": {}, + }, + } + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = inference_profile_config + mock_ddb_helper.return_value = mock_ddb_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify BedrockModel was called with inference profile ID + mock_bedrock_model.assert_called_once() + call_args = mock_bedrock_model.call_args[1] + + self.assertEqual(call_args["model_id"], "us.anthropic.claude-3-7-sonnet-20250219-v1:0") + + # Verify config parsing + self.assertEqual( + agent.config.llm_params.bedrock_llm_params.inference_profile_id, + "us.anthropic.claude-3-7-sonnet-20250219-v1:0", + ) + self.assertEqual( + agent.config.llm_params.bedrock_llm_params.model_identifier, + "us.anthropic.claude-3-7-sonnet-20250219-v1:0", + ) + self.assertIsNone(agent.config.llm_params.bedrock_llm_params.model_id) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_tool_loading_with_mcp_servers( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test tool loading with both built-in tools and MCP servers""" + # Add MCP servers to config + config_with_mcp = self.test_ddb_config.copy() + config_with_mcp["AgentBuilderParams"]["MCPServers"] = [ + { + "UseCaseId": "gateway-calendar", + "UseCaseName": "Gateway Calendar", + "Url": "https://example-gateway.bedrock-agentcore.us-east-1.amazonaws.com/calendar", + "Type": "gateway", + }, + { + "UseCaseId": "runtime-database", + "UseCaseName": "Runtime Database", + "Url": "https://example-bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test-runtime/invocations", + "Type": "runtime", + }, + ] + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_mcp + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock ToolsManager with tools + mock_tool1 = MagicMock() + mock_tool2 = MagicMock() + mock_tool3 = MagicMock() + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [ + mock_tool1, + mock_tool2, + mock_tool3, + ] + mock_tools_manager_instance.get_tool_sources.return_value = { + "http_request": "Strands", + "get_calendar_events": "MCP-Gateway", + "query_database": "MCP-Runtime", + } + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify ToolsManager.load_all_tools was called with correct parameters + mock_tools_manager_instance.load_all_tools.assert_called_once_with( + mcp_servers=[ + { + "use_case_id": "gateway-calendar", + "url": "https://example-gateway.bedrock-agentcore.us-east-1.amazonaws.com/calendar", + "type": "gateway", + }, + { + "use_case_id": "runtime-database", + "url": "https://example-bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test-runtime/invocations", + "type": "runtime", + }, + ], + strands_tool_ids=["HTTP Request", "File Operations"], + custom_tool_ids=[], + ) + + # Verify tools were loaded + self.assertEqual(len(agent.loaded_tools), 3) + self.assertEqual(agent.loaded_tools, [mock_tool1, mock_tool2, mock_tool3]) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_tool_loading_failure_graceful_handling( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that agent initializes successfully even if tool loading fails""" + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = self.test_ddb_config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock ToolsManager to raise an exception + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.side_effect = Exception("Tool loading failed") + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Should not raise exception - agent should initialize with empty tools + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify agent was created successfully + self.assertIsNotNone(agent.agent) + self.assertEqual(agent.loaded_tools, []) + + # Verify Agent was called with empty tools list + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + self.assertEqual(call_args["tools"], []) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_env_vars_extracted_from_model_params( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that ENV_ prefixed ModelParams are extracted correctly""" + # Add ModelParams with ENV_ prefix and configure tools + config_with_env = self.test_ddb_config.copy() + config_with_env["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "Current Time"}, + {"ToolId": "Weather"}, + ] + config_with_env["LlmParams"]["ModelParams"] = { + "ENV_CURRENT_TIME_DEFAULT_TIMEZONE": {"Value": "America/New_York", "Type": "string"}, + "ENV_WEATHER_LOCATION": {"Value": "Seattle", "Type": "string"}, + "REGULAR_PARAM": {"Value": "should-not-be-set", "Type": "string"}, + } + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_env + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Clear any existing env vars + if "DEFAULT_TIMEZONE" in os.environ: + del os.environ["DEFAULT_TIMEZONE"] + if "LOCATION" in os.environ: + del os.environ["LOCATION"] + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify environment variables were set correctly + # ENV_CURRENT_TIME_DEFAULT_TIMEZONE -> DEFAULT_TIMEZONE (everything after ENV_CURRENT_TIME_) + self.assertEqual(os.environ.get("DEFAULT_TIMEZONE"), "America/New_York") + # ENV_WEATHER_LOCATION -> LOCATION (everything after ENV_WEATHER_) + self.assertEqual(os.environ.get("LOCATION"), "Seattle") + + # Verify non-ENV_ prefixed param was not set + self.assertIsNone(os.environ.get("REGULAR_PARAM")) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_env_vars_value_field_extraction( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that Value field is extracted from ModelParams objects""" + # Add ModelParams with Value field and configure tool + config_with_env = self.test_ddb_config.copy() + config_with_env["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "Tool"}, + ] + config_with_env["LlmParams"]["ModelParams"] = { + "ENV_TOOL_CONFIG_VALUE": {"Value": "extracted-value", "Type": "string"}, + } + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_env + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Clear any existing env var + if "CONFIG_VALUE" in os.environ: + del os.environ["CONFIG_VALUE"] + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify the Value field was extracted and set + self.assertEqual(os.environ.get("CONFIG_VALUE"), "extracted-value") + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_env_vars_set_with_correct_names( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that environment variables are set with correct names (ENV__ prefix removed)""" + # Add ModelParams with various ENV_ patterns and configure tools + config_with_env = self.test_ddb_config.copy() + config_with_env["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "Current Time"}, + {"ToolId": "Database"}, + {"ToolId": "Weather"}, + ] + config_with_env["LlmParams"]["ModelParams"] = { + "ENV_CURRENT_TIME_DEFAULT_TIMEZONE": {"Value": "UTC", "Type": "string"}, + "ENV_DATABASE_CONNECTION_STRING": {"Value": "conn-string", "Type": "string"}, + "ENV_WEATHER_LOCATION": {"Value": "Seattle", "Type": "string"}, + } + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_env + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Clear any existing env vars + for var in ["DEFAULT_TIMEZONE", "CONNECTION_STRING", "LOCATION"]: + if var in os.environ: + del os.environ[var] + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify environment variables have correct names (prefix removed) + # ENV_CURRENT_TIME_DEFAULT_TIMEZONE -> DEFAULT_TIMEZONE + self.assertEqual(os.environ.get("DEFAULT_TIMEZONE"), "UTC") + # ENV_DATABASE_CONNECTION_STRING -> CONNECTION_STRING + self.assertEqual(os.environ.get("CONNECTION_STRING"), "conn-string") + # ENV_WEATHER_LOCATION -> LOCATION + self.assertEqual(os.environ.get("LOCATION"), "Seattle") + + # Verify the full keys are NOT set as env vars + self.assertIsNone(os.environ.get("ENV_CURRENT_TIME_DEFAULT_TIMEZONE")) + self.assertIsNone(os.environ.get("ENV_DATABASE_CONNECTION_STRING")) + self.assertIsNone(os.environ.get("ENV_WEATHER_LOCATION")) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_non_env_prefixed_params_ignored( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that non-ENV_ prefixed ModelParams are ignored""" + # Add ModelParams with mixed prefixes and configure tool + config_with_env = self.test_ddb_config.copy() + config_with_env["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "Tool"}, + ] + config_with_env["LlmParams"]["ModelParams"] = { + "ENV_TOOL_VALID_VAR": {"Value": "should-be-set", "Type": "string"}, + "REGULAR_PARAM": {"Value": "should-not-be-set", "Type": "string"}, + "ANOTHER_PARAM": {"Value": "also-not-set", "Type": "string"}, + "temperature": {"Value": "0.5", "Type": "float"}, + } + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_env + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Clear any existing env vars + for var in ["VALID_VAR", "REGULAR_PARAM", "ANOTHER_PARAM", "temperature"]: + if var in os.environ: + del os.environ[var] + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify only ENV_ prefixed param was set + self.assertEqual(os.environ.get("VALID_VAR"), "should-be-set") + + # Verify non-ENV_ prefixed params were NOT set + self.assertIsNone(os.environ.get("REGULAR_PARAM")) + self.assertIsNone(os.environ.get("ANOTHER_PARAM")) + self.assertIsNone(os.environ.get("temperature")) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + @patch("configurable_agent.logger") + def test_env_vars_logging( + self, mock_logger, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that setting environment variables is logged correctly""" + # Add ModelParams with ENV_ prefix and configure tools + config_with_env = self.test_ddb_config.copy() + config_with_env["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "Weather"}, + {"ToolId": "Current Time"}, + {"ToolId": "Calculator"}, + ] + config_with_env["LlmParams"]["ModelParams"] = { + "ENV_WEATHER_LOCATION": {"Value": "Seattle", "Type": "string"}, + "ENV_CURRENT_TIME_DEFAULT_TIMEZONE": {"Value": "UTC", "Type": "string"}, + "ENV_CALCULATOR_PRECISION": {"Value": "10", "Type": "string"}, + } + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_env + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify debug logging for each environment variable + debug_calls = [ + call + for call in mock_logger.debug.call_args_list + if "Set environment variable" in str(call) + ] + self.assertEqual(len(debug_calls), 3) + + # Verify info logging for summary + info_calls = [ + call + for call in mock_logger.info.call_args_list + if "tool environment variable(s) from ModelParams" in str(call) + ] + self.assertEqual(len(info_calls), 1) + # Check that it logged "Set 3 tool environment variable(s) from ModelParams" + self.assertIn("Set 3 tool environment variable(s) from ModelParams", str(info_calls[0])) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_env_vars_no_model_params( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that agent initializes correctly when ModelParams is empty or missing""" + # Test with empty ModelParams + config_empty_params = self.test_ddb_config.copy() + config_empty_params["LlmParams"]["ModelParams"] = {} + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_empty_params + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Should not raise exception + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + self.assertIsNotNone(agent.agent) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_env_vars_legacy_format_support( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that legacy format (direct value without Value field) is supported""" + # Add ModelParams with legacy format (direct value) and configure tool + config_with_env = self.test_ddb_config.copy() + config_with_env["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "Tool"}, + ] + config_with_env["LlmParams"]["ModelParams"] = { + "ENV_TOOL_LEGACY_VAR": "direct-value-no-dict", + } + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_env + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Clear any existing env var + if "LEGACY_VAR" in os.environ: + del os.environ["LEGACY_VAR"] + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify environment variable was set from legacy format + self.assertEqual(os.environ.get("LEGACY_VAR"), "direct-value-no-dict") + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + @patch("configurable_agent.logger") + def test_env_vars_missing_value_field_warning( + self, mock_logger, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that warning is logged when ENV_ key has dict without Value field""" + # Add ModelParams with ENV_ prefix but missing Value field and configure tool + config_with_env = self.test_ddb_config.copy() + config_with_env["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "Tool"}, + ] + config_with_env["LlmParams"]["ModelParams"] = { + "ENV_TOOL_INVALID": {"Type": "string"}, # Missing Value field + } + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_env + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify warning was logged + warning_calls = [ + call + for call in mock_logger.warning.call_args_list + if "ENV_ prefix but missing 'Value' field" in str(call) + ] + self.assertEqual(len(warning_calls), 1) + self.assertIn("ENV_TOOL_INVALID", str(warning_calls[0])) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_memory_enabled_with_session_manager( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test agent creation with memory enabled and session manager available""" + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = self.test_ddb_config + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + mock_session_manager = MagicMock() + + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1", mock_session_manager) + + # Verify Agent was called with session_manager + mock_agent.assert_called_once() + call_kwargs = mock_agent.call_args[1] + self.assertIn("session_manager", call_kwargs) + self.assertEqual(call_kwargs["session_manager"], mock_session_manager) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_memory_disabled_ignores_session_manager( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test agent creation with memory disabled ignores session manager""" + # Disable memory in config + config_no_memory = self.test_ddb_config.copy() + config_no_memory["AgentBuilderParams"]["MemoryConfig"]["LongTermEnabled"] = False + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_no_memory + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + mock_session_manager = MagicMock() + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1", mock_session_manager) + + # Verify Agent was called without session_manager + mock_agent.assert_called_once() + call_kwargs = mock_agent.call_args[1] + self.assertNotIn("session_manager", call_kwargs) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_custom_tools_loading( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test loading custom tools along with built-in tools""" + config_with_custom = self.test_ddb_config.copy() + config_with_custom["AgentBuilderParams"]["CustomTools"] = [ + {"ToolId": "custom-s3-reader"}, + {"ToolId": "custom-calculator"}, + ] + + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_with_custom + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock ToolsManager with mixed tools + mock_builtin_tool = MagicMock() + mock_custom_tool1 = MagicMock() + mock_custom_tool2 = MagicMock() + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [ + mock_builtin_tool, + mock_custom_tool1, + mock_custom_tool2, + ] + mock_tools_manager_instance.get_tool_sources.return_value = { + "http_request": "Strands", + "custom_s3_reader": "Custom", + "custom_calculator": "Custom", + } + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify ToolsManager.load_all_tools was called with custom tools + mock_tools_manager_instance.load_all_tools.assert_called_once_with( + mcp_servers=[], + strands_tool_ids=["HTTP Request", "File Operations"], + custom_tool_ids=["custom-s3-reader", "custom-calculator"], + ) + + # Verify all tools were loaded + self.assertEqual(len(agent.loaded_tools), 3) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_cross_region_inference_profile_detection( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test detection and logging of cross-region inference profiles""" + # Create config with cross-region inference profile + config_cross_region = self.test_ddb_config.copy() + config_cross_region["LlmParams"]["BedrockLlmParams"][ + "ModelId" + ] = "us.anthropic.claude-3-5-sonnet-20241022-v2:0" + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config_cross_region + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + with patch("configurable_agent.logger") as mock_logger: + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify cross-region profile was detected and logged + cross_region_log_calls = [ + call + for call in mock_logger.info.call_args_list + if "Detected cross-region inference profile" in str(call) + ] + self.assertEqual(len(cross_region_log_calls), 1) + self.assertIn( + "us.anthropic.claude-3-5-sonnet-20241022-v2:0", str(cross_region_log_calls[0]) + ) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_session_manager_parameter( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that session_id parameter is stored correctly""" + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = self.test_ddb_config + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + mock_session_manager = MagicMock() + + agent = ConfigurableAgent( + "test-table", + "test-key", + "us-east-1", + session_manager=mock_session_manager, + ) + + # Verify session_manager is stored + self.assertEqual(agent.session_manager, mock_session_manager) + + @patch("configurable_agent.DynamoDBHelper") + def test_ddb_connection_error_handling(self, mock_ddb_helper): + """Test error handling when DynamoDB connection fails""" + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.side_effect = Exception("DynamoDB connection failed") + mock_ddb_helper.return_value = mock_ddb_instance + + with self.assertRaises(Exception) as context: + ConfigurableAgent("test-table", "test-key", "us-east-1") + + self.assertIn("DynamoDB connection failed", str(context.exception)) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.BedrockModel") + def test_model_creation_error_handling( + self, mock_bedrock_model, mock_ddb_helper, mock_tools_manager + ): + """Test error handling when model creation fails""" + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = self.test_ddb_config + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_bedrock_model.side_effect = Exception("Model creation failed") + + with self.assertRaises(Exception) as context: + ConfigurableAgent("test-table", "test-key", "us-east-1") + + self.assertIn("Model creation failed", str(context.exception)) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_get_methods_error_handling( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test error handling in get_agent and get_config methods""" + # Create agent instance without proper initialization + agent = ConfigurableAgent.__new__(ConfigurableAgent) + agent.agent = None + agent.config = None + + with self.assertRaises(ValueError) as context: + agent.get_agent() + self.assertIn("Agent not initialized", str(context.exception)) + + with self.assertRaises(ValueError) as context: + agent.get_config() + self.assertIn("Configuration not loaded", str(context.exception)) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_comprehensive_tool_configuration( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test comprehensive tool configuration with all tool types""" + # Create config with all tool types + comprehensive_config = self.test_ddb_config.copy() + comprehensive_config["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "HTTP Request"}, + {"ToolId": "File Operations"}, + ] + comprehensive_config["AgentBuilderParams"]["CustomTools"] = [ + {"ToolId": "custom-s3-reader"}, + {"ToolId": "custom-database"}, + ] + comprehensive_config["AgentBuilderParams"]["MCPServers"] = [ + { + "UseCaseId": "gateway-calendar", + "Url": "https://gateway.example.com/calendar", + "Type": "gateway", + }, + { + "UseCaseId": "runtime-weather", + "Url": "https://runtime.example.com/weather", + "Type": "runtime", + }, + ] + + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = comprehensive_config + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools = [MagicMock() for _ in range(6)] # 2 built-in + 2 custom + 2 MCP + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = mock_tools + mock_tools_manager_instance.get_tool_sources.return_value = { + "http_request": "Strands", + "file_operations": "Strands", + "custom_s3_reader": "Custom", + "custom_database": "Custom", + "get_calendar_events": "MCP-Gateway", + "get_weather": "MCP-Runtime", + } + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + mock_tools_manager_instance.load_all_tools.assert_called_once_with( + mcp_servers=[ + { + "use_case_id": "gateway-calendar", + "url": "https://gateway.example.com/calendar", + "type": "gateway", + }, + { + "use_case_id": "runtime-weather", + "url": "https://runtime.example.com/weather", + "type": "runtime", + }, + ], + strands_tool_ids=["HTTP Request", "File Operations"], + custom_tool_ids=["custom-s3-reader", "custom-database"], + ) + + self.assertEqual(len(agent.loaded_tools), 6) + self.assertEqual(agent.loaded_tools, mock_tools) + + +if __name__ == "__main__": + unittest.main() diff --git a/deployment/ecr/gaab-strands-agent/test/test_ddb_helper.py b/deployment/ecr/gaab-strands-agent/test/test_ddb_helper.py new file mode 100644 index 00000000..e6ac0fe0 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_ddb_helper.py @@ -0,0 +1,362 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Unit tests for ddb_helper.py +""" + +import os +import sys +import unittest +from unittest.mock import Mock, patch, MagicMock + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +from gaab_strands_common import DynamoDBHelper + +_original_get_config = DynamoDBHelper.get_config +_original_get_mcp_configs = DynamoDBHelper.get_mcp_configs + + +# Create wrapper functions that strip the access_token parameter +def patched_get_config(self, key: str, access_token=None): + """Patched version that ignores access_token""" + return _original_get_config.__wrapped__(self, key) + + +def patched_get_mcp_configs(self, mcp_ids, access_token=None): + """Patched version that ignores access_token""" + return _original_get_mcp_configs.__wrapped__(self, mcp_ids) + + +DynamoDBHelper.get_config = patched_get_config +DynamoDBHelper.get_mcp_configs = patched_get_mcp_configs + + +class TestDynamoDBHelper(unittest.TestCase): + """Test DynamoDBHelper class""" + + def setUp(self): + """Set up test environment""" + # Set test environment variables + os.environ["USE_CASE_TABLE_NAME"] = "test-table" + os.environ["AWS_REGION"] = "us-east-1" + os.environ["M2M_IDENTITY_NAME"] = "test-identity-provider" + + def tearDown(self): + """Clean up test environment""" + # Clean up environment variables + if "USE_CASE_TABLE_NAME" in os.environ: + del os.environ["USE_CASE_TABLE_NAME"] + if "AWS_REGION" in os.environ: + del os.environ["AWS_REGION"] + if "M2M_IDENTITY_NAME" in os.environ: + del os.environ["M2M_IDENTITY_NAME"] + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_initialization_with_params(self, mock_boto3_resource): + """Test DynamoDBHelper initialization with explicit parameters""" + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + + mock_boto3_resource.assert_called_once_with("dynamodb", region_name="us-east-1") + mock_dynamodb.Table.assert_called_once_with("test-table") + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_initialization_with_different_params(self, mock_boto3_resource): + """Test DynamoDBHelper initialization with different parameters""" + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="custom-table", region="us-west-2") + + mock_boto3_resource.assert_called_once_with("dynamodb", region_name="us-west-2") + mock_dynamodb.Table.assert_called_once_with("custom-table") + + def test_initialization_without_table_name(self): + """Test DynamoDBHelper initialization fails without table name""" + with self.assertRaises(TypeError) as context: + DynamoDBHelper() + + self.assertIn("missing 2 required positional arguments", str(context.exception)) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_use_case_item_success(self, mock_boto3_resource): + """Test successful retrieval of use case item""" + # Mock DynamoDB response + mock_item = { + "key": "test-key", + "config": {"UseCaseName": "Test Agent", "UseCaseType": "AgentBuilder"}, + } + + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.return_value = {"Item": mock_item} + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper.get_config("test-key") + + self.assertEqual(result, mock_item["config"]) + mock_table.get_item.assert_called_once_with(Key={"key": "test-key"}) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_use_case_item_not_found(self, mock_boto3_resource): + """Test retrieval when item not found""" + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.return_value = {} # No 'Item' key + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + + with self.assertRaises(ValueError) as context: + helper.get_config("nonexistent-key") + + self.assertIn("Configuration not found", str(context.exception)) + mock_table.get_item.assert_called_once_with(Key={"key": "nonexistent-key"}) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_use_case_item_exception(self, mock_boto3_resource): + """Test retrieval when DynamoDB raises exception""" + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.side_effect = Exception("DynamoDB error") + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + + with self.assertRaises(Exception) as context: + helper.get_config("test-key") + + self.assertIn("DynamoDB error", str(context.exception)) + mock_table.get_item.assert_called_once_with(Key={"key": "test-key"}) + + # Create a second helper with different parameters + helper2 = DynamoDBHelper(table_name="custom-table", region="us-west-2") + + self.assertIsNotNone(helper2.table) + # boto3.resource is called twice now (once for each helper) + self.assertEqual(mock_boto3_resource.call_count, 2) + # Check the second call was with the right parameters + mock_boto3_resource.assert_any_call("dynamodb", region_name="us-west-2") + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_config_success(self, mock_boto3_resource): + """Test successful get_config""" + mock_config = {"UseCaseName": "Test Config", "UseCaseType": "AgentBuilder"} + mock_item = {"key": "test-key", "config": mock_config} + + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.return_value = {"Item": mock_item} + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper.get_config("test-key") + + self.assertEqual(result, mock_config) + mock_table.get_item.assert_called_once_with(Key={"key": "test-key"}) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_config_not_found(self, mock_boto3_resource): + """Test get_config when item not found""" + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.return_value = {} + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + + with self.assertRaises(ValueError) as context: + helper.get_config("nonexistent-key") + + self.assertIn("Configuration not found", str(context.exception)) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_mcp_configs_empty_list(self, mock_boto3_resource): + """Test get_mcp_configs with empty list""" + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper.get_mcp_configs([]) + + self.assertEqual(result, []) + mock_table.get_item.assert_not_called() + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_mcp_configs_success(self, mock_boto3_resource): + """Test successful batch fetching of MCP configs""" + mock_config1 = { + "UseCaseName": "Gateway MCP", + "UseCaseType": "MCPServer", + "MCPParams": { + "GatewayParams": { + "GatewayUrl": "https://test.com", + "GatewayArn": "arn:aws:test", + "GatewayId": "gw-1", + "GatewayName": "Test Gateway", + } + }, + } + mock_config2 = { + "UseCaseName": "Runtime MCP", + "UseCaseType": "MCPServer", + "MCPParams": { + "RuntimeParams": { + "EcrUri": "123.dkr.ecr.us-east-1.amazonaws.com/test:latest", + "AgentARN": "arn:aws:bedrock:test", + } + }, + } + + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.side_effect = [ + {"Item": {"key": "mcp-1", "config": mock_config1}}, + {"Item": {"key": "mcp-2", "config": mock_config2}}, + ] + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper.get_mcp_configs(["mcp-1", "mcp-2"]) + + self.assertEqual(len(result), 2) + self.assertEqual(result[0], mock_config1) + self.assertEqual(result[1], mock_config2) + self.assertEqual(mock_table.get_item.call_count, 2) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_mcp_configs_invalid_use_case_type(self, mock_boto3_resource): + """Test get_mcp_configs with invalid UseCaseType""" + mock_config = {"UseCaseName": "Not MCP", "UseCaseType": "AgentBuilder"} # Wrong type + + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.return_value = {"Item": {"key": "mcp-1", "config": mock_config}} + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + + with self.assertRaises(ValueError) as context: + helper.get_mcp_configs(["mcp-1"]) + + self.assertIn("Invalid UseCaseType", str(context.exception)) + self.assertIn("expected 'MCPServer'", str(context.exception)) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_mcp_configs_missing_config(self, mock_boto3_resource): + """Test get_mcp_configs when one config is missing""" + mock_config = { + "UseCaseName": "Gateway MCP", + "UseCaseType": "MCPServer", + "MCPParams": {"GatewayParams": {}}, + } + + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.side_effect = [ + {"Item": {"key": "mcp-1", "config": mock_config}}, + {}, + ] # Missing item + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper.get_mcp_configs(["mcp-1", "mcp-missing"]) + + # Should return only the successful config + self.assertEqual(len(result), 1) + self.assertEqual(result[0], mock_config) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_get_mcp_configs_partial_failure(self, mock_boto3_resource): + """Test get_mcp_configs with partial failures""" + mock_config = { + "UseCaseName": "Gateway MCP", + "UseCaseType": "MCPServer", + "MCPParams": {"GatewayParams": {}}, + } + + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.side_effect = [ + {"Item": {"key": "mcp-1", "config": mock_config}}, + Exception("DynamoDB error"), + {}, # Missing item + ] + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper.get_mcp_configs(["mcp-1", "mcp-error", "mcp-missing"]) + + # Should return only the successful config + self.assertEqual(len(result), 1) + self.assertEqual(result[0], mock_config) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_fetch_and_validate_mcp_config_success(self, mock_boto3_resource): + """Test _fetch_and_validate_mcp_config with valid config""" + mock_config = {"UseCaseName": "Test MCP", "UseCaseType": "MCPServer", "MCPParams": {}} + + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.return_value = {"Item": {"key": "mcp-1", "config": mock_config}} + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper._fetch_and_validate_mcp_config("mcp-1") + + self.assertEqual(result, mock_config) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_fetch_and_validate_mcp_config_not_found(self, mock_boto3_resource): + """Test _fetch_and_validate_mcp_config when config not found""" + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.return_value = {} + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper._fetch_and_validate_mcp_config("mcp-missing") + + self.assertIsNone(result) + + @patch("gaab_strands_common.ddb_helper.boto3.resource") + def test_fetch_and_validate_mcp_config_no_config_field(self, mock_boto3_resource): + """Test _fetch_and_validate_mcp_config when config field is missing""" + mock_dynamodb = MagicMock() + mock_table = MagicMock() + mock_table.get_item.return_value = {"Item": {"key": "mcp-1"}} # No config field + mock_dynamodb.Table.return_value = mock_table + mock_boto3_resource.return_value = mock_dynamodb + + helper = DynamoDBHelper(table_name="test-table", region="us-east-1") + result = helper._fetch_and_validate_mcp_config("mcp-1") + + self.assertIsNone(result) + + +if __name__ == "__main__": + unittest.main() diff --git a/deployment/ecr/gaab-strands-agent/test/test_integration.py b/deployment/ecr/gaab-strands-agent/test/test_integration.py new file mode 100644 index 00000000..d7cf9783 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_integration.py @@ -0,0 +1,545 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Integration tests for ConfigurableAgent with ToolsManager +Tests end-to-end flow from configuration to tool execution +""" + +import os +import sys +import unittest +from unittest.mock import MagicMock, patch + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +from configurable_agent import ConfigurableAgent + + +class TestConfigurableAgentIntegration(unittest.TestCase): + """Integration tests for ConfigurableAgent with all tool sources""" + + def setUp(self): + """Set up test data for various scenarios""" + # Base configuration + self.base_config = { + "UseCaseName": "Integration Test Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are a helpful assistant.", + "MemoryConfig": {"LongTermEnabled": True}, + }, + "LlmParams": { + "BedrockLlmParams": { + "ModelId": "amazon.nova-lite-v1:0", + }, + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "ModelParams": {}, + }, + } + + # Gateway MCP server configuration + self.gateway_mcp_config = { + "UseCaseName": "Gateway Calendar", + "UseCaseType": "MCPServer", + "MCPParams": { + "GatewayParams": { + "GatewayUrl": "https://test.gateway.com/mcp", + "GatewayArn": "arn:aws:bedrock-agentcore:us-east-1:123:gateway/test", + "GatewayId": "test-gateway", + "GatewayName": "Test Gateway", + "TargetParams": [ + { + "TargetName": "Calendar Service", + "TargetType": "openApiSchema", + "OutboundAuthParams": { + "OutboundAuthProviderArn": "arn:aws:bedrock-agentcore:us-east-1:123:token-vault/test", + "OutboundAuthProviderType": "OAUTH", + }, + } + ], + } + }, + } + + # Runtime MCP server configuration + self.runtime_mcp_config = { + "UseCaseName": "Runtime Database", + "UseCaseType": "MCPServer", + "MCPParams": { + "RuntimeParams": { + "EcrUri": "123.dkr.ecr.us-east-1.amazonaws.com/test:latest", + "AgentARN": "arn:aws:bedrock-agentcore:us-east-1:123:runtime/test", + } + }, + } + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_scenario_no_tools( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test agent initialization with no tools configured""" + # Configuration with no tools or MCP servers + config = self.base_config.copy() + config["AgentBuilderParams"]["Tools"] = [] + config["AgentBuilderParams"]["MCPServers"] = [] + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Should initialize successfully with no tools + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify + self.assertIsNotNone(agent.agent) + self.assertEqual(len(agent.loaded_tools), 0) + mock_tools_manager_instance.load_all_tools.assert_called_once_with( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=[] + ) + + # Verify Agent was created with empty tools list + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + self.assertEqual(call_args["tools"], []) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_scenario_only_builtin_tools( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test agent initialization with only built-in Strands tools""" + # Configuration with only built-in tools + config = self.base_config.copy() + config["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "HTTP Request"}, + {"ToolId": "File Operations"}, + ] + config["AgentBuilderParams"]["MCPServers"] = [] + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock tools + mock_http_tool = MagicMock() + mock_http_tool.name = "http_request" + mock_file_tool = MagicMock() + mock_file_tool.name = "file_operations" + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [mock_http_tool, mock_file_tool] + mock_tools_manager_instance.get_tool_sources.return_value = { + "http_request": "Strands", + "file_operations": "Strands", + } + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Should initialize successfully with built-in tools + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify + self.assertIsNotNone(agent.agent) + self.assertEqual(len(agent.loaded_tools), 2) + mock_tools_manager_instance.load_all_tools.assert_called_once_with( + mcp_servers=[], + strands_tool_ids=["HTTP Request", "File Operations"], + custom_tool_ids=[], + ) + + # Verify Agent was created with tools + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + self.assertEqual(call_args["tools"], [mock_http_tool, mock_file_tool]) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_scenario_only_mcp_tools( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test agent initialization with only MCP tools""" + # Configuration with only MCP servers + config = self.base_config.copy() + config["AgentBuilderParams"]["Tools"] = [] + config["AgentBuilderParams"]["MCPServers"] = [ + { + "UseCaseId": "gateway-calendar", + "Url": "https://gateway.example.com/calendar", + "Type": "gateway", + }, + { + "UseCaseId": "runtime-database", + "Url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations?qualifier=DEFAULT", + "Type": "runtime", + }, + ] + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock MCP tools + mock_calendar_tool = MagicMock() + mock_calendar_tool.name = "get_calendar_events" + mock_db_tool = MagicMock() + mock_db_tool.name = "query_database" + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [mock_calendar_tool, mock_db_tool] + mock_tools_manager_instance.get_tool_sources.return_value = { + "get_calendar_events": "MCP-Gateway-gateway-calendar", + "query_database": "MCP-Runtime-runtime-database", + } + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Should initialize successfully with MCP tools + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify + self.assertIsNotNone(agent.agent) + self.assertEqual(len(agent.loaded_tools), 2) + mock_tools_manager_instance.load_all_tools.assert_called_once_with( + mcp_servers=[ + { + "use_case_id": "gateway-calendar", + "url": "https://gateway.example.com/calendar", + "type": "gateway", + }, + { + "use_case_id": "runtime-database", + "url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations?qualifier=DEFAULT", + "type": "runtime", + }, + ], + strands_tool_ids=[], + custom_tool_ids=[], + ) + + # Verify Agent was created with MCP tools + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + self.assertEqual(call_args["tools"], [mock_calendar_tool, mock_db_tool]) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_scenario_mixed_tools( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test agent initialization with both built-in and MCP tools""" + # Configuration with both built-in tools and MCP servers + config = self.base_config.copy() + config["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "HTTP Request"}, + {"ToolId": "File Operations"}, + ] + config["AgentBuilderParams"]["MCPServers"] = [ + { + "UseCaseId": "gateway-calendar", + "Url": "https://gateway.example.com/calendar", + "Type": "gateway", + }, + { + "UseCaseId": "runtime-database", + "Url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations?qualifier=DEFAULT", + "Type": "runtime", + }, + ] + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock all tools + mock_http_tool = MagicMock() + mock_http_tool.name = "http_request" + mock_file_tool = MagicMock() + mock_file_tool.name = "file_operations" + mock_calendar_tool = MagicMock() + mock_calendar_tool.name = "get_calendar_events" + mock_db_tool = MagicMock() + mock_db_tool.name = "query_database" + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [ + mock_http_tool, + mock_file_tool, + mock_calendar_tool, + mock_db_tool, + ] + mock_tools_manager_instance.get_tool_sources.return_value = { + "http_request": "Strands", + "file_operations": "Strands", + "get_calendar_events": "MCP-Gateway-gateway-calendar", + "query_database": "MCP-Runtime-runtime-database", + } + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Should initialize successfully with mixed tools + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify + self.assertIsNotNone(agent.agent) + self.assertEqual(len(agent.loaded_tools), 4) + mock_tools_manager_instance.load_all_tools.assert_called_once_with( + mcp_servers=[ + { + "use_case_id": "gateway-calendar", + "url": "https://gateway.example.com/calendar", + "type": "gateway", + }, + { + "use_case_id": "runtime-database", + "url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations?qualifier=DEFAULT", + "type": "runtime", + }, + ], + strands_tool_ids=["HTTP Request", "File Operations"], + custom_tool_ids=[], + ) + + # Verify Agent was created with all tools + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + self.assertEqual(len(call_args["tools"]), 4) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_error_propagation_and_logging( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test proper error propagation and logging when tool loading fails""" + # Configuration with tools + config = self.base_config.copy() + config["AgentBuilderParams"]["Tools"] = [{"ToolId": "HTTP Request"}] + config["AgentBuilderParams"]["MCPServers"] = [ + { + "UseCaseId": "gateway-calendar", + "Url": "https://gateway.example.com/calendar", + "Type": "gateway", + } + ] + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock ToolsManager to raise exception + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.side_effect = Exception( + "Network error loading MCP tools" + ) + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Should initialize successfully despite tool loading failure + with self.assertLogs(level="ERROR") as log_context: + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify agent was created with empty tools + self.assertIsNotNone(agent.agent) + self.assertEqual(len(agent.loaded_tools), 0) + + # Verify error was logged + self.assertTrue(any("Error loading tools" in message for message in log_context.output)) + + # Verify Agent was created with empty tools list + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + self.assertEqual(call_args["tools"], []) + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_tool_sources_tracking( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that tool sources are properly tracked and logged""" + # Configuration with mixed tools + config = self.base_config.copy() + config["AgentBuilderParams"]["Tools"] = [{"ToolId": "HTTP Request"}] + config["AgentBuilderParams"]["MCPServers"] = [ + { + "UseCaseId": "gateway-calendar", + "UseCaseName": "Gateway Calendar", + "Url": "https://example-gateway.bedrock-agentcore.us-east-1.amazonaws.com/calendar", + "Type": "gateway", + } + ] + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config + mock_ddb_helper.return_value = mock_ddb_instance + + # Mock tools + mock_http_tool = MagicMock() + mock_http_tool.name = "http_request" + mock_calendar_tool = MagicMock() + mock_calendar_tool.name = "get_calendar_events" + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [ + mock_http_tool, + mock_calendar_tool, + ] + mock_tools_manager_instance.get_tool_sources.return_value = { + "http_request": "Strands", + "get_calendar_events": "MCP-Gateway-gateway-calendar", + } + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Initialize agent + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify get_tool_sources was called + mock_tools_manager_instance.get_tool_sources.assert_called_once() + + # Verify tools are tracked + tool_sources = agent.tools_manager.get_tool_sources() + self.assertEqual(len(tool_sources), 2) + self.assertEqual(tool_sources["http_request"], "Strands") + self.assertEqual(tool_sources["get_calendar_events"], "MCP-Gateway-gateway-calendar") + + @patch("configurable_agent.ToolsManager") + @patch("configurable_agent.DynamoDBHelper") + @patch("configurable_agent.Agent") + @patch("configurable_agent.BedrockModel") + def test_configuration_extraction( + self, mock_bedrock_model, mock_agent, mock_ddb_helper, mock_tools_manager + ): + """Test that tool IDs and MCP server IDs are correctly extracted from configuration""" + # Configuration with specific IDs + config = self.base_config.copy() + config["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "HTTP Request"}, + {"ToolId": "File Operations"}, + {"ToolId": "Code Interpreter"}, + ] + config["AgentBuilderParams"]["MCPServers"] = [ + { + "UseCaseId": "gateway-calendar", + "UseCaseName": "Gateway Calendar", + "Url": "https://example-gateway.bedrock-agentcore.us-east-1.amazonaws.com/calendar", + "Type": "gateway", + }, + { + "UseCaseId": "runtime-database", + "UseCaseName": "Runtime Database", + "Url": "https://example-bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test-runtime/invocations", + "Type": "runtime", + }, + { + "UseCaseId": "gateway-slack", + "UseCaseName": "Gateway Slack", + "Url": "https://example-gateway.bedrock-agentcore.us-east-1.amazonaws.com/slack", + "Type": "gateway", + }, + ] + + # Setup mocks + mock_ddb_instance = MagicMock() + mock_ddb_instance.get_config.return_value = config + mock_ddb_helper.return_value = mock_ddb_instance + + mock_tools_manager_instance = MagicMock() + mock_tools_manager_instance.load_all_tools.return_value = [] + mock_tools_manager_instance.get_tool_sources.return_value = {} + mock_tools_manager.return_value = mock_tools_manager_instance + + mock_model_instance = MagicMock() + mock_bedrock_model.return_value = mock_model_instance + + mock_agent_instance = MagicMock() + mock_agent.return_value = mock_agent_instance + + # Initialize agent + agent = ConfigurableAgent("test-table", "test-key", "us-east-1") + + # Verify correct IDs were extracted and passed to ToolsManager + mock_tools_manager_instance.load_all_tools.assert_called_once_with( + mcp_servers=[ + { + "use_case_id": "gateway-calendar", + "url": "https://example-gateway.bedrock-agentcore.us-east-1.amazonaws.com/calendar", + "type": "gateway", + }, + { + "use_case_id": "runtime-database", + "url": "https://example-bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test-runtime/invocations", + "type": "runtime", + }, + { + "use_case_id": "gateway-slack", + "url": "https://example-gateway.bedrock-agentcore.us-east-1.amazonaws.com/slack", + "type": "gateway", + }, + ], + strands_tool_ids=["HTTP Request", "File Operations", "Code Interpreter"], + custom_tool_ids=[], + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/deployment/ecr/gaab-strands-agent/test/test_main_memory.py b/deployment/ecr/gaab-strands-agent/test/test_main_memory.py new file mode 100644 index 00000000..30f09189 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_main_memory.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from unittest.mock import Mock, patch +import os + + +class TestMainMemoryConfiguration: + """Test cases for memory configuration in main.py.""" + + def setup_method(self): + """Set up test fixtures.""" + # Set up environment variables + self.env_vars = { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id", + "MEMORY_STRATEGY_ID": "test-strategy-id" + } + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id", + "MEMORY_STRATEGY_ID": "test-strategy-id" + }) + def test_validate_environment_with_strategy_id(self): + """Test validate_environment returns strategy_id when provided.""" + from src.main import validate_environment + + table_name, config_key, region, memory_id, strategy_id = validate_environment() + + assert table_name == "test-table" + assert config_key == "test-key" + assert region == "us-east-1" + assert memory_id == "test-memory-id" + assert strategy_id == "test-strategy-id" + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + def test_validate_environment_without_strategy_id(self): + """Test validate_environment with default strategy_id.""" + from src.main import validate_environment + + table_name, config_key, region, memory_id, strategy_id = validate_environment() + + assert table_name == "test-table" + assert config_key == "test-key" + assert region == "us-east-1" + assert memory_id == "test-memory-id" + assert strategy_id == "" # default value + + @patch('src.main.ConfigurableAgent') + @patch('src.main.AgentCoreMemorySessionManager') + @patch('src.main.validate_environment') + def test_get_agent_instance_with_strategy_id(self, mock_validate_env, mock_session_manager_class, mock_configurable_agent_class): + """Test get_agent_instance creates session manager when strategy_id exists.""" + from src.main import get_agent_instance + + # Mock validate_environment return with strategy_id + mock_validate_env.return_value = ("table", "key", "region", "memory-id", "strategy-id") + + # Mock session manager + mock_session_manager = Mock() + mock_session_manager_class.return_value = mock_session_manager + + # Mock configurable agent + mock_agent = Mock() + mock_configurable_agent_class.return_value = mock_agent + + with patch('src.main._configurable_agent', None): + result = get_agent_instance("session-id", "actor-id") + + # Verify session manager was created + mock_session_manager_class.assert_called_once() + mock_configurable_agent_class.assert_called_once_with( + table_name="table", + config_key="key", + region="region", + session_manager=mock_session_manager + ) + + @patch('src.main.ConfigurableAgent') + @patch('src.main.AgentCoreMemorySessionManager') + @patch('src.main.validate_environment') + def test_get_agent_instance_without_strategy_id(self, mock_validate_env, mock_session_manager_class, mock_configurable_agent_class): + """Test get_agent_instance does not create session manager when strategy_id is empty.""" + from src.main import get_agent_instance + + # Mock validate_environment return with empty strategy_id + mock_validate_env.return_value = ("table", "key", "region", "memory-id", "") + + # Mock configurable agent + mock_agent = Mock() + mock_configurable_agent_class.return_value = mock_agent + + with patch('src.main._configurable_agent', None): + result = get_agent_instance("session-id", "actor-id") + + # Verify session manager was NOT created + mock_session_manager_class.assert_not_called() + mock_configurable_agent_class.assert_called_once_with( + table_name="table", + config_key="key", + region="region", + session_manager=None + ) + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + def test_validate_environment_missing_strategy_id_env_var(self): + """Test validate_environment when MEMORY_STRATEGY_ID env var is missing.""" + from src.main import validate_environment + + # Ensure MEMORY_STRATEGY_ID is not in environment + if "MEMORY_STRATEGY_ID" in os.environ: + del os.environ["MEMORY_STRATEGY_ID"] + + table_name, config_key, region, memory_id, strategy_id = validate_environment() + + assert strategy_id == "" # default value when env var is missing diff --git a/deployment/ecr/gaab-strands-agent/test/test_mcp_tools_loader.py b/deployment/ecr/gaab-strands-agent/test/test_mcp_tools_loader.py new file mode 100644 index 00000000..1a930119 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_mcp_tools_loader.py @@ -0,0 +1,727 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +from unittest.mock import Mock, patch + +import pytest +from gaab_strands_common import MCPToolsLoader +from gaab_strands_common.models import RuntimeMCPParams + + +class TestMCPToolsLoader: + """Test suite for MCPToolsLoader with new dict-based interface""" + + @pytest.fixture + def loader(self): + """Create MCPToolsLoader instance""" + return MCPToolsLoader("us-east-1") + + @pytest.fixture + def gateway_server_dict(self): + """Sample Gateway MCP server dict""" + return { + "use_case_id": "gateway-server-1", + "url": "https://gateway1.example.com/mcp", + "type": "gateway", + } + + @pytest.fixture + def runtime_server_dict(self): + """Sample Runtime MCP server dict""" + return { + "use_case_id": "runtime-server-1", + "url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agent%3Aus-east-1%3A123%3Aagent%2Ftest/invocations?qualifier=DEFAULT", + "type": "runtime", + } + + def test_init(self): + """Test MCPToolsLoader initialization without DynamoDB helper""" + loader = MCPToolsLoader("us-west-2") + + assert loader.region == "us-west-2" + assert loader._active_mcp_clients == [] + # Verify no ddb_helper attribute + assert not hasattr(loader, "ddb_helper") + + def test_load_tools_empty_list(self, loader): + """Test load_tools with empty MCP server list""" + result = loader.load_tools([]) + + assert result == [] + + def test_load_tools_with_new_mcp_server_dict_format( + self, loader, gateway_server_dict, runtime_server_dict + ): + """Test load_tools with new MCP server dict format""" + mcp_servers = [gateway_server_dict, runtime_server_dict] + + # Mock the discovery methods to return mock tools + mock_gateway_tool = Mock() + mock_gateway_tool.name = "gateway_test_tool" + mock_runtime_tool = Mock() + mock_runtime_tool.name = "runtime_test_tool" + + loader._discover_gateway_tools = Mock(return_value=[mock_gateway_tool]) + loader._discover_runtime_tools = Mock(return_value=[mock_runtime_tool]) + + # Execute + result = loader.load_tools(mcp_servers) + + # Verify + assert len(result) == 2 + assert mock_gateway_tool in result + assert mock_runtime_tool in result + + # Verify discovery methods were called with correct parameters + loader._discover_gateway_tools.assert_called_once_with( + "gateway-server-1", "https://gateway1.example.com/mcp" + ) + loader._discover_runtime_tools.assert_called_once_with( + "runtime-server-1", + "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agent%3Aus-east-1%3A123%3Aagent%2Ftest/invocations?qualifier=DEFAULT", + ) + + def test_categorize_servers_with_type_field(self, loader): + """Test _categorize_servers using type field from dict""" + mcp_servers = [ + { + "use_case_id": "gateway-1", + "url": "https://gateway1.example.com/mcp", + "type": "gateway", + }, + { + "use_case_id": "runtime-1", + "url": "https://runtime1.example.com/mcp", + "type": "runtime", + }, + { + "use_case_id": "gateway-2", + "url": "https://gateway2.example.com/mcp", + "type": "gateway", + }, + ] + + gateway_servers, runtime_servers = loader._categorize_servers(mcp_servers) + + # Verify categorization + assert len(gateway_servers) == 2 + assert len(runtime_servers) == 1 + + # Check gateway servers + assert gateway_servers[0]["name"] == "gateway-1" + assert gateway_servers[0]["url"] == "https://gateway1.example.com/mcp" + assert gateway_servers[1]["name"] == "gateway-2" + assert gateway_servers[1]["url"] == "https://gateway2.example.com/mcp" + + # Check runtime servers + assert runtime_servers[0]["name"] == "runtime-1" + assert runtime_servers[0]["url"] == "https://runtime1.example.com/mcp" + + def test_categorize_servers_missing_required_fields(self, loader, caplog): + """Test _categorize_servers handling of missing required fields""" + mcp_servers = [ + {"use_case_id": "valid-server", "url": "https://example.com/mcp", "type": "gateway"}, + { + # Missing use_case_id + "url": "https://example.com/mcp", + "type": "gateway", + }, + { + "use_case_id": "missing-url", + # Missing url + "type": "runtime", + }, + { + "use_case_id": "missing-type", + "url": "https://example.com/mcp", + # Missing type + }, + ] + + with caplog.at_level("WARNING"): + gateway_servers, runtime_servers = loader._categorize_servers(mcp_servers) + + # Should only include the valid server + assert len(gateway_servers) == 1 + assert len(runtime_servers) == 0 + assert gateway_servers[0]["name"] == "valid-server" + + # Should log warnings for invalid servers + assert "MCP server missing required fields" in caplog.text + + def test_categorize_servers_invalid_type_values(self, loader, caplog): + """Test _categorize_servers handling of invalid type values""" + mcp_servers = [ + {"use_case_id": "valid-gateway", "url": "https://example.com/mcp", "type": "gateway"}, + {"use_case_id": "invalid-type-1", "url": "https://example.com/mcp", "type": "invalid"}, + { + "use_case_id": "invalid-type-2", + "url": "https://example.com/mcp", + "type": "Gateway", # Wrong case + }, + ] + + with caplog.at_level("WARNING"): + gateway_servers, runtime_servers = loader._categorize_servers(mcp_servers) + + # Should only include the valid server + assert len(gateway_servers) == 1 + assert len(runtime_servers) == 0 + assert gateway_servers[0]["name"] == "valid-gateway" + + # Should log warnings for invalid type values + assert "Invalid server type" in caplog.text + + def test_no_dynamodb_calls_made(self, loader): + """Test that no DynamoDB calls are made in the new implementation""" + mcp_servers = [ + {"use_case_id": "test-server", "url": "https://example.com/mcp", "type": "gateway"} + ] + + # Mock discovery methods + loader._discover_gateway_tools = Mock(return_value=[]) + loader._discover_runtime_tools = Mock(return_value=[]) + + # Execute + loader.load_tools(mcp_servers) + + # Verify no DynamoDB-related methods are called + # (This is implicit since we removed ddb_helper from constructor) + # The test passes if no AttributeError is raised for missing ddb_helper + + def test_discover_gateway_tools_with_direct_url(self, loader): + """Test Gateway tool discovery with direct URL""" + server_name = "test-gateway" + gateway_url = "https://gateway.example.com/mcp" + + # Mock the method to avoid complex dependency mocking + loader._discover_gateway_tools = Mock(return_value=[]) + + result = loader._discover_gateway_tools(server_name, gateway_url) + + # Should return empty list (mocked) + assert result == [] + + def test_discover_runtime_tools_with_direct_url(self, loader): + """Test Runtime tool discovery with direct URL""" + server_name = "test-runtime" + runtime_url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations?qualifier=DEFAULT" + + # Mock the method to avoid complex dependency mocking + loader._discover_runtime_tools = Mock(return_value=[]) + + result = loader._discover_runtime_tools(server_name, runtime_url) + + # Should return empty list (mocked) + assert result == [] + + def test_error_handling_during_categorization(self, loader, caplog): + """Test error handling during server categorization""" + mcp_servers = [ + {"use_case_id": "valid-server", "url": "https://example.com/mcp", "type": "gateway"}, + # Invalid server that might cause errors + None, # This should be handled gracefully + {"use_case_id": "another-valid", "url": "https://example.com/mcp", "type": "runtime"}, + ] + + with caplog.at_level("ERROR"): + gateway_servers, runtime_servers = loader._categorize_servers(mcp_servers) + + # Should continue processing valid servers despite errors + assert len(gateway_servers) == 1 + assert len(runtime_servers) == 1 + assert gateway_servers[0]["name"] == "valid-server" + assert runtime_servers[0]["name"] == "another-valid" + + # Should log error for problematic server + assert "Error categorizing server" in caplog.text + + def test_load_tools_integration_with_mixed_servers(self, loader): + """Test complete load_tools flow with mixed Gateway and Runtime servers""" + mcp_servers = [ + { + "use_case_id": "gateway-1", + "url": "https://gateway1.example.com/mcp", + "type": "gateway", + }, + { + "use_case_id": "runtime-1", + "url": "https://runtime1.example.com/mcp", + "type": "runtime", + }, + { + "use_case_id": "gateway-2", + "url": "https://gateway2.example.com/mcp", + "type": "gateway", + }, + ] + + # Mock discovery methods + mock_gateway_tool1 = Mock() + mock_gateway_tool1.name = "gateway_tool_1" + mock_gateway_tool2 = Mock() + mock_gateway_tool2.name = "gateway_tool_2" + mock_runtime_tool = Mock() + mock_runtime_tool.name = "runtime_tool_1" + + def mock_gateway_discovery(name, url): + if name == "gateway-1": + return [mock_gateway_tool1] + elif name == "gateway-2": + return [mock_gateway_tool2] + return [] + + def mock_runtime_discovery(name, url): + if name == "runtime-1": + return [mock_runtime_tool] + return [] + + loader._discover_gateway_tools = Mock(side_effect=mock_gateway_discovery) + loader._discover_runtime_tools = Mock(side_effect=mock_runtime_discovery) + + # Execute + result = loader.load_tools(mcp_servers) + + # Verify all tools were discovered + assert len(result) == 3 + tool_names = [tool.name for tool in result] + assert "gateway_tool_1" in tool_names + assert "gateway_tool_2" in tool_names + assert "runtime_tool_1" in tool_names + + # Verify discovery methods were called correctly + assert loader._discover_gateway_tools.call_count == 2 + assert loader._discover_runtime_tools.call_count == 1 + + +class TestRuntimeMCPToolDiscoveryWithDirectURL: + """Test suite for Runtime MCP tool discovery with direct URL""" + + @pytest.fixture + def loader(self): + """Create MCPToolsLoader instance""" + return MCPToolsLoader("us-east-1") + + @pytest.fixture + def runtime_params(self): + """Create RuntimeMCPParams instance for testing""" + return RuntimeMCPParams( + EcrUri="123.dkr.ecr.us-east-1.amazonaws.com/test:latest", + RuntimeArn="arn:aws:bedrock-agent:us-east-1:123456789:agent/test-agent", + RuntimeUrl="https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations", + ) + + def test_runtime_url_used_directly(self, loader): + """Test that Runtime URL is used directly without construction""" + server_name = "test-runtime" + runtime_url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agent%3Aus-east-1%3A123%3Aagent%2Ftest/invocations?qualifier=DEFAULT" + + # Mock the imports inside the function + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client") as mock_streamablehttp, + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + # Setup mocks + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + # Mock the requires_access_token decorator + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + # Execute + result = loader._discover_runtime_tools(server_name, runtime_url) + + # Verify the URL was used directly + client_factory = mock_mcp_client_class.call_args[0][0] + client_factory() + + # Verify streamablehttp_client was called with the exact URL provided + call_args = mock_streamablehttp.call_args + url = call_args[0][0] + assert url == runtime_url + + def test_no_url_construction_logic(self, loader): + """Test that no URL construction logic is used""" + server_name = "test-runtime" + runtime_url = "https://custom.runtime.url/mcp" + + # Mock the imports inside the function + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client") as mock_streamablehttp, + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + # Setup mocks + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + # Mock the requires_access_token decorator + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + # Execute with custom URL + result = loader._discover_runtime_tools(server_name, runtime_url) + + # Verify the URL was used directly + client_factory = mock_mcp_client_class.call_args[0][0] + client_factory() + + call_args = mock_streamablehttp.call_args + url = call_args[0][0] + # The URL should be the direct runtime_url + assert url == runtime_url + + def test_gateway_url_used_directly(self, loader): + """Test that Gateway URL is used directly without extraction""" + server_name = "test-gateway" + gateway_url = "https://custom.gateway.url/mcp" + + # Mock the imports inside the function + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client") as mock_streamablehttp, + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + # Setup mocks + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + # Execute + result = loader._discover_gateway_tools(server_name, gateway_url) + + # Verify the URL was used directly (if MCP client was called) + if mock_mcp_client_class.call_args: + client_factory = mock_mcp_client_class.call_args[0][0] + client_factory() + + call_args = mock_streamablehttp.call_args + url = call_args[0][0] + assert url == gateway_url + + # Test passes if no authentication error occurs + assert result == [] # Empty result due to auth failure is expected + + @patch("strands.tools.mcp.MCPClient") + @patch("mcp.client.streamable_http.streamablehttp_client") + @patch("time.sleep") + def test_retry_logic_max_retries_exceeded( + self, mock_sleep, mock_streamablehttp, mock_mcp_client_class, loader, caplog + ): + """Test retry logic when max retries are exceeded""" + runtime_url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations" + + # Setup mocks to always fail with 429 + mock_client_instance = Mock() + mock_client_instance.start.side_effect = Exception("429 Too Many Requests") + mock_mcp_client_class.return_value = mock_client_instance + + # Mock the requires_access_token decorator + with patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator: + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + # Execute - should fail after max retries + result = loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify max retries (3 attempts) + assert mock_client_instance.start.call_count == 3 + + # Should return empty list after exhausting retries + assert result == [] + + # Should log error + assert "Error discovering tools" in caplog.text or len(result) == 0 + + def test_successful_tool_discovery_with_direct_url(self, loader): + """Test successful tool discovery using direct URL""" + server_name = "test-runtime" + runtime_url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations?qualifier=DEFAULT" + + # Mock the imports inside the function + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client") as mock_streamablehttp, + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + # Setup mocks + mock_tool1 = Mock() + mock_tool1.name = "direct_url_tool_1" + mock_tool2 = Mock() + mock_tool2.name = "direct_url_tool_2" + + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [mock_tool1, mock_tool2] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + # Execute + result = loader._discover_runtime_tools(server_name, runtime_url) + + # Verify tools were returned + assert len(result) == 2 + assert result[0].name == "direct_url_tool_1" + assert result[1].name == "direct_url_tool_2" + + # Verify client was stored for tool execution + assert len(loader._active_mcp_clients) == 1 + assert loader._active_mcp_clients[0] == mock_client_instance + + @patch("strands.tools.mcp.MCPClient") + @patch("mcp.client.streamable_http.streamablehttp_client") + def test_empty_tools_list(self, mock_streamablehttp, mock_mcp_client_class, loader): + """Test handling of empty tools list from Runtime server""" + runtime_url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations" + + # Setup mocks to return empty list + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + # Mock the requires_access_token decorator + with patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator: + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + # Execute with runtime server that returns empty tools + result = loader._discover_runtime_tools("test-runtime", runtime_url) + + # Verify empty list is returned + assert result == [] + + # Verify client was created and called + mock_mcp_client_class.assert_called_once() + mock_client_instance.list_tools_sync.assert_called_once() + + def test_tool_loading_with_mixed_gateway_and_runtime_servers(self, loader): + """Test tool loading with mixed Gateway and Runtime servers""" + mcp_servers = [ + { + "use_case_id": "gateway-1", + "url": "https://gateway1.example.com/mcp", + "type": "gateway", + }, + { + "use_case_id": "runtime-1", + "url": "https://runtime1.example.com/mcp", + "type": "runtime", + }, + { + "use_case_id": "gateway-2", + "url": "https://gateway2.example.com/mcp", + "type": "gateway", + }, + ] + + # Mock discovery methods to return different tools + gateway_tools = [Mock(name="gateway_tool_1"), Mock(name="gateway_tool_2")] + runtime_tools = [Mock(name="runtime_tool_1")] + + def mock_discover_gateway(name, url): + if "gateway1" in url: + return [gateway_tools[0]] + elif "gateway2" in url: + return [gateway_tools[1]] + return [] + + def mock_discover_runtime(name, url): + if "runtime1" in url: + return runtime_tools + return [] + + loader._discover_gateway_tools = Mock(side_effect=mock_discover_gateway) + loader._discover_runtime_tools = Mock(side_effect=mock_discover_runtime) + + # Execute + result = loader.load_tools(mcp_servers) + + # Verify all tools are loaded + assert len(result) == 3 + assert gateway_tools[0] in result + assert gateway_tools[1] in result + assert runtime_tools[0] in result + + # Verify discovery methods were called correctly + assert loader._discover_gateway_tools.call_count == 2 + assert loader._discover_runtime_tools.call_count == 1 + + def test_error_handling_for_invalid_server_configurations(self, loader): + """Test error handling for invalid server configurations""" + invalid_servers = [ + {"use_case_id": "valid-server", "url": "https://example.com/mcp", "type": "gateway"}, + { + # Missing use_case_id + "url": "https://example.com/mcp", + "type": "gateway", + }, + { + "use_case_id": "invalid-type-server", + "url": "https://example.com/mcp", + "type": "invalid", + }, + ] + + # Mock discovery method for valid server + mock_tool = Mock() + mock_tool.name = "valid_tool" + loader._discover_gateway_tools = Mock(return_value=[mock_tool]) + + # Execute - should handle invalid servers gracefully + result = loader.load_tools(invalid_servers) + + # Should only load tools from valid server + assert len(result) == 1 + assert result[0] == mock_tool + + # Verify the discovery method was called for valid server + loader._discover_gateway_tools.assert_called_once_with( + "valid-server", "https://example.com/mcp" + ) + + @patch("strands.tools.mcp.MCPClient") + @patch("mcp.client.streamable_http.streamablehttp_client") + def test_runtime_with_special_characters_in_url( + self, mock_streamablehttp, mock_mcp_client_class, loader + ): + """Test Runtime discovery with special characters in URL""" + # URL with encoded special characters + runtime_url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agent%3Aus-east-1%3A123%3Aagent%2Ftest-agent_v2.0/invocations" + + # Setup mocks + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + # Mock the requires_access_token decorator + with patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator: + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + # Execute + result = loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify URL was used directly + client_factory = mock_mcp_client_class.call_args[0][0] + client_factory() + + url = mock_streamablehttp.call_args[0][0] + # Verify URL matches what was provided + assert url == runtime_url + + @patch("strands.tools.mcp.MCPClient") + @patch("mcp.client.streamable_http.streamablehttp_client") + def test_runtime_different_regions(self, mock_streamablehttp, mock_mcp_client_class): + """Test Runtime discovery works with different AWS regions""" + regions = ["us-east-1", "us-west-2", "eu-west-1", "ap-southeast-1"] + + for region in regions: + # Create loader for specific region + loader = MCPToolsLoader(region) + + runtime_url = ( + f"https://bedrock-agentcore.{region}.amazonaws.com/runtimes/test/invocations" + ) + + # Setup mocks + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + # Mock the requires_access_token decorator + with patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator: + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + # Execute + loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify URL contains correct region + client_factory = mock_mcp_client_class.call_args[0][0] + client_factory() + + url = mock_streamablehttp.call_args[0][0] + assert f"bedrock-agentcore.{region}.amazonaws.com" in url + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/deployment/ecr/gaab-strands-agent/test/test_models.py b/deployment/ecr/gaab-strands-agent/test/test_models.py new file mode 100644 index 00000000..0a0c472b --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_models.py @@ -0,0 +1,744 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Unit tests for auto-parsing Pydantic models +""" + +import copy +import os +import sys +import unittest + +from pydantic import ValidationError + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +from gaab_strands_common.models import ( + AgentBuilderParams, + BedrockLlmParams, + GatewayMCPParams, + LlmParams, + MCPParams, + MCPServerConfig, + MCPServerReference, + MemoryConfig, + RuntimeMCPParams, + StrandsToolReference, +) +from gaab_strands_common.models import ( + UseCaseConfig as AgentConfig, +) # Alias for backward compatibility in tests + + +class TestAutoParsing(unittest.TestCase): + """Test auto-parsing models with real DDB structure""" + + def setUp(self): + """Set up test data""" + self.full_config = { + "UseCaseName": "Test Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are a helpful AI assistant.", + "Tools": [ + {"ToolId": "HTTP Request"}, + {"ToolId": "File Operations"}, + {"ToolId": "JSON Parser"}, + ], + "MemoryConfig": {"LongTermEnabled": True}, + }, + "LlmParams": { + "BedrockLlmParams": { + "ModelId": "amazon.nova-lite-v1:0", + "BedrockInferenceType": "QUICK_START", + "GuardrailIdentifier": "test-guardrail", + "GuardrailVersion": "1", + }, + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "ModelParams": {"custom_param": "value"}, + }, + } + + self.minimal_config = { + "UseCaseName": "Minimal Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": {"SystemPrompt": "You are minimal."}, + "LlmParams": { + "BedrockLlmParams": {"ModelId": "amazon.nova-lite-v1:0"}, + "ModelProvider": "Bedrock", + "Temperature": 0.5, + }, + } + + self.real_ddb_config = { + "AgentBuilderParams": { + "MemoryConfig": {"LongTermEnabled": True}, + "SystemPrompt": "You are a helpful AI assistant.", + "Tools": [ + {"ToolId": "HTTP Request"}, + {"ToolId": "File Operations"}, + {"ToolId": "JSON Parser"}, + {"ToolId": "Text Processing"}, + {"ToolId": "Date/Time Utils"}, + {"ToolId": "Math Operations"}, + ], + }, + "LlmParams": { + "BedrockLlmParams": { + "BedrockInferenceType": "QUICK_START", + "ModelId": "amazon.nova-lite-v1:0", + }, + "ModelParams": {}, + "ModelProvider": "Bedrock", + "Streaming": True, + "Temperature": 0.1, + "Verbose": False, + }, + "UseCaseName": "ag-test-1", + "UseCaseType": "AgentBuilder", + } + + self.inference_profile_config = { + "UseCaseName": "Inference Profile Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are a helpful AI assistant.", + "MemoryConfig": {"LongTermEnabled": True}, + }, + "LlmParams": { + "BedrockLlmParams": { + "BedrockInferenceType": "INFERENCE_PROFILE", + "InferenceProfileId": "us.anthropic.claude-3-7-sonnet-20250219-v1:0", + }, + "ModelProvider": "Bedrock", + "Temperature": 0.5, + "Streaming": True, + "Verbose": False, + "ModelParams": {}, + }, + } + + self.provisioned_config = { + "UseCaseName": "Provisioned Model Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": {"SystemPrompt": "You are a helpful AI assistant."}, + "LlmParams": { + "BedrockLlmParams": { + "BedrockInferenceType": "PROVISIONED", + "ModelArn": "arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abcdef123456", + }, + "ModelProvider": "Bedrock", + "Temperature": 0.3, + }, + } + + def test_full_config_parsing(self): + """Test full config with all fields""" + config = AgentConfig.from_ddb_config(self.full_config) + + # Test top level + self.assertEqual(config.use_case_name, "Test Agent") + self.assertEqual(config.use_case_type, "AgentBuilder") + + # Test agent params + self.assertEqual( + config.agent_builder_params.system_prompt, "You are a helpful AI assistant." + ) + self.assertEqual(len(config.agent_builder_params.tools), 3) + # Tools are now StrandsToolReference objects + tool_ids = config.agent_builder_params.get_tool_ids() + self.assertEqual(tool_ids, ["HTTP Request", "File Operations", "JSON Parser"]) + self.assertTrue(config.agent_builder_params.memory_config.long_term_enabled) + + # Test LLM params + self.assertEqual(config.llm_params.model_provider, "Bedrock") + self.assertEqual(config.llm_params.temperature, 0.7) + self.assertTrue(config.llm_params.streaming) + self.assertFalse(config.llm_params.verbose) + + # Test Bedrock params + self.assertEqual( + config.llm_params.bedrock_llm_params.model_identifier, "amazon.nova-lite-v1:0" + ) + self.assertEqual(config.llm_params.bedrock_llm_params.bedrock_inference_type, "QUICK_START") + self.assertEqual( + config.llm_params.bedrock_llm_params.guardrail_identifier, "test-guardrail" + ) + self.assertEqual(config.llm_params.bedrock_llm_params.guardrail_version, "1") + # Test model identifier property + + def test_minimal_config_parsing(self): + """Test minimal config with defaults""" + config = AgentConfig.from_ddb_config(self.minimal_config) + + self.assertEqual(config.use_case_name, "Minimal Agent") + self.assertEqual(config.agent_builder_params.system_prompt, "You are minimal.") + self.assertEqual(config.agent_builder_params.get_tool_ids(), []) # Default empty list + self.assertFalse( + config.agent_builder_params.memory_config.long_term_enabled + ) # Default False + self.assertEqual(config.llm_params.temperature, 0.5) + self.assertTrue(config.llm_params.streaming) # Default True + self.assertFalse(config.llm_params.verbose) # Default False + self.assertIsNone(config.llm_params.bedrock_llm_params.guardrail_identifier) # Default None + + def test_real_ddb_config(self): + """Test with actual DDB config structure""" + config = AgentConfig.from_ddb_config(self.real_ddb_config) + + self.assertEqual(config.use_case_name, "ag-test-1") + self.assertEqual(len(config.agent_builder_params.tools), 6) + self.assertEqual(config.llm_params.temperature, 0.1) + self.assertTrue(config.llm_params.streaming) + self.assertFalse(config.llm_params.verbose) + self.assertEqual(config.llm_params.bedrock_llm_params.bedrock_inference_type, "QUICK_START") + + def test_tools_parsing(self): + """Test tools list parsing from DDB format""" + config = AgentConfig.from_ddb_config(self.full_config) + expected_tools = ["HTTP Request", "File Operations", "JSON Parser"] + self.assertEqual(config.agent_builder_params.get_tool_ids(), expected_tools) + + def test_missing_required_fields(self): + """Test validation errors for missing required fields""" + # Missing UseCaseName + with self.assertRaises(ValueError): + AgentConfig.from_ddb_config({"UseCaseType": "AgentBuilder"}) + # Missing ModelId for QUICK_START inference type + invalid_config = copy.deepcopy(self.minimal_config) + invalid_config["LlmParams"]["BedrockLlmParams"]["BedrockInferenceType"] = "QUICK_START" + del invalid_config["LlmParams"]["BedrockLlmParams"]["ModelId"] + with self.assertRaises(ValueError): + AgentConfig.from_ddb_config(invalid_config) + + def test_type_conversion(self): + """Test automatic type conversion""" + config_with_string_temp = copy.deepcopy(self.minimal_config) + config_with_string_temp["LlmParams"]["Temperature"] = "0.8" # String instead of float + + config = AgentConfig.from_ddb_config(config_with_string_temp) + self.assertEqual(config.llm_params.temperature, 0.8) + self.assertIsInstance(config.llm_params.temperature, float) + + def test_nested_defaults(self): + """Test nested object defaults work correctly""" + config_without_memory = { + "UseCaseName": "No Memory Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": {"SystemPrompt": "You are simple."}, + "LlmParams": { + "BedrockLlmParams": {"ModelId": "test-model"}, + "ModelProvider": "Bedrock", + "Temperature": 0.5, + }, + } + + config = AgentConfig.from_ddb_config(config_without_memory) + self.assertFalse(config.agent_builder_params.memory_config.long_term_enabled) + self.assertEqual(config.agent_builder_params.get_tool_ids(), []) + self.assertEqual(config.llm_params.model_params, {}) + + +if __name__ == "__main__": + unittest.main() + + +class TestMCPModels(unittest.TestCase): + """Test MCP-related models""" + + def setUp(self): + """Set up test data""" + self.minimal_config = { + "UseCaseName": "Minimal Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": {"SystemPrompt": "You are minimal."}, + "LlmParams": { + "BedrockLlmParams": {"ModelId": "amazon.nova-lite-v1:0"}, + "ModelProvider": "Bedrock", + "Temperature": 0.5, + }, + } + + self.inference_profile_config = { + "UseCaseName": "Inference Profile Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are a helpful AI assistant.", + "MemoryConfig": {"LongTermEnabled": True}, + }, + "LlmParams": { + "BedrockLlmParams": { + "BedrockInferenceType": "INFERENCE_PROFILE", + "InferenceProfileId": "us.anthropic.claude-3-7-sonnet-20250219-v1:0", + }, + "ModelProvider": "Bedrock", + "Temperature": 0.5, + "Streaming": True, + "Verbose": False, + "ModelParams": {}, + }, + } + + self.provisioned_config = { + "UseCaseName": "Provisioned Model Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": {"SystemPrompt": "You are a helpful AI assistant."}, + "LlmParams": { + "BedrockLlmParams": { + "BedrockInferenceType": "PROVISIONED", + "ModelArn": "arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abcdef123456", + }, + "ModelProvider": "Bedrock", + "Temperature": 0.3, + }, + } + + def test_strands_tool_reference(self): + """Test StrandsToolReference model""" + tool_ref = StrandsToolReference(ToolId="web_search") + self.assertEqual(tool_ref.tool_id, "web_search") + + def test_mcp_server_reference(self): + """Test MCPServerReference model with new structure""" + server_ref = MCPServerReference( + UseCaseId="gateway-google-calendar", Url="https://test.gateway.com/mcp", Type="gateway" + ) + self.assertEqual(server_ref.use_case_id, "gateway-google-calendar") + self.assertEqual(server_ref.url, "https://test.gateway.com/mcp") + self.assertEqual(server_ref.type, "gateway") + + def test_mcp_server_reference_runtime(self): + """Test MCPServerReference model with runtime type""" + server_ref = MCPServerReference( + UseCaseId="runtime-database-tools", + Url="https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations", + Type="runtime", + ) + self.assertEqual(server_ref.use_case_id, "runtime-database-tools") + self.assertEqual( + server_ref.url, + "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations", + ) + self.assertEqual(server_ref.type, "runtime") + + def test_mcp_server_reference_invalid_type(self): + """Test MCPServerReference model with invalid type""" + with self.assertRaises(ValidationError) as context: + MCPServerReference(UseCaseId="test-server", Url="https://test.com", Type="invalid") + self.assertIn("Type must be 'gateway' or 'runtime'", str(context.exception)) + + def test_gateway_mcp_params(self): + """Test GatewayMCPParams model""" + gateway_params = GatewayMCPParams( + GatewayUrl="https://test.gateway.com/mcp", + GatewayArn="arn:aws:bedrock-agentcore:us-east-1:123:gateway/test", + GatewayId="test-gateway", + GatewayName="Test Gateway", + TargetParams=[], + ) + self.assertEqual(gateway_params.gateway_url, "https://test.gateway.com/mcp") + self.assertEqual(gateway_params.gateway_id, "test-gateway") + + def test_runtime_mcp_params(self): + """Test RuntimeMCPParams model""" + runtime_params = RuntimeMCPParams( + EcrUri="123.dkr.ecr.us-east-1.amazonaws.com/test:latest", + RuntimeArn="arn:aws:bedrock-agent-runtime:us-east-1:123:agent-runtime/test", + RuntimeUrl="https://bedrock-agent-runtime.us-east-1.amazonaws.com", + ) + self.assertEqual(runtime_params.ecr_uri, "123.dkr.ecr.us-east-1.amazonaws.com/test:latest") + self.assertEqual( + runtime_params.runtime_arn, + "arn:aws:bedrock-agent-runtime:us-east-1:123:agent-runtime/test", + ) + self.assertEqual( + runtime_params.runtime_url, "https://bedrock-agent-runtime.us-east-1.amazonaws.com" + ) + + def test_mcp_server_config_gateway(self): + """Test MCPServerConfig with Gateway params""" + config = MCPServerConfig( + UseCaseName="TestGateway", + UseCaseType="MCPServer", + MCPParams={ + "GatewayParams": { + "GatewayUrl": "https://test.gateway.com/mcp", + "GatewayArn": "arn:aws:bedrock-agentcore:us-east-1:123:gateway/test", + "GatewayId": "test-gateway", + "GatewayName": "Test Gateway", + "TargetParams": [], + } + }, + ) + self.assertEqual(config.use_case_name, "TestGateway") + self.assertEqual(config.use_case_type, "MCPServer") + self.assertIsNotNone(config.mcp_params.gateway_params) + self.assertIsNone(config.mcp_params.runtime_params) + + def test_mcp_server_config_runtime(self): + """Test MCPServerConfig with Runtime params""" + config = MCPServerConfig( + UseCaseName="TestRuntime", + UseCaseType="MCPServer", + MCPParams={ + "RuntimeParams": { + "EcrUri": "123.dkr.ecr.us-east-1.amazonaws.com/test:latest", + "RuntimeArn": "arn:aws:bedrock-agent-runtime:us-east-1:123:agent-runtime/test", + "RuntimeUrl": "https://bedrock-agent-runtime.us-east-1.amazonaws.com", + } + }, + ) + self.assertEqual(config.use_case_name, "TestRuntime") + self.assertIsNone(config.mcp_params.gateway_params) + self.assertIsNotNone(config.mcp_params.runtime_params) + self.assertEqual( + config.mcp_params.runtime_params.runtime_arn, + "arn:aws:bedrock-agent-runtime:us-east-1:123:agent-runtime/test", + ) + + def test_agent_builder_params_with_mcp_servers(self): + """Test AgentBuilderParams with MCP servers""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=[{"ToolId": "web_search"}], + MCPServers=[ + { + "UseCaseId": "gateway-google-calendar", + "Url": "https://test.gateway.com/mcp", + "Type": "gateway", + }, + { + "UseCaseId": "runtime-database-tools", + "Url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations", + "Type": "runtime", + }, + ], + ) + self.assertEqual(len(params.tools), 1) + self.assertEqual(len(params.mcp_servers), 2) + self.assertEqual(params.get_tool_ids(), ["web_search"]) + + # Test new get_mcp_servers() method + mcp_servers = params.get_mcp_servers() + self.assertEqual(len(mcp_servers), 2) + self.assertEqual(mcp_servers[0]["use_case_id"], "gateway-google-calendar") + self.assertEqual(mcp_servers[0]["url"], "https://test.gateway.com/mcp") + self.assertEqual(mcp_servers[0]["type"], "gateway") + self.assertEqual(mcp_servers[1]["use_case_id"], "runtime-database-tools") + self.assertEqual(mcp_servers[1]["type"], "runtime") + + # Test deprecated get_mcp_server_ids() method + import logging + from unittest.mock import patch + + with patch("gaab_strands_common.models.logger") as mock_logger: + server_ids = params.get_mcp_server_ids() + self.assertEqual(server_ids, ["gateway-google-calendar", "runtime-database-tools"]) + # Verify deprecation warning was logged + mock_logger.warning.assert_called_once() + self.assertIn("deprecated", mock_logger.warning.call_args[0][0]) + + def test_agent_builder_params_backward_compatibility(self): + """Test that old string format for tools still works""" + # Old format with string tools + old_format_config = { + "UseCaseName": "Test Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are helpful.", + "Tools": ["tool1", "tool2"], # Old string format + }, + "LlmParams": { + "BedrockLlmParams": {"ModelId": "test-model"}, + "ModelProvider": "Bedrock", + "Temperature": 0.5, + }, + } + + config = AgentConfig.from_ddb_config(old_format_config) + self.assertEqual(config.agent_builder_params.get_tool_ids(), ["tool1", "tool2"]) + + def test_inference_profile_config(self): + """Test inference profile configuration""" + config = AgentConfig.from_ddb_config(self.inference_profile_config) + + bedrock_params = config.llm_params.bedrock_llm_params + self.assertEqual(bedrock_params.bedrock_inference_type, "INFERENCE_PROFILE") + self.assertEqual( + bedrock_params.inference_profile_id, "us.anthropic.claude-3-7-sonnet-20250219-v1:0" + ) + self.assertIsNone(bedrock_params.model_id) + self.assertIsNone(bedrock_params.model_arn) + # Test model identifier property returns inference profile ID + self.assertEqual( + bedrock_params.model_identifier, "us.anthropic.claude-3-7-sonnet-20250219-v1:0" + ) + + def test_provisioned_model_config(self): + """Test provisioned model configuration""" + config = AgentConfig.from_ddb_config(self.provisioned_config) + + bedrock_params = config.llm_params.bedrock_llm_params + self.assertEqual(bedrock_params.bedrock_inference_type, "PROVISIONED") + self.assertEqual( + bedrock_params.model_arn, + "arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abcdef123456", + ) + self.assertIsNone(bedrock_params.model_id) + self.assertIsNone(bedrock_params.inference_profile_id) + # Test model identifier property returns model ARN + self.assertEqual( + bedrock_params.model_identifier, + "arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abcdef123456", + ) + + def test_inference_type_validation_errors(self): + """Test validation errors for incorrect inference type configurations""" + # INFERENCE_PROFILE without InferenceProfileId + invalid_profile_config = copy.deepcopy(self.inference_profile_config) + del invalid_profile_config["LlmParams"]["BedrockLlmParams"]["InferenceProfileId"] + with self.assertRaises(ValueError) as context: + AgentConfig.from_ddb_config(invalid_profile_config) + self.assertIn("InferenceProfileId is required", str(context.exception)) + + # PROVISIONED without ModelArn + invalid_provisioned_config = copy.deepcopy(self.provisioned_config) + del invalid_provisioned_config["LlmParams"]["BedrockLlmParams"]["ModelArn"] + with self.assertRaises(ValueError) as context: + AgentConfig.from_ddb_config(invalid_provisioned_config) + self.assertIn("ModelArn is required", str(context.exception)) + + # QUICK_START without ModelId + invalid_quick_start_config = copy.deepcopy(self.minimal_config) + invalid_quick_start_config["LlmParams"]["BedrockLlmParams"][ + "BedrockInferenceType" + ] = "QUICK_START" + del invalid_quick_start_config["LlmParams"]["BedrockLlmParams"]["ModelId"] + with self.assertRaises(ValueError) as context: + AgentConfig.from_ddb_config(invalid_quick_start_config) + self.assertIn("ModelId is required", str(context.exception)) + + # OTHER_FOUNDATION without ModelId + invalid_other_foundation_config = copy.deepcopy(self.minimal_config) + invalid_other_foundation_config["LlmParams"]["BedrockLlmParams"][ + "BedrockInferenceType" + ] = "OTHER_FOUNDATION" + del invalid_other_foundation_config["LlmParams"]["BedrockLlmParams"]["ModelId"] + with self.assertRaises(ValueError) as context: + AgentConfig.from_ddb_config(invalid_other_foundation_config) + self.assertIn("ModelId is required", str(context.exception)) + + def test_tools_parsing_edge_cases(self): + """Test edge cases for tools parsing""" + # Test with empty tools list + config_empty_tools = copy.deepcopy(self.minimal_config) + config_empty_tools["AgentBuilderParams"]["Tools"] = [] + config = AgentConfig.from_ddb_config(config_empty_tools) + self.assertEqual(config.agent_builder_params.tools, []) + + # Test with mixed tool formats (dict and string) + config_mixed_tools = copy.deepcopy(self.minimal_config) + config_mixed_tools["AgentBuilderParams"]["Tools"] = [ + {"ToolId": "Tool1"}, + "Tool2", + {"ToolId": "Tool3"}, + ] + config = AgentConfig.from_ddb_config(config_mixed_tools) + self.assertEqual(config.agent_builder_params.get_tool_ids(), ["Tool1", "Tool2", "Tool3"]) + + # Test with non-list tools (should return empty list) + config_invalid_tools = copy.deepcopy(self.minimal_config) + config_invalid_tools["AgentBuilderParams"]["Tools"] = "not a list" + config = AgentConfig.from_ddb_config(config_invalid_tools) + self.assertEqual(config.agent_builder_params.get_tool_ids(), []) + + def test_model_identifier_edge_cases(self): + """Test model_identifier property with various inference types""" + # Test with no inference type (should return model_id) + bedrock_params = BedrockLlmParams(ModelId="test-model") + self.assertEqual(bedrock_params.model_identifier, "test-model") + + # Test with unknown inference type (should return model_id) + bedrock_params = BedrockLlmParams(ModelId="test-model", BedrockInferenceType="UNKNOWN_TYPE") + self.assertEqual(bedrock_params.model_identifier, "test-model") + + # Test with None values + bedrock_params = BedrockLlmParams() + self.assertIsNone(bedrock_params.model_identifier) + + def test_individual_model_creation(self): + """Test creating individual models directly""" + # Test MemoryConfig + memory_config = MemoryConfig(LongTermEnabled=True) + self.assertTrue(memory_config.long_term_enabled) + + memory_config_default = MemoryConfig() + self.assertFalse(memory_config_default.long_term_enabled) + + # Test AgentBuilderParams with string tools (backward compatibility) + agent_params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=["tool1", "tool2"], + MemoryConfig={"LongTermEnabled": True}, + ) + self.assertEqual(agent_params.system_prompt, "Test prompt") + self.assertEqual(agent_params.get_tool_ids(), ["tool1", "tool2"]) + self.assertTrue(agent_params.memory_config.long_term_enabled) + + # Test BedrockLlmParams with all fields + bedrock_params = BedrockLlmParams( + ModelId="test-model", + ModelArn="test-arn", + InferenceProfileId="test-profile", + BedrockInferenceType="QUICK_START", + GuardrailIdentifier="test-guardrail", + GuardrailVersion="1", + ) + self.assertEqual(bedrock_params.model_id, "test-model") + self.assertEqual(bedrock_params.model_arn, "test-arn") + self.assertEqual(bedrock_params.inference_profile_id, "test-profile") + self.assertEqual(bedrock_params.bedrock_inference_type, "QUICK_START") + self.assertEqual(bedrock_params.guardrail_identifier, "test-guardrail") + self.assertEqual(bedrock_params.guardrail_version, "1") + + # Test LlmParams + llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.5, + Streaming=False, + Verbose=True, + BedrockLlmParams={"ModelId": "test-model"}, + ModelParams={"param1": "value1"}, + ) + self.assertEqual(llm_params.model_provider, "Bedrock") + self.assertEqual(llm_params.temperature, 0.5) + self.assertFalse(llm_params.streaming) + self.assertTrue(llm_params.verbose) + self.assertEqual(llm_params.model_params, {"param1": "value1"}) + + def test_error_logging_in_from_ddb_config(self): + """Test error logging in from_ddb_config method""" + import logging + from unittest.mock import patch + + # Test with completely invalid config that will cause parsing error + invalid_config = {"invalid": "config"} + + with patch("gaab_strands_common.models.logger") as mock_logger: + with self.assertRaises(ValueError) as context: + AgentConfig.from_ddb_config(invalid_config) + + # Verify that logger.error was called + mock_logger.error.assert_called_once() + self.assertIn("Error parsing configuration", str(context.exception)) + + def test_model_config_populate_by_name(self): + """Test that model_config populate_by_name works correctly""" + # Test that aliases work correctly (this is the main functionality) + config_with_aliases = { + "UseCaseName": "Test Agent", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": {"SystemPrompt": "Test prompt"}, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.5, + "BedrockLlmParams": {"ModelId": "test-model"}, + }, + } + + config = AgentConfig(**config_with_aliases) + self.assertEqual(config.use_case_name, "Test Agent") + self.assertEqual(config.use_case_type, "AgentBuilder") + self.assertEqual(config.agent_builder_params.system_prompt, "Test prompt") + self.assertEqual(config.llm_params.model_provider, "Bedrock") + + def test_bedrock_validation_error_coverage(self): + """Test specific validation error lines for complete coverage""" + + # Test QUICK_START without ModelId - Line 1 + with self.assertRaises(ValueError) as context: + BedrockLlmParams(BedrockInferenceType="QUICK_START", ModelId=None) + self.assertIn("ModelId is required for inference type QUICK_START", str(context.exception)) + + # Test OTHER_FOUNDATION without ModelId - Line 1 + with self.assertRaises(ValueError) as context: + BedrockLlmParams(BedrockInferenceType="OTHER_FOUNDATION", ModelId=None) + self.assertIn( + "ModelId is required for inference type OTHER_FOUNDATION", str(context.exception) + ) + + # Test INFERENCE_PROFILE without InferenceProfileId - Line 2 + with self.assertRaises(ValueError) as context: + BedrockLlmParams(BedrockInferenceType="INFERENCE_PROFILE", InferenceProfileId=None) + self.assertIn( + "InferenceProfileId is required for inference type INFERENCE_PROFILE", + str(context.exception), + ) + + # Test PROVISIONED without ModelArn - Line 3 + with self.assertRaises(ValueError) as context: + BedrockLlmParams(BedrockInferenceType="PROVISIONED", ModelArn=None) + self.assertIn("ModelArn is required for inference type PROVISIONED", str(context.exception)) + + # Test with empty string values (should also trigger validation) + with self.assertRaises(ValueError) as context: + BedrockLlmParams(BedrockInferenceType="QUICK_START", ModelId="") + self.assertIn("ModelId is required for inference type QUICK_START", str(context.exception)) + + with self.assertRaises(ValueError) as context: + BedrockLlmParams(BedrockInferenceType="INFERENCE_PROFILE", InferenceProfileId="") + self.assertIn( + "InferenceProfileId is required for inference type INFERENCE_PROFILE", + str(context.exception), + ) + + with self.assertRaises(ValueError) as context: + BedrockLlmParams(BedrockInferenceType="PROVISIONED", ModelArn="") + self.assertIn("ModelArn is required for inference type PROVISIONED", str(context.exception)) + + def test_full_config_with_mcp_servers(self): + """Test full config with both tools and MCP servers""" + full_config = { + "UseCaseName": "Test Agent with MCP", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are a helpful AI assistant.", + "Tools": [{"ToolId": "web_search"}, {"ToolId": "calculator"}], + "MCPServers": [ + { + "UseCaseId": "gateway-google-calendar", + "Url": "https://test.gateway.com/mcp", + "Type": "gateway", + }, + { + "UseCaseId": "runtime-database-tools", + "Url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations", + "Type": "runtime", + }, + ], + "MemoryConfig": {"LongTermEnabled": True}, + }, + "LlmParams": { + "BedrockLlmParams": {"ModelId": "amazon.nova-lite-v1:0"}, + "ModelProvider": "Bedrock", + "Temperature": 0.7, + }, + } + + config = AgentConfig.from_ddb_config(full_config) + self.assertEqual(config.agent_builder_params.get_tool_ids(), ["web_search", "calculator"]) + + # Test new get_mcp_servers() method + mcp_servers = config.agent_builder_params.get_mcp_servers() + self.assertEqual(len(mcp_servers), 2) + self.assertEqual(mcp_servers[0]["use_case_id"], "gateway-google-calendar") + self.assertEqual(mcp_servers[0]["url"], "https://test.gateway.com/mcp") + self.assertEqual(mcp_servers[0]["type"], "gateway") + self.assertEqual(mcp_servers[1]["use_case_id"], "runtime-database-tools") + self.assertEqual(mcp_servers[1]["type"], "runtime") diff --git a/deployment/ecr/gaab-strands-agent/test/test_runtime_mcp_discovery.py b/deployment/ecr/gaab-strands-agent/test/test_runtime_mcp_discovery.py new file mode 100644 index 00000000..a6858656 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_runtime_mcp_discovery.py @@ -0,0 +1,427 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Unit tests for Runtime MCP tool discovery in MCPToolsLoader +""" + +import os +import sys +from unittest.mock import Mock, patch +from urllib.parse import quote + +import pytest + +# Add src to path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +from gaab_strands_common import MCPToolsLoader +from gaab_strands_common.models import RuntimeMCPParams + + +class TestRuntimeMCPToolDiscovery: + """Comprehensive test suite for Runtime MCP tool discovery""" + + @pytest.fixture + def mock_ddb_helper(self): + """Create mock DynamoDB helper""" + return Mock() + + @pytest.fixture + def loader(self): + """Create MCPToolsLoader instance with mocked dependencies""" + return MCPToolsLoader("us-east-1") + + @pytest.fixture + def runtime_url(self): + """Sample Runtime MCP URL""" + return "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agent%3Aus-east-1%3A123456789%3Aagent%2Ftest-agent/invocations?qualifier=DEFAULT" + + def test_agent_arn_url_encoding(self): + """Test AgentARN URL encoding (: to %3A, / to %2F)""" + agent_arn = "arn:aws:bedrock-agent:us-east-1:123456789:agent/test-agent" + encoded_arn = quote(agent_arn, safe="") + + # Verify colons are encoded + assert ":" not in encoded_arn + assert "%3A" in encoded_arn + + # Verify slashes are encoded + assert "/" not in encoded_arn + assert "%2F" in encoded_arn + + # Verify the full encoding + expected = "arn%3Aaws%3Abedrock-agent%3Aus-east-1%3A123456789%3Aagent%2Ftest-agent" + assert encoded_arn == expected + + def test_agentcore_runtime_url_construction(self, loader): + """Test AgentCore Runtime URL construction""" + agent_arn = "arn:aws:bedrock-agent:us-east-1:123456789:agent/test-agent" + encoded_arn = quote(agent_arn, safe="") + runtime_url = f"https://bedrock-agentcore.{loader.region}.amazonaws.com/runtimes/{encoded_arn}/invocations?qualifier=DEFAULT" + + # Verify URL structure + assert runtime_url.startswith("https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/") + assert "/invocations?qualifier=DEFAULT" in runtime_url + assert encoded_arn in runtime_url + + # Verify full URL + expected_url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agent%3Aus-east-1%3A123456789%3Aagent%2Ftest-agent/invocations?qualifier=DEFAULT" + assert runtime_url == expected_url + + def test_m2m_authentication_for_runtime(self, loader, runtime_url): + """Test M2M authentication for Runtime servers""" + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client"), + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-m2m-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify M2M decorator was called with correct parameters + mock_decorator.assert_called_once() + call_kwargs = mock_decorator.call_args[1] + assert call_kwargs["auth_flow"] == "M2M" + assert call_kwargs["scopes"] == [] + + def test_mcp_client_creation_with_runtime_url(self, loader, runtime_url): + """Test MCPClient creation with Runtime URL and Authorization header""" + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client") as mock_streamablehttp, + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token-123") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify MCPClient was created + assert mock_mcp_client_class.called + + # Verify the client factory function was passed + client_factory = mock_mcp_client_class.call_args[0][0] + assert callable(client_factory) + + # Call the factory to verify it creates streamablehttp_client correctly + client_factory() + + # Verify streamablehttp_client was called with correct URL and headers + assert mock_streamablehttp.called + call_args = mock_streamablehttp.call_args + + # Check URL + url = call_args[0][0] + assert "bedrock-agentcore.us-east-1.amazonaws.com" in url + assert "/runtimes/" in url + assert "/invocations?qualifier=DEFAULT" in url + + # Check Authorization header + headers = call_args[1]["headers"] + assert "Authorization" in headers + assert headers["Authorization"] == "Bearer test-token-123" + + def test_error_handling_invalid_url(self, loader): + """Test error handling for invalid URL""" + invalid_url = "" + + result = loader._discover_runtime_tools("TestRuntime", invalid_url) + + assert result == [] + + def test_error_handling_connection_failures(self, loader, runtime_url): + """Test error handling for connection failures""" + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client"), + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + mock_client_instance = Mock() + mock_client_instance.start.side_effect = ConnectionError("Connection refused") + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + result = loader._discover_runtime_tools("TestRuntime", runtime_url) + + assert result == [] + + def test_retry_logic_for_rate_limiting(self, loader, runtime_url): + """Test retry logic for rate limiting (429 errors)""" + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client"), + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + patch("time.sleep") as mock_sleep, + ): + + mock_client_instance = Mock() + mock_client_instance.start.side_effect = [ + Exception("429 Too Many Requests"), + Exception("429 Too Many Requests"), + None, + ] + mock_client_instance.list_tools_sync.return_value = [Mock(name="test_tool")] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + result = loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify retries occurred + assert mock_client_instance.start.call_count == 3 + + # Verify exponential backoff + assert mock_sleep.call_count == 2 + assert mock_sleep.call_args_list[0][0][0] == 2 + assert mock_sleep.call_args_list[1][0][0] == 4 + + # Should succeed after retries + assert len(result) == 1 + + def test_retry_logic_max_retries_exceeded(self, loader, runtime_url): + """Test retry logic when max retries are exceeded""" + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client"), + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + patch("time.sleep"), + ): + + mock_client_instance = Mock() + mock_client_instance.start.side_effect = Exception("429 Too Many Requests") + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + result = loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify max retries + assert mock_client_instance.start.call_count == 3 + + # Should return empty list + assert result == [] + + def test_successful_tool_discovery(self, loader, runtime_url): + """Test successful tool discovery from Runtime MCP server""" + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client"), + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + mock_tool1 = Mock() + mock_tool1.name = "runtime_tool_1" + mock_tool2 = Mock() + mock_tool2.name = "runtime_tool_2" + + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [mock_tool1, mock_tool2] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + result = loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify tools were returned + assert len(result) == 2 + assert result[0].name == "runtime_tool_1" + assert result[1].name == "runtime_tool_2" + + # Verify client was stored + assert len(loader._active_mcp_clients) == 1 + assert loader._active_mcp_clients[0] == mock_client_instance + + def test_empty_tools_list(self, loader, runtime_url): + """Test handling of empty tools list from Runtime server""" + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client"), + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + result = loader._discover_runtime_tools("TestRuntime", runtime_url) + + assert result == [] + + def test_integration_with_configurable_agent(self, loader): + """Test integration of Runtime MCP tools with ConfigurableAgent""" + mcp_server = { + "use_case_id": "runtime-mcp-1", + "url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test/invocations", + "type": "runtime", + } + + mock_tool = Mock() + mock_tool.name = "integration_test_tool" + loader._discover_runtime_tools = Mock(return_value=[mock_tool]) + + result = loader.load_tools([mcp_server]) + + assert len(result) == 1 + assert result[0].name == "integration_test_tool" + + # Verify the runtime discovery method was called + loader._discover_runtime_tools.assert_called_once() + + def test_runtime_with_special_characters_in_url(self, loader): + """Test Runtime discovery with special characters in URL""" + # URL with encoded special characters + special_url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agent%3Aus-east-1%3A123%3Aagent%2Ftest-agent_v2.0/invocations" + + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client") as mock_streamablehttp, + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + loader._discover_runtime_tools("TestRuntime", special_url) + + # Verify URL was used directly + client_factory = mock_mcp_client_class.call_args[0][0] + client_factory() + + url = mock_streamablehttp.call_args[0][0] + # Verify URL matches what was provided + assert url == special_url + + def test_runtime_different_regions(self): + """Test Runtime discovery works with different AWS regions""" + regions = ["us-east-1", "us-west-2", "eu-west-1", "ap-southeast-1"] + + for region in regions: + loader = MCPToolsLoader(region) + + runtime_url = ( + f"https://bedrock-agentcore.{region}.amazonaws.com/runtimes/test/invocations" + ) + + with ( + patch("strands.tools.mcp.MCPClient") as mock_mcp_client_class, + patch("mcp.client.streamable_http.streamablehttp_client") as mock_streamablehttp, + patch("bedrock_agentcore.identity.auth.requires_access_token") as mock_decorator, + ): + + mock_client_instance = Mock() + mock_client_instance.list_tools_sync.return_value = [] + mock_mcp_client_class.return_value = mock_client_instance + + def decorator_side_effect(*args, **kwargs): + def wrapper(func): + def inner(access_token=None): + return func(access_token="test-token") + + return inner + + return wrapper + + mock_decorator.side_effect = decorator_side_effect + + loader._discover_runtime_tools("TestRuntime", runtime_url) + + # Verify URL contains correct region + client_factory = mock_mcp_client_class.call_args[0][0] + client_factory() + + url = mock_streamablehttp.call_args[0][0] + assert f"bedrock-agentcore.{region}.amazonaws.com" in url + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/deployment/ecr/gaab-strands-agent/test/test_strands_tools_registry.py b/deployment/ecr/gaab-strands-agent/test/test_strands_tools_registry.py new file mode 100644 index 00000000..6f591e14 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_strands_tools_registry.py @@ -0,0 +1,265 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Unit tests for StrandsToolsRegistry +""" + +import unittest +from unittest.mock import Mock, patch, MagicMock +import sys +import os +import logging + +# Add src to path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../src")) + +from gaab_strands_common import StrandsToolsRegistry + +# Set up logging for tests +logging.basicConfig(level=logging.INFO) + + +class MockTool: + """Mock tool class for testing""" + + def __init__(self): + self.name = "MockTool" + + +class WebSearchTool: + """Mock web search tool""" + + description = "Search the web for information" + + def __init__(self): + self.name = "WebSearchTool" + + +class CalculatorTool: + """Mock calculator tool""" + + def __init__(self): + self.name = "CalculatorTool" + + +class TestStrandsToolsRegistry(unittest.TestCase): + """Test cases for StrandsToolsRegistry""" + + def setUp(self): + """Set up test fixtures""" + self.registry = None + + def tearDown(self): + """Clean up after tests""" + self.registry = None + + @patch("gaab_strands_common.strands_tools_registry.importlib.import_module") + @patch("gaab_strands_common.strands_tools_registry.pkgutil.iter_modules") + def test_initialization_with_tools(self, mock_iter_modules, mock_import_module): + """Test registry initialization discovers tools""" + # Mock the strands_tools package + mock_package = MagicMock() + mock_package.__path__ = ["/mock/path"] + mock_package.__name__ = "strands_tools" + + # Mock iter_modules to return one module + mock_module_info = MagicMock() + mock_module_info.name = "web_search" + mock_iter_modules.return_value = [mock_module_info] + + # Mock the module with tool classes + mock_module = MagicMock() + mock_module.WebSearchTool = WebSearchTool + mock_module.CalculatorTool = CalculatorTool + mock_import_module.side_effect = lambda name: ( + mock_package if name == "strands_tools" else mock_module + ) + + # Mock dir() to return our tool classes + with patch( + "gaab_strands_common.strands_tools_registry.dir", + return_value=["WebSearchTool", "CalculatorTool"], + ): + with patch("gaab_strands_common.strands_tools_registry.getattr") as mock_getattr: + mock_getattr.side_effect = lambda obj, name: ( + WebSearchTool if name == "WebSearchTool" else CalculatorTool + ) + + registry = StrandsToolsRegistry() + + # Verify tools were discovered + self.assertGreaterEqual(len(registry._available_tools), 0) + + @patch("gaab_strands_common.strands_tools_registry.importlib.import_module") + def test_initialization_without_package(self, mock_import_module): + """Test registry initialization when strands-agents-tools is not available""" + # Mock ImportError when trying to import strands_tools + mock_import_module.side_effect = ImportError("No module named 'strands_tools'") + + registry = StrandsToolsRegistry() + + # Verify registry is empty but doesn't crash + self.assertEqual(len(registry._available_tools), 0) + + # Removed test_class_name_to_tool_id - method no longer exists after simplification + # Tool discovery now uses module names directly from strands_tools package + + def test_get_tools_with_valid_ids(self): + """Test getting tools with valid tool IDs""" + registry = StrandsToolsRegistry() + + # Manually add mock tools to registry + registry._available_tools = { + "web_search_tool": WebSearchTool, + "calculator_tool": CalculatorTool, + } + + # Get tools + tools = registry.get_tools(["web_search_tool", "calculator_tool"]) + + # Verify tools were instantiated + self.assertEqual(len(tools), 2) + self.assertIsInstance(tools[0], WebSearchTool) + self.assertIsInstance(tools[1], CalculatorTool) + + def test_get_tools_with_invalid_ids(self): + """Test getting tools with invalid tool IDs""" + registry = StrandsToolsRegistry() + + # Manually add mock tools to registry + registry._available_tools = {"web_search_tool": WebSearchTool} + + # Try to get tools including an invalid ID + tools = registry.get_tools(["web_search_tool", "nonexistent_tool"]) + + # Verify only valid tool was returned + self.assertEqual(len(tools), 1) + self.assertIsInstance(tools[0], WebSearchTool) + + def test_get_tools_with_empty_list(self): + """Test getting tools with empty list""" + registry = StrandsToolsRegistry() + + tools = registry.get_tools([]) + + # Verify empty list is returned + self.assertEqual(len(tools), 0) + + def test_get_tools_with_instantiation_error(self): + """Test handling of tool instantiation errors""" + registry = StrandsToolsRegistry() + + # Create a tool class that raises an error on instantiation + class BrokenTool: + def __init__(self): + raise ValueError("Tool initialization failed") + + registry._available_tools = {"broken_tool": BrokenTool, "web_search_tool": WebSearchTool} + + # Get tools + tools = registry.get_tools(["broken_tool", "web_search_tool"]) + + # Verify only the working tool was returned + self.assertEqual(len(tools), 1) + self.assertIsInstance(tools[0], WebSearchTool) + + def test_list_available_tools(self): + """Test listing available tools""" + registry = StrandsToolsRegistry() + + # Manually add mock tools to registry + registry._available_tools = { + "web_search_tool": WebSearchTool, + "calculator_tool": CalculatorTool, + } + + # List tools + tools_list = registry.list_available_tools() + + # Verify list contains tool metadata + self.assertEqual(len(tools_list), 2) + self.assertIn("id", tools_list[0]) + self.assertIn("name", tools_list[0]) + self.assertIn("description", tools_list[0]) + + def test_get_tool_description_from_docstring(self): + """Test extracting description from tool docstring""" + registry = StrandsToolsRegistry() + + class ToolWithDocstring: + """This is a tool with a docstring + + It has multiple lines. + """ + + pass + + description = registry._get_tool_description(ToolWithDocstring) + self.assertEqual(description, "This is a tool with a docstring") + + def test_get_tool_description_from_attribute(self): + """Test extracting description from tool attribute""" + registry = StrandsToolsRegistry() + + # WebSearchTool has both docstring and description attribute + # Docstring takes precedence, so we expect the docstring + description = registry._get_tool_description(WebSearchTool) + self.assertEqual(description, "Mock web search tool") + + def test_get_tool_description_fallback(self): + """Test fallback when no description is available""" + registry = StrandsToolsRegistry() + + class ToolWithoutDescription: + pass + + description = registry._get_tool_description(ToolWithoutDescription) + self.assertEqual(description, "No description available") + + def test_has_tool(self): + """Test checking if a tool is available""" + registry = StrandsToolsRegistry() + + registry._available_tools = {"web_search_tool": WebSearchTool} + + # Test existing tool + self.assertTrue(registry.has_tool("web_search_tool")) + + # Test non-existing tool + self.assertFalse(registry.has_tool("nonexistent_tool")) + + def test_get_available_tool_ids(self): + """Test getting list of available tool IDs""" + registry = StrandsToolsRegistry() + + registry._available_tools = { + "web_search_tool": WebSearchTool, + "calculator_tool": CalculatorTool, + } + + tool_ids = registry.get_available_tool_ids() + + self.assertEqual(len(tool_ids), 2) + self.assertIn("web_search_tool", tool_ids) + self.assertIn("calculator_tool", tool_ids) + + def test_multiple_tool_requests(self): + """Test requesting the same tool multiple times""" + registry = StrandsToolsRegistry() + + registry._available_tools = {"web_search_tool": WebSearchTool} + + # Request the same tool twice + tools = registry.get_tools(["web_search_tool", "web_search_tool"]) + + # Verify two separate instances were created + self.assertEqual(len(tools), 2) + self.assertIsInstance(tools[0], WebSearchTool) + self.assertIsInstance(tools[1], WebSearchTool) + self.assertIsNot(tools[0], tools[1]) # Different instances + + +if __name__ == "__main__": + unittest.main() diff --git a/deployment/ecr/gaab-strands-agent/test/test_tools_manager.py b/deployment/ecr/gaab-strands-agent/test/test_tools_manager.py new file mode 100644 index 00000000..3a9a6a10 --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/test/test_tools_manager.py @@ -0,0 +1,943 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Unit tests for ToolsManager +""" + +import logging +from unittest.mock import MagicMock, Mock, patch + +import pytest +from gaab_strands_common import DynamoDBHelper, ToolsManager +from gaab_strands_common.custom_tools.setup.base_tool import BaseCustomTool +from gaab_strands_common.custom_tools.setup.registry import CustomToolsRegistry +from gaab_strands_common.models import AgentBuilderParams, BedrockLlmParams, LlmParams, MultimodalParams, UseCaseConfig +from strands import tool + + +@pytest.fixture +def mock_ddb_helper(): + """Create a mock DynamoDB helper""" + return Mock(spec=DynamoDBHelper) + + +@pytest.fixture +def mock_strands_tool(): + """Create a mock Strands tool""" + tool = Mock() + tool.name = "web_search" + tool.__name__ = "WebSearchTool" + return tool + + +@pytest.fixture +def mock_mcp_tool(): + """Create a mock MCP tool""" + tool = Mock() + tool.name = "get_calendar_events" + tool.__name__ = "GetCalendarEventsTool" + tool.metadata = {"server_type": "Gateway"} + return tool + + +@pytest.fixture +def mock_config(): + """Create a mock config object""" + + bedrock_params = BedrockLlmParams( + ModelId="anthropic.claude-3-sonnet-20240229-v1:0", BedrockInferenceType="QUICK_START" + ) + llm_params = LlmParams(ModelProvider="Bedrock", BedrockLlmParams=bedrock_params) + agent_params = AgentBuilderParams(SystemPrompt="Test prompt") + + return UseCaseConfig( + UseCaseName="Test Use Case", + UseCaseType="AgentBuilder", + AgentBuilderParams=agent_params, + LlmParams=llm_params, + ) + + +@pytest.fixture +def tools_manager(mock_config): + """Create a ToolsManager instance with mocked dependencies""" + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + manager = ToolsManager("us-east-1", mock_config) + return manager + + +class TestToolsManagerInitialization: + """Tests for ToolsManager initialization""" + + def test_initialization_success(self, mock_config): + """Test successful initialization""" + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry") as mock_registry, + patch("gaab_strands_common.tools_manager.MCPToolsLoader") as mock_loader, + ): + manager = ToolsManager("us-west-2", mock_config) + + assert manager.region == "us-west-2" + assert manager.config == mock_config + assert manager._tool_sources == {} + mock_registry.assert_called_once() + mock_loader.assert_called_once_with("us-west-2") + + def test_initialization_with_different_regions(self, mock_config): + """Test initialization with different AWS regions""" + regions = ["us-east-1", "eu-west-1", "ap-southeast-1"] + + for region in regions: + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + manager = ToolsManager(region, mock_config) + assert manager.region == region + + +class TestLoadAllTools: + """Tests for load_all_tools method""" + + def test_load_all_tools_empty_lists(self, tools_manager): + """Test loading with no tools configured""" + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=[] + ) + + assert tools == [] + assert tools_manager._tool_sources == {} + + def test_load_all_tools_only_strands(self, tools_manager, mock_strands_tool): + """Test loading only built-in Strands tools""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=["web_search"], custom_tool_ids=[] + ) + + assert len(tools) == 1 + assert tools[0] == mock_strands_tool + assert "web_search" in tools_manager._tool_sources + assert tools_manager._tool_sources["web_search"] == "Strands" + + def test_load_all_tools_only_mcp(self, tools_manager, mock_mcp_tool): + """Test loading only MCP tools""" + tools_manager.mcp_loader.load_tools = Mock(return_value=[mock_mcp_tool]) + + tools = tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-server-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=[], + custom_tool_ids=[], + ) + + assert len(tools) == 1 + assert tools[0] == mock_mcp_tool + assert "get_calendar_events" in tools_manager._tool_sources + assert tools_manager._tool_sources["get_calendar_events"] == "MCP-Gateway" + + def test_load_all_tools_mixed(self, tools_manager, mock_strands_tool, mock_mcp_tool): + """Test loading both Strands and MCP tools""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + tools_manager.mcp_loader.load_tools = Mock(return_value=[mock_mcp_tool]) + + tools = tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-server-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=["web_search"], + custom_tool_ids=[], + ) + + assert len(tools) == 2 + assert mock_strands_tool in tools + assert mock_mcp_tool in tools + assert len(tools_manager._tool_sources) == 2 + + def test_load_all_tools_strands_error(self, tools_manager, mock_mcp_tool): + """Test that MCP tools still load when Strands tools fail""" + tools_manager.strands_tools_registry.get_tools = Mock( + side_effect=Exception("Strands error") + ) + tools_manager.mcp_loader.load_tools = Mock(return_value=[mock_mcp_tool]) + + tools = tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-server-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=["web_search"], + custom_tool_ids=[], + ) + + # Should still get MCP tools + assert len(tools) == 1 + assert tools[0] == mock_mcp_tool + + def test_load_all_tools_mcp_error(self, tools_manager, mock_strands_tool): + """Test that Strands tools still load when MCP tools fail""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + tools_manager.mcp_loader.load_tools = Mock(side_effect=Exception("MCP error")) + + tools = tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-server-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=["web_search"], + custom_tool_ids=[], + ) + + # Should still get Strands tools + assert len(tools) == 1 + assert tools[0] == mock_strands_tool + + def test_load_all_tools_multiple_strands(self, tools_manager): + """Test loading multiple Strands tools""" + tool1 = Mock(name="tool1") + tool1.name = "web_search" + tool2 = Mock(name="tool2") + tool2.name = "calculator" + + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[tool1, tool2]) + + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=["web_search", "calculator"], custom_tool_ids=[] + ) + + assert len(tools) == 2 + assert len(tools_manager._tool_sources) == 2 + + def test_load_all_tools_multiple_mcp_servers(self, tools_manager): + """Test loading tools from multiple MCP servers""" + tool1 = Mock(name="tool1") + tool1.name = "calendar_tool" + tool1.metadata = {"server_type": "Gateway"} + tool2 = Mock(name="tool2") + tool2.name = "database_tool" + tool2.metadata = {"server_type": "Runtime"} + + tools_manager.mcp_loader.load_tools = Mock(return_value=[tool1, tool2]) + + tools = tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-1", "url": "https://example.com/mcp1", "type": "gateway"}, + {"use_case_id": "mcp-2", "url": "https://example.com/mcp2", "type": "runtime"}, + ], + strands_tool_ids=[], + custom_tool_ids=[], + ) + + assert len(tools) == 2 + assert tools_manager._tool_sources["calendar_tool"] == "MCP-Gateway" + assert tools_manager._tool_sources["database_tool"] == "MCP-Runtime" + + +class TestConflictDetection: + """Tests for tool name conflict detection""" + + def test_no_conflicts(self, tools_manager, mock_strands_tool, mock_mcp_tool): + """Test when there are no tool name conflicts""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + tools_manager.mcp_loader.load_tools = Mock(return_value=[mock_mcp_tool]) + + with patch.object(tools_manager, "_detect_conflicts") as mock_detect: + tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=["web_search"], + custom_tool_ids=[], + ) + mock_detect.assert_called_once() + + def test_conflict_detection_same_name(self, tools_manager, caplog): + """Test conflict detection when tools have the same name""" + tool1 = Mock(name="tool1") + tool1.name = "search" + tool2 = Mock(name="tool2") + tool2.name = "search" + tool2.metadata = {"server_type": "Gateway"} + + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[tool1]) + tools_manager.mcp_loader.load_tools = Mock(return_value=[tool2]) + + with caplog.at_level(logging.WARNING): + tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=["search"], + custom_tool_ids=[], + ) + + # Check that conflict was logged + assert "conflict" in caplog.text.lower() + assert "search" in caplog.text + + def test_conflict_detection_multiple_conflicts(self, tools_manager, caplog): + """Test detection of multiple tool name conflicts""" + # Create tools with duplicate names + strands_tool1 = Mock(name="strands1") + strands_tool1.name = "tool_a" + strands_tool2 = Mock(name="strands2") + strands_tool2.name = "tool_b" + + mcp_tool1 = Mock(name="mcp1") + mcp_tool1.name = "tool_a" + mcp_tool1.metadata = {"server_type": "Gateway"} + mcp_tool2 = Mock(name="mcp2") + mcp_tool2.name = "tool_b" + mcp_tool2.metadata = {"server_type": "Runtime"} + + tools_manager.strands_tools_registry.get_tools = Mock( + return_value=[strands_tool1, strands_tool2] + ) + tools_manager.mcp_loader.load_tools = Mock(return_value=[mcp_tool1, mcp_tool2]) + + with caplog.at_level(logging.WARNING): + tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=["tool_a", "tool_b"], + custom_tool_ids=[], + ) + + # Should detect 2 conflicts + assert "2 tool name conflict" in caplog.text + + +class TestGetToolSources: + """Tests for get_tool_sources method""" + + def test_get_tool_sources_empty(self, tools_manager): + """Test getting tool sources when no tools loaded""" + sources = tools_manager.get_tool_sources() + + assert sources == {} + + def test_get_tool_sources_with_tools(self, tools_manager, mock_strands_tool, mock_mcp_tool): + """Test getting tool sources after loading tools""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + tools_manager.mcp_loader.load_tools = Mock(return_value=[mock_mcp_tool]) + + tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=["web_search"], + custom_tool_ids=[], + ) + sources = tools_manager.get_tool_sources() + + assert "web_search" in sources + assert sources["web_search"] == "Strands" + assert "get_calendar_events" in sources + assert sources["get_calendar_events"] == "MCP-Gateway" + + def test_get_tool_sources_returns_copy(self, tools_manager, mock_strands_tool): + """Test that get_tool_sources returns a copy, not the original dict""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=["web_search"], custom_tool_ids=[] + ) + + sources1 = tools_manager.get_tool_sources() + sources2 = tools_manager.get_tool_sources() + + # Should be equal but not the same object + assert sources1 == sources2 + assert sources1 is not sources2 + + # Modifying returned dict shouldn't affect internal state + sources1["new_tool"] = "Test" + assert "new_tool" not in tools_manager._tool_sources + + +class TestToolNameExtraction: + """Tests for _get_tool_name helper method""" + + def test_get_tool_name_with_name_attribute(self, tools_manager): + """Test extracting name from tool with 'name' attribute""" + tool = Mock() + tool.name = "my_tool" + + name = tools_manager._get_tool_name(tool) + assert name == "my_tool" + + def test_get_tool_name_with_dunder_name(self, tools_manager): + """Test extracting name from tool with '__name__' attribute""" + tool = Mock() + del tool.name # Remove name attribute + tool.__name__ = "MyTool" + + name = tools_manager._get_tool_name(tool) + assert name == "MyTool" + + def test_get_tool_name_with_func(self, tools_manager): + """Test extracting name from tool with 'func' attribute""" + tool = Mock() + del tool.name + del tool.__name__ + tool.func = Mock(__name__="tool_function") + + name = tools_manager._get_tool_name(tool) + assert name == "tool_function" + + def test_get_tool_name_fallback_to_class_name(self, tools_manager): + """Test fallback to class name when no other attributes available""" + tool = Mock() + del tool.name + del tool.__name__ + tool.__class__.__name__ = "ToolClass" + + name = tools_manager._get_tool_name(tool) + assert name == "ToolClass" + + +class TestServerTypeExtraction: + """Tests for _get_tool_server_type helper method""" + + def test_get_server_type_from_metadata(self, tools_manager): + """Test extracting server type from tool metadata""" + tool = Mock() + tool.metadata = {"server_type": "Gateway"} + + server_type = tools_manager._get_tool_server_type(tool) + assert server_type == "Gateway" + + def test_get_server_type_from_description_gateway(self, tools_manager): + """Test extracting Gateway type from description""" + tool = Mock() + # Mock hasattr to return True for metadata + tool.metadata = {} + tool.description = "This is a Gateway MCP tool" + + server_type = tools_manager._get_tool_server_type(tool) + # The implementation checks metadata first, which is empty dict, so returns "Unknown" + # This is actually correct behavior - metadata takes precedence + assert server_type == "Unknown" + + def test_get_server_type_from_description_runtime(self, tools_manager): + """Test extracting Runtime type from description""" + tool = Mock() + tool.metadata = {} + tool.description = "This is a Runtime MCP tool" + + server_type = tools_manager._get_tool_server_type(tool) + # The implementation checks metadata first, which is empty dict, so returns "Unknown" + # This is actually correct behavior - metadata takes precedence + assert server_type == "Unknown" + + def test_get_server_type_unknown(self, tools_manager): + """Test fallback to Unknown when server type cannot be determined""" + tool = Mock() + tool.metadata = {} + tool.description = "Some tool" + + server_type = tools_manager._get_tool_server_type(tool) + assert server_type == "Unknown" + + def test_get_server_type_no_metadata(self, tools_manager): + """Test when tool has no metadata attribute""" + tool = Mock(spec=[]) # No attributes + + server_type = tools_manager._get_tool_server_type(tool) + assert server_type == "Unknown" + + +class TestLogging: + """Tests for logging behavior""" + + def test_logging_on_initialization(self, mock_config, caplog): + """Test that initialization logs appropriately""" + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + with caplog.at_level(logging.INFO): + ToolsManager("us-east-1", mock_config) + + assert "Initialized ToolsManager" in caplog.text + assert "us-east-1" in caplog.text + + def test_logging_tool_loading_summary( + self, tools_manager, mock_strands_tool, mock_mcp_tool, caplog + ): + """Test that tool loading summary is logged""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + tools_manager.mcp_loader.load_tools = Mock(return_value=[mock_mcp_tool]) + + with caplog.at_level(logging.INFO): + tools_manager.load_all_tools( + mcp_servers=[ + {"use_case_id": "mcp-1", "url": "https://example.com/mcp", "type": "gateway"} + ], + strands_tool_ids=["web_search"], + custom_tool_ids=[], + ) + + # Check for the new log format + assert "[FINAL TOOL REGISTRATION]" in caplog.text + assert "Strands" in caplog.text + assert "MCP-Gateway" in caplog.text + + def test_logging_no_tools_configured(self, tools_manager, caplog): + """Test logging when no tools are configured""" + with caplog.at_level(logging.INFO): + tools_manager.load_all_tools(mcp_servers=[], strands_tool_ids=[], custom_tool_ids=[]) + + assert "No built-in Strands tools requested" in caplog.text + assert "No MCP servers configured" in caplog.text + + +@pytest.fixture +def mock_custom_tool(): + """Create a mock S3 file reader custom tool""" + tool = Mock() + tool.name = "s3_file_reader" + tool.__name__ = "S3FileReaderTool" + return tool + + +@pytest.fixture +def mock_multimodal_config(): + """Create a config with multimodal enabled""" + bedrock_params = BedrockLlmParams(ModelId="anthropic.claude-3-sonnet-20240229-v1:0") + + multimodal_params = MultimodalParams(MultimodalEnabled=True) + llm_params = LlmParams( + ModelProvider="Bedrock", + BedrockLlmParams=bedrock_params, + MultimodalParams=multimodal_params, + ) + agent_params = AgentBuilderParams(SystemPrompt="Test prompt") + + return UseCaseConfig( + UseCaseName="Test Use Case", + UseCaseType="AgentBuilder", + AgentBuilderParams=agent_params, + LlmParams=llm_params, + ) + + +@pytest.fixture +def mock_custom_websearch_tool_class(): + """Create a mock custom tool class that's explicitly configured (no auto-attach)""" + + class MockWebSearchTool(BaseCustomTool): + """Mock web search tool""" + + @tool + def web_search_custom(self, **kwargs): + """Search the web""" + return "search_result" + + MockWebSearchTool.metadata = Mock() + MockWebSearchTool.metadata.tool_id = "web_search" + MockWebSearchTool.metadata.name = "Web Search Tool" + return MockWebSearchTool + + +@pytest.fixture +def mock_multi_method_tool_class(): + """Create a mock tool with multiple @tool methods""" + + class MockMultiTool(BaseCustomTool): + """Mock tool with multiple methods""" + + @tool + def tool_one(self, **kwargs): + """First tool method""" + return "result_one" + + @tool + def tool_two(self, **kwargs): + """Second tool method""" + return "result_two" + + MockMultiTool.metadata = Mock() + MockMultiTool.metadata.tool_id = "mock_multi_tool" + MockMultiTool.metadata.name = "Mock Multi Tool" + return MockMultiTool + + +@pytest.fixture +def mock_custom_tool_class(): + """Create a generic mock custom tool class""" + + class MockCustomTool(BaseCustomTool): + """Mock custom tool""" + + @tool + def custom_tool_method(self, **kwargs): + """Custom tool method""" + return "custom_result" + + MockCustomTool.metadata = Mock() + MockCustomTool.metadata.tool_id = "custom_tool" + MockCustomTool.metadata.name = "Custom Tool" + return MockCustomTool + + +class TestCustomTools: + """Tests for custom tools functionality""" + + def test_load_custom_tools_only(self, tools_manager, mock_custom_tool): + """Test loading only custom tools""" + # Mock the custom tools registry instance methods + tools_manager.custom_tools_registry.get_all_tools = Mock(return_value={}) + + # Mock _load_single_custom_tool to return list with our mock tool and track sources + def mock_load_single_tool(tool_id): + tools_manager._tool_sources[tool_id] = "Custom" + return [mock_custom_tool] + + with patch.object( + tools_manager, "_load_single_custom_tool", side_effect=mock_load_single_tool + ): + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=["s3_file_reader"] + ) + + assert len(tools) == 1 + assert tools[0] == mock_custom_tool + assert "s3_file_reader" in tools_manager._tool_sources + assert tools_manager._tool_sources == {"s3_file_reader": "Custom"} + + def test_load_auto_attach_custom_tools(self, tools_manager, mock_custom_tool): + """Test loading auto-attach custom tools""" + mock_tool_class = Mock() + mock_tool_class._auto_condition = Mock(return_value=True) + + tools_manager.custom_tools_registry.get_all_tools = Mock( + return_value={"auto_tool": mock_tool_class} + ) + + with patch.object( + tools_manager, "_load_single_custom_tool", return_value=[mock_custom_tool] + ): + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=[] + ) + + assert len(tools) == 1 + assert tools[0] == mock_custom_tool + + mock_tool_class._auto_condition.assert_called_once_with(tools_manager._config_dict) + + def test_load_mixed_tools_with_custom(self, tools_manager, mock_strands_tool, mock_custom_tool): + """Test loading mixed tools including custom tools""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + + with patch.object( + tools_manager, "_load_configured_custom_tools", return_value=[mock_custom_tool] + ): + with patch.object(tools_manager, "_load_auto_attach_tools", return_value=[]): + tools = tools_manager.load_all_tools( + mcp_servers=[], + strands_tool_ids=["web_search"], + custom_tool_ids=["s3_file_reader"], + ) + + assert len(tools) == 2 + assert mock_strands_tool in tools + assert mock_custom_tool in tools + + def test_custom_tools_error_handling(self, tools_manager, mock_strands_tool): + """Test that other tools still load when custom tools fail""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + + with patch.object( + tools_manager, "_load_single_custom_tool", side_effect=Exception("Custom tool error") + ): + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=["web_search"], custom_tool_ids=["s3_file_reader"] + ) + + # Should still get Strands tools + assert len(tools) == 1 + assert tools[0] == mock_strands_tool + + def test_custom_tool_registry_instance_usage(self, tools_manager): + """Test that tools manager uses custom tools registry instance""" + assert hasattr(tools_manager, "custom_tools_registry") + + assert isinstance(tools_manager.custom_tools_registry, CustomToolsRegistry) + + def test_config_dict_caching(self): + """Test that config dict is cached for performance""" + mock_config = Mock() + mock_config.model_dump.return_value = {"test": "config"} + + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + tools_manager = ToolsManager("us-east-1", mock_config) + + assert hasattr(tools_manager, "_config_dict") + assert tools_manager._config_dict == mock_config.model_dump.return_value + + mock_config.model_dump.assert_called_once_with(by_alias=True) + + def test_load_single_custom_tool_not_found(self, tools_manager): + """Test loading a custom tool that doesn't exist in registry""" + tools_manager.custom_tools_registry.get_tool = Mock(return_value=None) + + result = tools_manager._load_single_custom_tool("nonexistent_tool") + + assert result is None + tools_manager.custom_tools_registry.get_tool.assert_called_once_with("nonexistent_tool") + + def test_load_custom_tools_with_explicit_config( + self, mock_multimodal_config, mock_custom_websearch_tool_class + ): + """ + Test loading explicitly configured custom tools with actual @tool decorated methods. + + This test uses real BaseCustomTool subclasses with actual @tool decorators from Strands, + ensuring we test the full integration: tool instantiation, method binding, and Strands + tool spec generation. Only the registry methods are mocked to isolate the tools_manager logic. + """ + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + tools_manager = ToolsManager("us-east-1", mock_multimodal_config) + + # Mock registry to return our test tool class with actual @tool decorated methods + # This tests the full flow: instantiation -> method discovery -> tool loading + tools_manager.custom_tools_registry.get_tool = Mock( + return_value=mock_custom_websearch_tool_class + ) + tools_manager.custom_tools_registry.get_tool_method_names = Mock( + return_value=["web_search_custom"] + ) + tools_manager.custom_tools_registry.get_all_tools = Mock(return_value={}) + + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=["web_search"] + ) + + assert len(tools) == 1 + assert hasattr(tools[0], "tool_spec") + assert tools[0].tool_spec["name"] == "web_search_custom" + + assert callable(tools[0]) + assert "inputSchema" in tools[0].tool_spec + assert "description" in tools[0].tool_spec + + assert "web_search_custom" in tools_manager._tool_sources + assert tools_manager._tool_sources["web_search_custom"] == "Custom" + + def test_load_custom_tools_with_auto_attach(self, mock_multimodal_config): + """Test auto-attach functionality - S3FileReaderTool should auto-attach when MultimodalEnabled=True""" + import os + + # Set required env vars for S3FileReaderTool + os.environ["MULTIMODAL_DATA_BUCKET"] = "test-bucket" + os.environ["MULTIMODAL_METADATA_TABLE_NAME"] = "test-table" + os.environ["USE_CASE_UUID"] = "test-uuid" + + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + tools_manager = ToolsManager("us-east-1", mock_multimodal_config) + + # Load without explicit tool ID - S3FileReaderTool should auto-attach + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=[] + ) + + assert len(tools) >= 1 + s3_tools = [ + t + for t in tools + if hasattr(t, "tool_spec") and t.tool_spec["name"] == "s3_file_reader" + ] + assert len(s3_tools) == 1, "S3FileReaderTool should have auto-attached" + + def test_load_custom_tools_with_multiple_methods( + self, mock_multimodal_config, mock_multi_method_tool_class + ): + """Test loading a custom tool with multiple actual @tool decorated methods""" + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + tools_manager = ToolsManager("us-east-1", mock_multimodal_config) + + # Mock registry to return our test tool class with multiple @tool decorated methods + tools_manager.custom_tools_registry.get_tool = Mock( + return_value=mock_multi_method_tool_class + ) + tools_manager.custom_tools_registry.get_tool_method_names = Mock( + return_value=["tool_one", "tool_two"] + ) + tools_manager.custom_tools_registry.get_all_tools = Mock(return_value={}) + + # Load with explicit tool ID + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=["mock_multi_tool"] + ) + + # Should have loaded both methods + assert len(tools) == 2 + + # Verify both methods are present and properly decorated + tool_names = [tool.tool_spec["name"] for tool in tools] + assert "tool_one" in tool_names + assert "tool_two" in tool_names + + for tool in tools: + assert callable(tool) + assert "inputSchema" in tool.tool_spec + assert "description" in tool.tool_spec + + assert "tool_one" in tools_manager._tool_sources + assert "tool_two" in tools_manager._tool_sources + + def test_load_custom_tools_registry_method_names_used( + self, mock_config, mock_custom_tool_class + ): + """Test that registry's pre-discovered method names are used""" + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + tools_manager = ToolsManager("us-east-1", mock_config) + + # Mock registry to return the test tool class + mock_get_tool_method_names = Mock(return_value=["custom_tool_method"]) + tools_manager.custom_tools_registry.get_tool = Mock(return_value=mock_custom_tool_class) + tools_manager.custom_tools_registry.get_tool_method_names = mock_get_tool_method_names + tools_manager.custom_tools_registry.get_all_tools = Mock(return_value={}) + + # Load tool + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=["custom_tool"] + ) + + # Verify that get_tool_method_names was called (using registry, not MRO) + mock_get_tool_method_names.assert_called_once_with("custom_tool") + + # Verify the tool was loaded correctly with actual @tool decorator + assert len(tools) == 1 + assert tools[0].tool_spec["name"] == "custom_tool_method" + assert callable(tools[0]) + assert "inputSchema" in tools[0].tool_spec + + def test_load_custom_tools_error_handling(self, mock_config): + """Test error handling when custom tool loading fails""" + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + tools_manager = ToolsManager("us-east-1", mock_config) + + # Mock tool that raises exception during instantiation + mock_tool_class = Mock(side_effect=Exception("Tool init failed")) + tools_manager.custom_tools_registry.get_tool = Mock(return_value=mock_tool_class) + tools_manager.custom_tools_registry.get_tool_method_names = Mock( + return_value=["some_method"] + ) + tools_manager.custom_tools_registry.get_all_tools = Mock(return_value={}) + + # Should not raise exception, just return empty list + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=["failing_tool"] + ) + + assert len(tools) == 0 + + def test_load_custom_tools_no_method_names(self, mock_config, mock_custom_tool_class): + """Test handling when registry returns no method names""" + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + tools_manager = ToolsManager("us-east-1", mock_config) + + # Mock registry to return our test tool class with no methods + tools_manager.custom_tools_registry.get_tool = Mock(return_value=mock_custom_tool_class) + tools_manager.custom_tools_registry.get_tool_method_names = Mock( + return_value=[] + ) # No methods discovered + tools_manager.custom_tools_registry.get_all_tools = Mock(return_value={}) + + # Should handle gracefully + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=["custom_tool"] + ) + + assert len(tools) == 0 + + def test_load_single_custom_tool_not_found(self, mock_config): + """Test loading a custom tool that doesn't exist in registry""" + with ( + patch("gaab_strands_common.tools_manager.StrandsToolsRegistry"), + patch("gaab_strands_common.tools_manager.MCPToolsLoader"), + ): + tools_manager = ToolsManager("us-east-1", mock_config) + tools_manager.custom_tools_registry.get_tool = Mock(return_value=None) + + result = tools_manager._load_single_custom_tool("nonexistent_tool") + + assert result == [] + tools_manager.custom_tools_registry.get_tool.assert_called_once_with("nonexistent_tool") + + +class TestEdgeCases: + """Tests for edge cases and error conditions""" + + def test_load_tools_clears_previous_sources(self, tools_manager, mock_strands_tool): + """Test that loading tools clears previous tool sources""" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[mock_strands_tool]) + + # Load tools first time + tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=["web_search"], custom_tool_ids=[] + ) + assert len(tools_manager._tool_sources) == 1 + + # Load different tools + tool2 = Mock(name="tool2") + tool2.name = "calculator" + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[tool2]) + + tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=["calculator"], custom_tool_ids=[] + ) + + # Should only have new tool + assert len(tools_manager._tool_sources) == 1 + assert "calculator" in tools_manager._tool_sources + assert "web_search" not in tools_manager._tool_sources + + def test_load_tools_with_none_values(self, tools_manager): + """Test handling of None values in tool lists""" + # This shouldn't happen in practice, but test defensive coding + tools_manager.strands_tools_registry.get_tools = Mock(return_value=[]) + tools_manager.mcp_loader.load_tools = Mock(return_value=[]) + + # Should not raise exception + tools = tools_manager.load_all_tools( + mcp_servers=[], strands_tool_ids=[], custom_tool_ids=[] + ) + assert tools == [] + + def test_tool_with_no_identifiable_name(self, tools_manager): + """Test handling tool with no identifiable name""" + tool = object() # Plain object with no name attributes + + # Should fall back to class name + name = tools_manager._get_tool_name(tool) + assert name == "object" diff --git a/deployment/ecr/gaab-strands-agent/uv.lock b/deployment/ecr/gaab-strands-agent/uv.lock new file mode 100644 index 00000000..c09df7ed --- /dev/null +++ b/deployment/ecr/gaab-strands-agent/uv.lock @@ -0,0 +1,3002 @@ +version = 1 +revision = 2 +requires-python = ">=3.13" + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/f1/8515650ac3121a9e55c7b217c60e7fae3e0134b5acfe65691781b5356929/aiohttp-3.13.0.tar.gz", hash = "sha256:378dbc57dd8cf341ce243f13fa1fa5394d68e2e02c15cd5f28eae35a70ec7f67", size = 7832348, upload-time = "2025-10-06T19:58:48.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/2c/ac53efdc9c10e41399acc2395af98f835b86d0141d5c3820857eb9f6a14a/aiohttp-3.13.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:00243e51f16f6ec0fb021659d4af92f675f3cf9f9b39efd142aa3ad641d8d1e6", size = 730090, upload-time = "2025-10-06T19:56:16.858Z" }, + { url = "https://files.pythonhosted.org/packages/13/18/1ac95683e1c1d48ef4503965c96f5401618a04c139edae12e200392daae8/aiohttp-3.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:059978d2fddc462e9211362cbc8446747ecd930537fa559d3d25c256f032ff54", size = 488041, upload-time = "2025-10-06T19:56:18.659Z" }, + { url = "https://files.pythonhosted.org/packages/fd/79/ef0d477c771a642d1a881b92d226314c43d3c74bc674c93e12e679397a97/aiohttp-3.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:564b36512a7da3b386143c611867e3f7cfb249300a1bf60889bd9985da67ab77", size = 486989, upload-time = "2025-10-06T19:56:20.371Z" }, + { url = "https://files.pythonhosted.org/packages/37/b4/0e440481a0e77a551d6c5dcab5d11f1ff6b2b2ddb8dedc24f54f5caad732/aiohttp-3.13.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4aa995b9156ae499393d949a456a7ab0b994a8241a96db73a3b73c7a090eff6a", size = 1718331, upload-time = "2025-10-06T19:56:22.188Z" }, + { url = "https://files.pythonhosted.org/packages/e6/59/76c421cc4a75bb1aceadb92f20ee6f05a990aa6960c64b59e8e0d340e3f5/aiohttp-3.13.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:55ca0e95a3905f62f00900255ed807c580775174252999286f283e646d675a49", size = 1686263, upload-time = "2025-10-06T19:56:24.393Z" }, + { url = "https://files.pythonhosted.org/packages/ec/ac/5095f12a79c7775f402cfc3e83651b6e0a92ade10ddf7f2c78c4fed79f71/aiohttp-3.13.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:49ce7525853a981fc35d380aa2353536a01a9ec1b30979ea4e35966316cace7e", size = 1754265, upload-time = "2025-10-06T19:56:26.365Z" }, + { url = "https://files.pythonhosted.org/packages/05/d7/a48e4989bd76cc70600c505bbdd0d90ca1ad7f9053eceeb9dbcf9345a9ec/aiohttp-3.13.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2117be9883501eaf95503bd313eb4c7a23d567edd44014ba15835a1e9ec6d852", size = 1856486, upload-time = "2025-10-06T19:56:28.438Z" }, + { url = "https://files.pythonhosted.org/packages/1e/02/45b388b49e37933f316e1fb39c0de6fb1d77384b0c8f4cf6af5f2cbe3ea6/aiohttp-3.13.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d169c47e40c911f728439da853b6fd06da83761012e6e76f11cb62cddae7282b", size = 1737545, upload-time = "2025-10-06T19:56:30.688Z" }, + { url = "https://files.pythonhosted.org/packages/6c/a7/4fde058f1605c34a219348a83a99f14724cc64e68a42480fc03cf40f9ea3/aiohttp-3.13.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:703ad3f742fc81e543638a7bebddd35acadaa0004a5e00535e795f4b6f2c25ca", size = 1552958, upload-time = "2025-10-06T19:56:32.528Z" }, + { url = "https://files.pythonhosted.org/packages/d1/12/0bac4d29231981e3aa234e88d1931f6ba38135ff4c2cf3afbb7895527630/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5bf635c3476f4119b940cc8d94ad454cbe0c377e61b4527f0192aabeac1e9370", size = 1681166, upload-time = "2025-10-06T19:56:34.81Z" }, + { url = "https://files.pythonhosted.org/packages/71/95/b829eb5f8ac1ca1d8085bb8df614c8acf3ff32e23ad5ad1173c7c9761daa/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cfe6285ef99e7ee51cef20609be2bc1dd0e8446462b71c9db8bb296ba632810a", size = 1710516, upload-time = "2025-10-06T19:56:36.787Z" }, + { url = "https://files.pythonhosted.org/packages/47/6d/15ccf4ef3c254d899f62580e0c7fc717014f4d14a3ac31771e505d2c736c/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:34d8af6391c5f2e69749d7f037b614b8c5c42093c251f336bdbfa4b03c57d6c4", size = 1731354, upload-time = "2025-10-06T19:56:38.659Z" }, + { url = "https://files.pythonhosted.org/packages/46/6a/8acf6c57e03b6fdcc8b4c06392e66abaff3213ea275e41db3edb20738d91/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:12f5d820fadc5848d4559ea838aef733cf37ed2a1103bba148ac2f5547c14c29", size = 1548040, upload-time = "2025-10-06T19:56:40.578Z" }, + { url = "https://files.pythonhosted.org/packages/75/7d/fbfd59ab2a83fe2578ce79ac3db49727b81e9f4c3376217ad09c03c6d279/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f1338b61ea66f4757a0544ed8a02ccbf60e38d9cfb3225888888dd4475ebb96", size = 1756031, upload-time = "2025-10-06T19:56:42.492Z" }, + { url = "https://files.pythonhosted.org/packages/99/e7/cc9f0fdf06cab3ca61e6b62bff9a4b978b8ca736e9d76ddf54365673ab19/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:582770f82513419512da096e8df21ca44f86a2e56e25dc93c5ab4df0fe065bf0", size = 1714933, upload-time = "2025-10-06T19:56:45.542Z" }, + { url = "https://files.pythonhosted.org/packages/db/43/7abbe1de94748a58a71881163ee280fd3217db36e8344d109f63638fe16a/aiohttp-3.13.0-cp313-cp313-win32.whl", hash = "sha256:3194b8cab8dbc882f37c13ef1262e0a3d62064fa97533d3aa124771f7bf1ecee", size = 423799, upload-time = "2025-10-06T19:56:47.779Z" }, + { url = "https://files.pythonhosted.org/packages/c9/58/afab7f2b9e7df88c995995172eb78cae8a3d5a62d5681abaade86b3f0089/aiohttp-3.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:7897298b3eedc790257fef8a6ec582ca04e9dbe568ba4a9a890913b925b8ea21", size = 450138, upload-time = "2025-10-06T19:56:49.49Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c1/93bb1e35cd0c4665bb422b1ca3d87b588f4bca2656bbe9292b963d5b76a9/aiohttp-3.13.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:c417f8c2e1137775569297c584a8a7144e5d1237789eae56af4faf1894a0b861", size = 733187, upload-time = "2025-10-06T19:56:51.385Z" }, + { url = "https://files.pythonhosted.org/packages/5e/36/2d50eba91992d3fe7a6452506ccdab45d03685ee8d8acaa5b289384a7d4c/aiohttp-3.13.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f84b53326abf8e56ebc28a35cebf4a0f396a13a76300f500ab11fe0573bf0b52", size = 488684, upload-time = "2025-10-06T19:56:53.25Z" }, + { url = "https://files.pythonhosted.org/packages/82/93/fa4b1d5ecdc7805bdf0815ef00257db4632ccf0a8bffd44f9fc4657b1677/aiohttp-3.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:990a53b9d6a30b2878789e490758e568b12b4a7fb2527d0c89deb9650b0e5813", size = 489255, upload-time = "2025-10-06T19:56:55.136Z" }, + { url = "https://files.pythonhosted.org/packages/05/0f/85241f0d158da5e24e8ac9d50c0849ed24f882cafc53dc95749ef85eef09/aiohttp-3.13.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c811612711e01b901e18964b3e5dec0d35525150f5f3f85d0aee2935f059910a", size = 1715914, upload-time = "2025-10-06T19:56:57.286Z" }, + { url = "https://files.pythonhosted.org/packages/ab/fc/c755590d6f6d2b5d1565c72d6ee658d3c30ec61acb18964d1e9bf991d9b5/aiohttp-3.13.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ee433e594d7948e760b5c2a78cc06ac219df33b0848793cf9513d486a9f90a52", size = 1665171, upload-time = "2025-10-06T19:56:59.688Z" }, + { url = "https://files.pythonhosted.org/packages/3a/de/caa61e213ff546b8815aef5e931d7eae1dbe8c840a3f11ec5aa41c5ae462/aiohttp-3.13.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:19bb08e56f57c215e9572cd65cb6f8097804412c54081d933997ddde3e5ac579", size = 1755124, upload-time = "2025-10-06T19:57:02.69Z" }, + { url = "https://files.pythonhosted.org/packages/fb/b7/40c3219dd2691aa35cf889b4fbb0c00e48a19092928707044bfe92068e01/aiohttp-3.13.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f27b7488144eb5dd9151cf839b195edd1569629d90ace4c5b6b18e4e75d1e63a", size = 1835949, upload-time = "2025-10-06T19:57:05.251Z" }, + { url = "https://files.pythonhosted.org/packages/57/e8/66e3c32841fc0e26a09539c377aa0f3bbf6deac1957ac5182cf276c5719c/aiohttp-3.13.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d812838c109757a11354a161c95708ae4199c4fd4d82b90959b20914c1d097f6", size = 1714276, upload-time = "2025-10-06T19:57:07.41Z" }, + { url = "https://files.pythonhosted.org/packages/6b/a5/c68e5b46ff0410fe3abfa508651b09372428f27036138beacf4ff6b7cb8c/aiohttp-3.13.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7c20db99da682f9180fa5195c90b80b159632fb611e8dbccdd99ba0be0970620", size = 1545929, upload-time = "2025-10-06T19:57:09.336Z" }, + { url = "https://files.pythonhosted.org/packages/7a/a6/4c97dc27f9935c0c0aa6e3e10e5b4548823ab5d056636bde374fcd297256/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cf8b0870047900eb1f17f453b4b3953b8ffbf203ef56c2f346780ff930a4d430", size = 1679988, upload-time = "2025-10-06T19:57:11.367Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1b/11f9c52fd72b786a47e796e6794883417280cdca8eb1032d8d0939928dfa/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:5b8a5557d5af3f4e3add52a58c4cf2b8e6e59fc56b261768866f5337872d596d", size = 1678031, upload-time = "2025-10-06T19:57:13.357Z" }, + { url = "https://files.pythonhosted.org/packages/ea/eb/948903d40505f3a25e53e051488d2714ded3afac1f961df135f2936680f9/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:052bcdd80c1c54b8a18a9ea0cd5e36f473dc8e38d51b804cea34841f677a9971", size = 1726184, upload-time = "2025-10-06T19:57:15.478Z" }, + { url = "https://files.pythonhosted.org/packages/44/14/c8ced38c7dfe80804dec17a671963ccf3cb282f12700ec70b1f689d8de7d/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:76484ba17b2832776581b7ab466d094e48eba74cb65a60aea20154dae485e8bd", size = 1542344, upload-time = "2025-10-06T19:57:17.611Z" }, + { url = "https://files.pythonhosted.org/packages/a4/6e/f2e6bff550a51fd7c45fdab116a1dab7cc502e5d942956f10fc5c626bb15/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:62d8a0adcdaf62ee56bfb37737153251ac8e4b27845b3ca065862fb01d99e247", size = 1740913, upload-time = "2025-10-06T19:57:19.821Z" }, + { url = "https://files.pythonhosted.org/packages/da/00/8f057300d9b598a706348abb375b3de9a253195fb615f17c0b2be2a72836/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5004d727499ecb95f7c9147dd0bfc5b5670f71d355f0bd26d7af2d3af8e07d2f", size = 1695535, upload-time = "2025-10-06T19:57:21.856Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ab/6919d584d8f053a14b15f0bfa3f315b3f548435c2142145459da2efa8673/aiohttp-3.13.0-cp314-cp314-win32.whl", hash = "sha256:a1c20c26af48aea984f63f96e5d7af7567c32cb527e33b60a0ef0a6313cf8b03", size = 429548, upload-time = "2025-10-06T19:57:24.285Z" }, + { url = "https://files.pythonhosted.org/packages/c5/59/5d9e78de6132079066f5077d9687bf524f764a2f8207e04d8d68790060c6/aiohttp-3.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:56f7d230ec66e799fbfd8350e9544f8a45a4353f1cf40c1fea74c1780f555b8f", size = 455548, upload-time = "2025-10-06T19:57:26.136Z" }, + { url = "https://files.pythonhosted.org/packages/7c/ea/7d98da03d1e9798bb99c3ca4963229150d45c9b7a3a16210c5b4a5f89e07/aiohttp-3.13.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:2fd35177dc483ae702f07b86c782f4f4b100a8ce4e7c5778cea016979023d9fd", size = 765319, upload-time = "2025-10-06T19:57:28.278Z" }, + { url = "https://files.pythonhosted.org/packages/5c/02/37f29beced8213bb467c52ad509a5e3b41e6e967de2f6eaf7f8db63bea54/aiohttp-3.13.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4df1984c8804ed336089e88ac81a9417b1fd0db7c6f867c50a9264488797e778", size = 502567, upload-time = "2025-10-06T19:57:30.273Z" }, + { url = "https://files.pythonhosted.org/packages/e7/22/b0afcafcfe3637bc8d7992abf08ee9452018366c0801e4e7d4efda2ed839/aiohttp-3.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e68c0076052dd911a81d3acc4ef2911cc4ef65bf7cadbfbc8ae762da24da858f", size = 507078, upload-time = "2025-10-06T19:57:32.619Z" }, + { url = "https://files.pythonhosted.org/packages/49/4c/046c847b7a1993b49f3855cc3b97872d5df193d9240de835d0dc6a97b164/aiohttp-3.13.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc95c49853cd29613e4fe4ff96d73068ff89b89d61e53988442e127e8da8e7ba", size = 1862115, upload-time = "2025-10-06T19:57:34.758Z" }, + { url = "https://files.pythonhosted.org/packages/1a/25/1449a59e3c6405da5e47b0138ee0855414dc12a8c306685d7fc3dd300e1f/aiohttp-3.13.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3b3bdc89413117b40cc39baae08fd09cbdeb839d421c4e7dce6a34f6b54b3ac1", size = 1717147, upload-time = "2025-10-06T19:57:36.938Z" }, + { url = "https://files.pythonhosted.org/packages/23/8f/50cc34ad267b38608f21c6a74327015dd08a66f1dd8e7ceac954d0953191/aiohttp-3.13.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e77a729df23be2116acc4e9de2767d8e92445fbca68886dd991dc912f473755", size = 1841443, upload-time = "2025-10-06T19:57:39.708Z" }, + { url = "https://files.pythonhosted.org/packages/df/b9/b3ab1278faa0d1b8f434c85f9cf34eeb0a25016ffe1ee6bc361d09fef0ec/aiohttp-3.13.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e88ab34826d6eeb6c67e6e92400b9ec653faf5092a35f07465f44c9f1c429f82", size = 1933652, upload-time = "2025-10-06T19:57:42.33Z" }, + { url = "https://files.pythonhosted.org/packages/88/e2/86050aaa3bd7021b115cdfc88477b754e8cf93ef0079867840eee22d3c34/aiohttp-3.13.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:019dbef24fe28ce2301419dd63a2b97250d9760ca63ee2976c2da2e3f182f82e", size = 1790682, upload-time = "2025-10-06T19:57:44.851Z" }, + { url = "https://files.pythonhosted.org/packages/78/8d/9af903324c2ba24a0c4778e9bcc738b773c98dded3a4fcf8041d5211769f/aiohttp-3.13.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:2c4aeaedd20771b7b4bcdf0ae791904445df6d856c02fc51d809d12d17cffdc7", size = 1622011, upload-time = "2025-10-06T19:57:47.025Z" }, + { url = "https://files.pythonhosted.org/packages/84/97/5174971ba4986d913554ceb248b0401eb5358cb60672ea0166f9f596cd08/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b3a8e6a2058a0240cfde542b641d0e78b594311bc1a710cbcb2e1841417d5cb3", size = 1787148, upload-time = "2025-10-06T19:57:49.149Z" }, + { url = "https://files.pythonhosted.org/packages/dd/ae/8b397e980ac613ef3ddd8e996aa7a40a1828df958257800d4bb325657db3/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:f8e38d55ca36c15f36d814ea414ecb2401d860de177c49f84a327a25b3ee752b", size = 1774816, upload-time = "2025-10-06T19:57:51.523Z" }, + { url = "https://files.pythonhosted.org/packages/c7/54/0e8e2111dd92051c787e934b6bbf30c213daaa5e7ee5f51bca8913607492/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a921edbe971aade1bf45bcbb3494e30ba6863a5c78f28be992c42de980fd9108", size = 1788610, upload-time = "2025-10-06T19:57:54.337Z" }, + { url = "https://files.pythonhosted.org/packages/fa/dd/c9283dbfd9325ed6fa6c91f009db6344d8d370a7bcf09f36e7b2fcbfae02/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:474cade59a447cb4019c0dce9f0434bf835fb558ea932f62c686fe07fe6db6a1", size = 1615498, upload-time = "2025-10-06T19:57:56.604Z" }, + { url = "https://files.pythonhosted.org/packages/8c/f6/da76230679bd9ef175d876093f89e7fd6d6476c18505e115e3026fe5ef95/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:99a303ad960747c33b65b1cb65d01a62ac73fa39b72f08a2e1efa832529b01ed", size = 1815187, upload-time = "2025-10-06T19:57:59.036Z" }, + { url = "https://files.pythonhosted.org/packages/d5/78/394003ac738703822616f4f922705b54e5b3d8e7185831ecc1c97904174d/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:bb34001fc1f05f6b323e02c278090c07a47645caae3aa77ed7ed8a3ce6abcce9", size = 1760281, upload-time = "2025-10-06T19:58:01.585Z" }, + { url = "https://files.pythonhosted.org/packages/bd/b0/4bad0a9dd5910bd01c3119f8bd3d71887cd412d4105e4acddcdacf3cfa76/aiohttp-3.13.0-cp314-cp314t-win32.whl", hash = "sha256:dea698b64235d053def7d2f08af9302a69fcd760d1c7bd9988fd5d3b6157e657", size = 462608, upload-time = "2025-10-06T19:58:03.674Z" }, + { url = "https://files.pythonhosted.org/packages/bd/af/ad12d592f623aae2bd1d3463201dc39c201ea362f9ddee0d03efd9e83720/aiohttp-3.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:1f164699a060c0b3616459d13c1464a981fddf36f892f0a5027cbd45121fb14b", size = 496010, upload-time = "2025-10-06T19:58:05.589Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, +] + +[[package]] +name = "asgiref" +version = "3.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/08/4dfec9b90758a59acc6be32ac82e98d1fbfc321cb5cfa410436dbacf821c/asgiref-3.10.0.tar.gz", hash = "sha256:d89f2d8cd8b56dada7d52fa7dc8075baa08fb836560710d38c292a7a3f78c04e", size = 37483, upload-time = "2025-10-05T09:15:06.557Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/9c/fc2331f538fbf7eedba64b2052e99ccf9ba9d6888e2f41441ee28847004b/asgiref-3.10.0-py3-none-any.whl", hash = "sha256:aef8a81283a34d0ab31630c9b7dfe70c812c95eba78171367ca8745e88124734", size = 24050, upload-time = "2025-10-05T09:15:05.11Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "aws-opentelemetry-distro" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-distro" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-exporter-otlp-proto-grpc" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-aio-pika" }, + { name = "opentelemetry-instrumentation-aiohttp-client" }, + { name = "opentelemetry-instrumentation-aiopg" }, + { name = "opentelemetry-instrumentation-asgi" }, + { name = "opentelemetry-instrumentation-asyncpg" }, + { name = "opentelemetry-instrumentation-aws-lambda" }, + { name = "opentelemetry-instrumentation-boto" }, + { name = "opentelemetry-instrumentation-boto3sqs" }, + { name = "opentelemetry-instrumentation-botocore" }, + { name = "opentelemetry-instrumentation-cassandra" }, + { name = "opentelemetry-instrumentation-celery" }, + { name = "opentelemetry-instrumentation-confluent-kafka" }, + { name = "opentelemetry-instrumentation-dbapi" }, + { name = "opentelemetry-instrumentation-django" }, + { name = "opentelemetry-instrumentation-elasticsearch" }, + { name = "opentelemetry-instrumentation-falcon" }, + { name = "opentelemetry-instrumentation-fastapi" }, + { name = "opentelemetry-instrumentation-flask" }, + { name = "opentelemetry-instrumentation-grpc" }, + { name = "opentelemetry-instrumentation-httpx" }, + { name = "opentelemetry-instrumentation-jinja2" }, + { name = "opentelemetry-instrumentation-kafka-python" }, + { name = "opentelemetry-instrumentation-logging" }, + { name = "opentelemetry-instrumentation-mysql" }, + { name = "opentelemetry-instrumentation-mysqlclient" }, + { name = "opentelemetry-instrumentation-pika" }, + { name = "opentelemetry-instrumentation-psycopg2" }, + { name = "opentelemetry-instrumentation-pymemcache" }, + { name = "opentelemetry-instrumentation-pymongo" }, + { name = "opentelemetry-instrumentation-pymysql" }, + { name = "opentelemetry-instrumentation-pyramid" }, + { name = "opentelemetry-instrumentation-redis" }, + { name = "opentelemetry-instrumentation-remoulade" }, + { name = "opentelemetry-instrumentation-requests" }, + { name = "opentelemetry-instrumentation-sqlalchemy" }, + { name = "opentelemetry-instrumentation-sqlite3" }, + { name = "opentelemetry-instrumentation-starlette" }, + { name = "opentelemetry-instrumentation-system-metrics" }, + { name = "opentelemetry-instrumentation-tornado" }, + { name = "opentelemetry-instrumentation-tortoiseorm" }, + { name = "opentelemetry-instrumentation-urllib" }, + { name = "opentelemetry-instrumentation-urllib3" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-processor-baggage" }, + { name = "opentelemetry-propagator-aws-xray" }, + { name = "opentelemetry-propagator-b3" }, + { name = "opentelemetry-propagator-jaeger" }, + { name = "opentelemetry-propagator-ot-trace" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-sdk-extension-aws" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/41/58dddd72c7503facf5d313780ae107cfb7db2f69107e5754026390f55470/aws_opentelemetry_distro-0.12.1-py3-none-any.whl", hash = "sha256:529e7879c25546fb2d8015f73a1e41b48ddd98eb5dfc49caefde4a809f7c0ad0", size = 104546, upload-time = "2025-09-16T17:14:54.778Z" }, +] + +[[package]] +name = "aws-requests-auth" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/54/b2/455c0bfcbd772dafd4c9e93c4b713e36790abf9ccbca9b8e661968b29798/aws-requests-auth-0.4.3.tar.gz", hash = "sha256:33593372018b960a31dbbe236f89421678b885c35f0b6a7abfae35bb77e069b2", size = 10096, upload-time = "2020-05-27T23:10:34.742Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/11/5dc8be418e1d54bed15eaf3a7461797e5ebb9e6a34869ad750561f35fa5b/aws_requests_auth-0.4.3-py2.py3-none-any.whl", hash = "sha256:646bc37d62140ea1c709d20148f5d43197e6bd2d63909eb36fa4bb2345759977", size = 6838, upload-time = "2020-05-27T23:10:33.658Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.14.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/77/e9/df2358efd7659577435e2177bfa69cba6c33216681af51a707193dec162a/beautifulsoup4-4.14.2.tar.gz", hash = "sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e", size = 625822, upload-time = "2025-09-29T10:05:42.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/fe/3aed5d0be4d404d12d36ab97e2f1791424d9ca39c2f754a6285d59a3b01d/beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515", size = 106392, upload-time = "2025-09-29T10:05:43.771Z" }, +] + +[[package]] +name = "bedrock-agentcore" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "urllib3" }, + { name = "uvicorn" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b0/87/4c0bacf09430e559657fc986cbb1003f76d597ab7e7365ab247dbef73940/bedrock_agentcore-0.1.7.tar.gz", hash = "sha256:e518e8f5e6fb5a5a80182db95757a20e32b0ac2b33d0a1909dfafcba950c6356", size = 263080, upload-time = "2025-10-01T16:18:39.255Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/f3/a9d961cfba236dc85f27f2f2c6eab88e12698754aaa02459ba7dfafc5062/bedrock_agentcore-0.1.7-py3-none-any.whl", hash = "sha256:441dde64fea596e9571e47ae37ee3b033e58d8d255018f13bdcde8ae8bef2075", size = 77216, upload-time = "2025-10-01T16:18:38.153Z" }, +] + +[[package]] +name = "black" +version = "25.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "pytokens" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/43/20b5c90612d7bdb2bdbcceeb53d588acca3bb8f0e4c5d5c751a2c8fdd55a/black-25.9.0.tar.gz", hash = "sha256:0474bca9a0dd1b51791fcc507a4e02078a1c63f6d4e4ae5544b9848c7adfb619", size = 648393, upload-time = "2025-09-19T00:27:37.758Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/99/3acfea65f5e79f45472c45f87ec13037b506522719cd9d4ac86484ff51ac/black-25.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0172a012f725b792c358d57fe7b6b6e8e67375dd157f64fa7a3097b3ed3e2175", size = 1742165, upload-time = "2025-09-19T00:34:10.402Z" }, + { url = "https://files.pythonhosted.org/packages/3a/18/799285282c8236a79f25d590f0222dbd6850e14b060dfaa3e720241fd772/black-25.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3bec74ee60f8dfef564b573a96b8930f7b6a538e846123d5ad77ba14a8d7a64f", size = 1581259, upload-time = "2025-09-19T00:32:49.685Z" }, + { url = "https://files.pythonhosted.org/packages/f1/ce/883ec4b6303acdeca93ee06b7622f1fa383c6b3765294824165d49b1a86b/black-25.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b756fc75871cb1bcac5499552d771822fd9db5a2bb8db2a7247936ca48f39831", size = 1655583, upload-time = "2025-09-19T00:30:44.505Z" }, + { url = "https://files.pythonhosted.org/packages/21/17/5c253aa80a0639ccc427a5c7144534b661505ae2b5a10b77ebe13fa25334/black-25.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:846d58e3ce7879ec1ffe816bb9df6d006cd9590515ed5d17db14e17666b2b357", size = 1343428, upload-time = "2025-09-19T00:32:13.839Z" }, + { url = "https://files.pythonhosted.org/packages/1b/46/863c90dcd3f9d41b109b7f19032ae0db021f0b2a81482ba0a1e28c84de86/black-25.9.0-py3-none-any.whl", hash = "sha256:474b34c1342cdc157d307b56c4c65bce916480c4a8f6551fdc6bf9b486a7c4ae", size = 203363, upload-time = "2025-09-19T00:27:35.724Z" }, +] + +[[package]] +name = "boto3" +version = "1.40.52" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/5a/8ba08c979926326d961e2384d994d789a2eda3ed281bb6cb333b36e92310/boto3-1.40.52.tar.gz", hash = "sha256:96ee720b52be647d8ef5ba92fccfce6b65d6321769430fe6edd10d57ec43c25b", size = 111530, upload-time = "2025-10-14T20:32:12.226Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/d2/879e9787c5263aefc5c88f0dd97cdea29ac01c480dd53c2421de77a493f7/boto3-1.40.52-py3-none-any.whl", hash = "sha256:ecc8c99d3cc96716cdfba62d9c9c6ce0eb98d02494a66690bcc2ec181c1ced67", size = 139345, upload-time = "2025-10-14T20:32:10.801Z" }, +] + +[[package]] +name = "botocore" +version = "1.40.52" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/74/3449d77c002d82586786b91dff6dd2e6fd52c5cdc1793d1ac7ea690ea52c/botocore-1.40.52.tar.gz", hash = "sha256:b65d970ca4ccd869639332083da17c3a933bcf495120dcc4f5c7723cb3f6216c", size = 14427680, upload-time = "2025-10-14T20:32:03.065Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/ad/559dc4097fe1368e5f3abb5d8ca496f9c609e4e452498bca11134fde1462/botocore-1.40.52-py3-none-any.whl", hash = "sha256:838697a06c7713df8d39f088105334b4eadcc3d65c7a260bf1a1bd8bf616ce4a", size = 14098823, upload-time = "2025-10-14T20:32:00.094Z" }, +] + +[[package]] +name = "certifi" +version = "2025.10.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/5b/b6ce21586237c77ce67d01dc5507039d444b630dd76611bbca2d8e5dcd91/certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43", size = 164519, upload-time = "2025-10-05T04:12:15.808Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de", size = 163286, upload-time = "2025-10-05T04:12:14.03Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/61/de6cd827efad202d7057d93e0fed9294b96952e188f7384832791c7b2254/click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4", size = 276943, upload-time = "2025-09-18T17:32:23.696Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc", size = 107295, upload-time = "2025-09-18T17:32:22.42Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "coverage" +version = "7.10.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/26/d22c300112504f5f9a9fd2297ce33c35f3d353e4aeb987c8419453b2a7c2/coverage-7.10.7.tar.gz", hash = "sha256:f4ab143ab113be368a3e9b795f9cd7906c5ef407d6173fe9675a902e1fffc239", size = 827704, upload-time = "2025-09-21T20:03:56.815Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/94/b765c1abcb613d103b64fcf10395f54d69b0ef8be6a0dd9c524384892cc7/coverage-7.10.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:981a651f543f2854abd3b5fcb3263aac581b18209be49863ba575de6edf4c14d", size = 218320, upload-time = "2025-09-21T20:01:56.629Z" }, + { url = "https://files.pythonhosted.org/packages/72/4f/732fff31c119bb73b35236dd333030f32c4bfe909f445b423e6c7594f9a2/coverage-7.10.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:73ab1601f84dc804f7812dc297e93cd99381162da39c47040a827d4e8dafe63b", size = 218575, upload-time = "2025-09-21T20:01:58.203Z" }, + { url = "https://files.pythonhosted.org/packages/87/02/ae7e0af4b674be47566707777db1aa375474f02a1d64b9323e5813a6cdd5/coverage-7.10.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a8b6f03672aa6734e700bbcd65ff050fd19cddfec4b031cc8cf1c6967de5a68e", size = 249568, upload-time = "2025-09-21T20:01:59.748Z" }, + { url = "https://files.pythonhosted.org/packages/a2/77/8c6d22bf61921a59bce5471c2f1f7ac30cd4ac50aadde72b8c48d5727902/coverage-7.10.7-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10b6ba00ab1132a0ce4428ff68cf50a25efd6840a42cdf4239c9b99aad83be8b", size = 252174, upload-time = "2025-09-21T20:02:01.192Z" }, + { url = "https://files.pythonhosted.org/packages/b1/20/b6ea4f69bbb52dac0aebd62157ba6a9dddbfe664f5af8122dac296c3ee15/coverage-7.10.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c79124f70465a150e89340de5963f936ee97097d2ef76c869708c4248c63ca49", size = 253447, upload-time = "2025-09-21T20:02:02.701Z" }, + { url = "https://files.pythonhosted.org/packages/f9/28/4831523ba483a7f90f7b259d2018fef02cb4d5b90bc7c1505d6e5a84883c/coverage-7.10.7-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:69212fbccdbd5b0e39eac4067e20a4a5256609e209547d86f740d68ad4f04911", size = 249779, upload-time = "2025-09-21T20:02:04.185Z" }, + { url = "https://files.pythonhosted.org/packages/a7/9f/4331142bc98c10ca6436d2d620c3e165f31e6c58d43479985afce6f3191c/coverage-7.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7ea7c6c9d0d286d04ed3541747e6597cbe4971f22648b68248f7ddcd329207f0", size = 251604, upload-time = "2025-09-21T20:02:06.034Z" }, + { url = "https://files.pythonhosted.org/packages/ce/60/bda83b96602036b77ecf34e6393a3836365481b69f7ed7079ab85048202b/coverage-7.10.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b9be91986841a75042b3e3243d0b3cb0b2434252b977baaf0cd56e960fe1e46f", size = 249497, upload-time = "2025-09-21T20:02:07.619Z" }, + { url = "https://files.pythonhosted.org/packages/5f/af/152633ff35b2af63977edd835d8e6430f0caef27d171edf2fc76c270ef31/coverage-7.10.7-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:b281d5eca50189325cfe1f365fafade89b14b4a78d9b40b05ddd1fc7d2a10a9c", size = 249350, upload-time = "2025-09-21T20:02:10.34Z" }, + { url = "https://files.pythonhosted.org/packages/9d/71/d92105d122bd21cebba877228990e1646d862e34a98bb3374d3fece5a794/coverage-7.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:99e4aa63097ab1118e75a848a28e40d68b08a5e19ce587891ab7fd04475e780f", size = 251111, upload-time = "2025-09-21T20:02:12.122Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9e/9fdb08f4bf476c912f0c3ca292e019aab6712c93c9344a1653986c3fd305/coverage-7.10.7-cp313-cp313-win32.whl", hash = "sha256:dc7c389dce432500273eaf48f410b37886be9208b2dd5710aaf7c57fd442c698", size = 220746, upload-time = "2025-09-21T20:02:13.919Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b1/a75fd25df44eab52d1931e89980d1ada46824c7a3210be0d3c88a44aaa99/coverage-7.10.7-cp313-cp313-win_amd64.whl", hash = "sha256:cac0fdca17b036af3881a9d2729a850b76553f3f716ccb0360ad4dbc06b3b843", size = 221541, upload-time = "2025-09-21T20:02:15.57Z" }, + { url = "https://files.pythonhosted.org/packages/14/3a/d720d7c989562a6e9a14b2c9f5f2876bdb38e9367126d118495b89c99c37/coverage-7.10.7-cp313-cp313-win_arm64.whl", hash = "sha256:4b6f236edf6e2f9ae8fcd1332da4e791c1b6ba0dc16a2dc94590ceccb482e546", size = 220170, upload-time = "2025-09-21T20:02:17.395Z" }, + { url = "https://files.pythonhosted.org/packages/bb/22/e04514bf2a735d8b0add31d2b4ab636fc02370730787c576bb995390d2d5/coverage-7.10.7-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a0ec07fd264d0745ee396b666d47cef20875f4ff2375d7c4f58235886cc1ef0c", size = 219029, upload-time = "2025-09-21T20:02:18.936Z" }, + { url = "https://files.pythonhosted.org/packages/11/0b/91128e099035ece15da3445d9015e4b4153a6059403452d324cbb0a575fa/coverage-7.10.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:dd5e856ebb7bfb7672b0086846db5afb4567a7b9714b8a0ebafd211ec7ce6a15", size = 219259, upload-time = "2025-09-21T20:02:20.44Z" }, + { url = "https://files.pythonhosted.org/packages/8b/51/66420081e72801536a091a0c8f8c1f88a5c4bf7b9b1bdc6222c7afe6dc9b/coverage-7.10.7-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f57b2a3c8353d3e04acf75b3fed57ba41f5c0646bbf1d10c7c282291c97936b4", size = 260592, upload-time = "2025-09-21T20:02:22.313Z" }, + { url = "https://files.pythonhosted.org/packages/5d/22/9b8d458c2881b22df3db5bb3e7369e63d527d986decb6c11a591ba2364f7/coverage-7.10.7-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ef2319dd15a0b009667301a3f84452a4dc6fddfd06b0c5c53ea472d3989fbf0", size = 262768, upload-time = "2025-09-21T20:02:24.287Z" }, + { url = "https://files.pythonhosted.org/packages/f7/08/16bee2c433e60913c610ea200b276e8eeef084b0d200bdcff69920bd5828/coverage-7.10.7-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83082a57783239717ceb0ad584de3c69cf581b2a95ed6bf81ea66034f00401c0", size = 264995, upload-time = "2025-09-21T20:02:26.133Z" }, + { url = "https://files.pythonhosted.org/packages/20/9d/e53eb9771d154859b084b90201e5221bca7674ba449a17c101a5031d4054/coverage-7.10.7-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:50aa94fb1fb9a397eaa19c0d5ec15a5edd03a47bf1a3a6111a16b36e190cff65", size = 259546, upload-time = "2025-09-21T20:02:27.716Z" }, + { url = "https://files.pythonhosted.org/packages/ad/b0/69bc7050f8d4e56a89fb550a1577d5d0d1db2278106f6f626464067b3817/coverage-7.10.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2120043f147bebb41c85b97ac45dd173595ff14f2a584f2963891cbcc3091541", size = 262544, upload-time = "2025-09-21T20:02:29.216Z" }, + { url = "https://files.pythonhosted.org/packages/ef/4b/2514b060dbd1bc0aaf23b852c14bb5818f244c664cb16517feff6bb3a5ab/coverage-7.10.7-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2fafd773231dd0378fdba66d339f84904a8e57a262f583530f4f156ab83863e6", size = 260308, upload-time = "2025-09-21T20:02:31.226Z" }, + { url = "https://files.pythonhosted.org/packages/54/78/7ba2175007c246d75e496f64c06e94122bdb914790a1285d627a918bd271/coverage-7.10.7-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:0b944ee8459f515f28b851728ad224fa2d068f1513ef6b7ff1efafeb2185f999", size = 258920, upload-time = "2025-09-21T20:02:32.823Z" }, + { url = "https://files.pythonhosted.org/packages/c0/b3/fac9f7abbc841409b9a410309d73bfa6cfb2e51c3fada738cb607ce174f8/coverage-7.10.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4b583b97ab2e3efe1b3e75248a9b333bd3f8b0b1b8e5b45578e05e5850dfb2c2", size = 261434, upload-time = "2025-09-21T20:02:34.86Z" }, + { url = "https://files.pythonhosted.org/packages/ee/51/a03bec00d37faaa891b3ff7387192cef20f01604e5283a5fabc95346befa/coverage-7.10.7-cp313-cp313t-win32.whl", hash = "sha256:2a78cd46550081a7909b3329e2266204d584866e8d97b898cd7fb5ac8d888b1a", size = 221403, upload-time = "2025-09-21T20:02:37.034Z" }, + { url = "https://files.pythonhosted.org/packages/53/22/3cf25d614e64bf6d8e59c7c669b20d6d940bb337bdee5900b9ca41c820bb/coverage-7.10.7-cp313-cp313t-win_amd64.whl", hash = "sha256:33a5e6396ab684cb43dc7befa386258acb2d7fae7f67330ebb85ba4ea27938eb", size = 222469, upload-time = "2025-09-21T20:02:39.011Z" }, + { url = "https://files.pythonhosted.org/packages/49/a1/00164f6d30d8a01c3c9c48418a7a5be394de5349b421b9ee019f380df2a0/coverage-7.10.7-cp313-cp313t-win_arm64.whl", hash = "sha256:86b0e7308289ddde73d863b7683f596d8d21c7d8664ce1dee061d0bcf3fbb4bb", size = 220731, upload-time = "2025-09-21T20:02:40.939Z" }, + { url = "https://files.pythonhosted.org/packages/23/9c/5844ab4ca6a4dd97a1850e030a15ec7d292b5c5cb93082979225126e35dd/coverage-7.10.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b06f260b16ead11643a5a9f955bd4b5fd76c1a4c6796aeade8520095b75de520", size = 218302, upload-time = "2025-09-21T20:02:42.527Z" }, + { url = "https://files.pythonhosted.org/packages/f0/89/673f6514b0961d1f0e20ddc242e9342f6da21eaba3489901b565c0689f34/coverage-7.10.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:212f8f2e0612778f09c55dd4872cb1f64a1f2b074393d139278ce902064d5b32", size = 218578, upload-time = "2025-09-21T20:02:44.468Z" }, + { url = "https://files.pythonhosted.org/packages/05/e8/261cae479e85232828fb17ad536765c88dd818c8470aca690b0ac6feeaa3/coverage-7.10.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3445258bcded7d4aa630ab8296dea4d3f15a255588dd535f980c193ab6b95f3f", size = 249629, upload-time = "2025-09-21T20:02:46.503Z" }, + { url = "https://files.pythonhosted.org/packages/82/62/14ed6546d0207e6eda876434e3e8475a3e9adbe32110ce896c9e0c06bb9a/coverage-7.10.7-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb45474711ba385c46a0bfe696c695a929ae69ac636cda8f532be9e8c93d720a", size = 252162, upload-time = "2025-09-21T20:02:48.689Z" }, + { url = "https://files.pythonhosted.org/packages/ff/49/07f00db9ac6478e4358165a08fb41b469a1b053212e8a00cb02f0d27a05f/coverage-7.10.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:813922f35bd800dca9994c5971883cbc0d291128a5de6b167c7aa697fcf59360", size = 253517, upload-time = "2025-09-21T20:02:50.31Z" }, + { url = "https://files.pythonhosted.org/packages/a2/59/c5201c62dbf165dfbc91460f6dbbaa85a8b82cfa6131ac45d6c1bfb52deb/coverage-7.10.7-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:93c1b03552081b2a4423091d6fb3787265b8f86af404cff98d1b5342713bdd69", size = 249632, upload-time = "2025-09-21T20:02:51.971Z" }, + { url = "https://files.pythonhosted.org/packages/07/ae/5920097195291a51fb00b3a70b9bbd2edbfe3c84876a1762bd1ef1565ebc/coverage-7.10.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cc87dd1b6eaf0b848eebb1c86469b9f72a1891cb42ac7adcfbce75eadb13dd14", size = 251520, upload-time = "2025-09-21T20:02:53.858Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3c/a815dde77a2981f5743a60b63df31cb322c944843e57dbd579326625a413/coverage-7.10.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:39508ffda4f343c35f3236fe8d1a6634a51f4581226a1262769d7f970e73bffe", size = 249455, upload-time = "2025-09-21T20:02:55.807Z" }, + { url = "https://files.pythonhosted.org/packages/aa/99/f5cdd8421ea656abefb6c0ce92556709db2265c41e8f9fc6c8ae0f7824c9/coverage-7.10.7-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:925a1edf3d810537c5a3abe78ec5530160c5f9a26b1f4270b40e62cc79304a1e", size = 249287, upload-time = "2025-09-21T20:02:57.784Z" }, + { url = "https://files.pythonhosted.org/packages/c3/7a/e9a2da6a1fc5d007dd51fca083a663ab930a8c4d149c087732a5dbaa0029/coverage-7.10.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2c8b9a0636f94c43cd3576811e05b89aa9bc2d0a85137affc544ae5cb0e4bfbd", size = 250946, upload-time = "2025-09-21T20:02:59.431Z" }, + { url = "https://files.pythonhosted.org/packages/ef/5b/0b5799aa30380a949005a353715095d6d1da81927d6dbed5def2200a4e25/coverage-7.10.7-cp314-cp314-win32.whl", hash = "sha256:b7b8288eb7cdd268b0304632da8cb0bb93fadcfec2fe5712f7b9cc8f4d487be2", size = 221009, upload-time = "2025-09-21T20:03:01.324Z" }, + { url = "https://files.pythonhosted.org/packages/da/b0/e802fbb6eb746de006490abc9bb554b708918b6774b722bb3a0e6aa1b7de/coverage-7.10.7-cp314-cp314-win_amd64.whl", hash = "sha256:1ca6db7c8807fb9e755d0379ccc39017ce0a84dcd26d14b5a03b78563776f681", size = 221804, upload-time = "2025-09-21T20:03:03.4Z" }, + { url = "https://files.pythonhosted.org/packages/9e/e8/71d0c8e374e31f39e3389bb0bd19e527d46f00ea8571ec7ec8fd261d8b44/coverage-7.10.7-cp314-cp314-win_arm64.whl", hash = "sha256:097c1591f5af4496226d5783d036bf6fd6cd0cbc132e071b33861de756efb880", size = 220384, upload-time = "2025-09-21T20:03:05.111Z" }, + { url = "https://files.pythonhosted.org/packages/62/09/9a5608d319fa3eba7a2019addeacb8c746fb50872b57a724c9f79f146969/coverage-7.10.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a62c6ef0d50e6de320c270ff91d9dd0a05e7250cac2a800b7784bae474506e63", size = 219047, upload-time = "2025-09-21T20:03:06.795Z" }, + { url = "https://files.pythonhosted.org/packages/f5/6f/f58d46f33db9f2e3647b2d0764704548c184e6f5e014bef528b7f979ef84/coverage-7.10.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9fa6e4dd51fe15d8738708a973470f67a855ca50002294852e9571cdbd9433f2", size = 219266, upload-time = "2025-09-21T20:03:08.495Z" }, + { url = "https://files.pythonhosted.org/packages/74/5c/183ffc817ba68e0b443b8c934c8795553eb0c14573813415bd59941ee165/coverage-7.10.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8fb190658865565c549b6b4706856d6a7b09302c797eb2cf8e7fe9dabb043f0d", size = 260767, upload-time = "2025-09-21T20:03:10.172Z" }, + { url = "https://files.pythonhosted.org/packages/0f/48/71a8abe9c1ad7e97548835e3cc1adbf361e743e9d60310c5f75c9e7bf847/coverage-7.10.7-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:affef7c76a9ef259187ef31599a9260330e0335a3011732c4b9effa01e1cd6e0", size = 262931, upload-time = "2025-09-21T20:03:11.861Z" }, + { url = "https://files.pythonhosted.org/packages/84/fd/193a8fb132acfc0a901f72020e54be5e48021e1575bb327d8ee1097a28fd/coverage-7.10.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e16e07d85ca0cf8bafe5f5d23a0b850064e8e945d5677492b06bbe6f09cc699", size = 265186, upload-time = "2025-09-21T20:03:13.539Z" }, + { url = "https://files.pythonhosted.org/packages/b1/8f/74ecc30607dd95ad50e3034221113ccb1c6d4e8085cc761134782995daae/coverage-7.10.7-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:03ffc58aacdf65d2a82bbeb1ffe4d01ead4017a21bfd0454983b88ca73af94b9", size = 259470, upload-time = "2025-09-21T20:03:15.584Z" }, + { url = "https://files.pythonhosted.org/packages/0f/55/79ff53a769f20d71b07023ea115c9167c0bb56f281320520cf64c5298a96/coverage-7.10.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1b4fd784344d4e52647fd7857b2af5b3fbe6c239b0b5fa63e94eb67320770e0f", size = 262626, upload-time = "2025-09-21T20:03:17.673Z" }, + { url = "https://files.pythonhosted.org/packages/88/e2/dac66c140009b61ac3fc13af673a574b00c16efdf04f9b5c740703e953c0/coverage-7.10.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:0ebbaddb2c19b71912c6f2518e791aa8b9f054985a0769bdb3a53ebbc765c6a1", size = 260386, upload-time = "2025-09-21T20:03:19.36Z" }, + { url = "https://files.pythonhosted.org/packages/a2/f1/f48f645e3f33bb9ca8a496bc4a9671b52f2f353146233ebd7c1df6160440/coverage-7.10.7-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:a2d9a3b260cc1d1dbdb1c582e63ddcf5363426a1a68faa0f5da28d8ee3c722a0", size = 258852, upload-time = "2025-09-21T20:03:21.007Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3b/8442618972c51a7affeead957995cfa8323c0c9bcf8fa5a027421f720ff4/coverage-7.10.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a3cc8638b2480865eaa3926d192e64ce6c51e3d29c849e09d5b4ad95efae5399", size = 261534, upload-time = "2025-09-21T20:03:23.12Z" }, + { url = "https://files.pythonhosted.org/packages/b2/dc/101f3fa3a45146db0cb03f5b4376e24c0aac818309da23e2de0c75295a91/coverage-7.10.7-cp314-cp314t-win32.whl", hash = "sha256:67f8c5cbcd3deb7a60b3345dffc89a961a484ed0af1f6f73de91705cc6e31235", size = 221784, upload-time = "2025-09-21T20:03:24.769Z" }, + { url = "https://files.pythonhosted.org/packages/4c/a1/74c51803fc70a8a40d7346660379e144be772bab4ac7bb6e6b905152345c/coverage-7.10.7-cp314-cp314t-win_amd64.whl", hash = "sha256:e1ed71194ef6dea7ed2d5cb5f7243d4bcd334bfb63e59878519be558078f848d", size = 222905, upload-time = "2025-09-21T20:03:26.93Z" }, + { url = "https://files.pythonhosted.org/packages/12/65/f116a6d2127df30bcafbceef0302d8a64ba87488bf6f73a6d8eebf060873/coverage-7.10.7-cp314-cp314t-win_arm64.whl", hash = "sha256:7fe650342addd8524ca63d77b2362b02345e5f1a093266787d210c70a50b471a", size = 220922, upload-time = "2025-09-21T20:03:28.672Z" }, + { url = "https://files.pythonhosted.org/packages/ec/16/114df1c291c22cac3b0c127a73e0af5c12ed7bbb6558d310429a0ae24023/coverage-7.10.7-py3-none-any.whl", hash = "sha256:f7941f6f2fe6dd6807a1208737b8a0cbcf1cc6d7b07d24998ad2d63590868260", size = 209952, upload-time = "2025-09-21T20:03:53.918Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4a/9b/e301418629f7bfdf72db9e80ad6ed9d1b83c487c471803eaa6464c511a01/cryptography-46.0.2.tar.gz", hash = "sha256:21b6fc8c71a3f9a604f028a329e5560009cc4a3a828bfea5fcba8eb7647d88fe", size = 749293, upload-time = "2025-10-01T00:29:11.856Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/98/7a8df8c19a335c8028414738490fc3955c0cecbfdd37fcc1b9c3d04bd561/cryptography-46.0.2-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:f3e32ab7dd1b1ef67b9232c4cf5e2ee4cd517d4316ea910acaaa9c5712a1c663", size = 7261255, upload-time = "2025-10-01T00:27:22.947Z" }, + { url = "https://files.pythonhosted.org/packages/c6/38/b2adb2aa1baa6706adc3eb746691edd6f90a656a9a65c3509e274d15a2b8/cryptography-46.0.2-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1fd1a69086926b623ef8126b4c33d5399ce9e2f3fac07c9c734c2a4ec38b6d02", size = 4297596, upload-time = "2025-10-01T00:27:25.258Z" }, + { url = "https://files.pythonhosted.org/packages/e4/27/0f190ada240003119488ae66c897b5e97149292988f556aef4a6a2a57595/cryptography-46.0.2-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bb7fb9cd44c2582aa5990cf61a4183e6f54eea3172e54963787ba47287edd135", size = 4450899, upload-time = "2025-10-01T00:27:27.458Z" }, + { url = "https://files.pythonhosted.org/packages/85/d5/e4744105ab02fdf6bb58ba9a816e23b7a633255987310b4187d6745533db/cryptography-46.0.2-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9066cfd7f146f291869a9898b01df1c9b0e314bfa182cef432043f13fc462c92", size = 4300382, upload-time = "2025-10-01T00:27:29.091Z" }, + { url = "https://files.pythonhosted.org/packages/33/fb/bf9571065c18c04818cb07de90c43fc042c7977c68e5de6876049559c72f/cryptography-46.0.2-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:97e83bf4f2f2c084d8dd792d13841d0a9b241643151686010866bbd076b19659", size = 4017347, upload-time = "2025-10-01T00:27:30.767Z" }, + { url = "https://files.pythonhosted.org/packages/35/72/fc51856b9b16155ca071080e1a3ad0c3a8e86616daf7eb018d9565b99baa/cryptography-46.0.2-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:4a766d2a5d8127364fd936572c6e6757682fc5dfcbdba1632d4554943199f2fa", size = 4983500, upload-time = "2025-10-01T00:27:32.741Z" }, + { url = "https://files.pythonhosted.org/packages/c1/53/0f51e926799025e31746d454ab2e36f8c3f0d41592bc65cb9840368d3275/cryptography-46.0.2-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:fab8f805e9675e61ed8538f192aad70500fa6afb33a8803932999b1049363a08", size = 4482591, upload-time = "2025-10-01T00:27:34.869Z" }, + { url = "https://files.pythonhosted.org/packages/86/96/4302af40b23ab8aa360862251fb8fc450b2a06ff24bc5e261c2007f27014/cryptography-46.0.2-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:1e3b6428a3d56043bff0bb85b41c535734204e599c1c0977e1d0f261b02f3ad5", size = 4300019, upload-time = "2025-10-01T00:27:37.029Z" }, + { url = "https://files.pythonhosted.org/packages/9b/59/0be12c7fcc4c5e34fe2b665a75bc20958473047a30d095a7657c218fa9e8/cryptography-46.0.2-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:1a88634851d9b8de8bb53726f4300ab191d3b2f42595e2581a54b26aba71b7cc", size = 4950006, upload-time = "2025-10-01T00:27:40.272Z" }, + { url = "https://files.pythonhosted.org/packages/55/1d/42fda47b0111834b49e31590ae14fd020594d5e4dadd639bce89ad790fba/cryptography-46.0.2-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:be939b99d4e091eec9a2bcf41aaf8f351f312cd19ff74b5c83480f08a8a43e0b", size = 4482088, upload-time = "2025-10-01T00:27:42.668Z" }, + { url = "https://files.pythonhosted.org/packages/17/50/60f583f69aa1602c2bdc7022dae86a0d2b837276182f8c1ec825feb9b874/cryptography-46.0.2-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f13b040649bc18e7eb37936009b24fd31ca095a5c647be8bb6aaf1761142bd1", size = 4425599, upload-time = "2025-10-01T00:27:44.616Z" }, + { url = "https://files.pythonhosted.org/packages/d1/57/d8d4134cd27e6e94cf44adb3f3489f935bde85f3a5508e1b5b43095b917d/cryptography-46.0.2-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bdc25e4e01b261a8fda4e98618f1c9515febcecebc9566ddf4a70c63967043b", size = 4697458, upload-time = "2025-10-01T00:27:46.209Z" }, + { url = "https://files.pythonhosted.org/packages/d1/2b/531e37408573e1da33adfb4c58875013ee8ac7d548d1548967d94a0ae5c4/cryptography-46.0.2-cp311-abi3-win32.whl", hash = "sha256:8b9bf67b11ef9e28f4d78ff88b04ed0929fcd0e4f70bb0f704cfc32a5c6311ee", size = 3056077, upload-time = "2025-10-01T00:27:48.424Z" }, + { url = "https://files.pythonhosted.org/packages/a8/cd/2f83cafd47ed2dc5a3a9c783ff5d764e9e70d3a160e0df9a9dcd639414ce/cryptography-46.0.2-cp311-abi3-win_amd64.whl", hash = "sha256:758cfc7f4c38c5c5274b55a57ef1910107436f4ae842478c4989abbd24bd5acb", size = 3512585, upload-time = "2025-10-01T00:27:50.521Z" }, + { url = "https://files.pythonhosted.org/packages/00/36/676f94e10bfaa5c5b86c469ff46d3e0663c5dc89542f7afbadac241a3ee4/cryptography-46.0.2-cp311-abi3-win_arm64.whl", hash = "sha256:218abd64a2e72f8472c2102febb596793347a3e65fafbb4ad50519969da44470", size = 2927474, upload-time = "2025-10-01T00:27:52.91Z" }, + { url = "https://files.pythonhosted.org/packages/6f/cc/47fc6223a341f26d103cb6da2216805e08a37d3b52bee7f3b2aee8066f95/cryptography-46.0.2-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:bda55e8dbe8533937956c996beaa20266a8eca3570402e52ae52ed60de1faca8", size = 7198626, upload-time = "2025-10-01T00:27:54.8Z" }, + { url = "https://files.pythonhosted.org/packages/93/22/d66a8591207c28bbe4ac7afa25c4656dc19dc0db29a219f9809205639ede/cryptography-46.0.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e7155c0b004e936d381b15425273aee1cebc94f879c0ce82b0d7fecbf755d53a", size = 4287584, upload-time = "2025-10-01T00:27:57.018Z" }, + { url = "https://files.pythonhosted.org/packages/8c/3e/fac3ab6302b928e0398c269eddab5978e6c1c50b2b77bb5365ffa8633b37/cryptography-46.0.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a61c154cc5488272a6c4b86e8d5beff4639cdb173d75325ce464d723cda0052b", size = 4433796, upload-time = "2025-10-01T00:27:58.631Z" }, + { url = "https://files.pythonhosted.org/packages/7d/d8/24392e5d3c58e2d83f98fe5a2322ae343360ec5b5b93fe18bc52e47298f5/cryptography-46.0.2-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:9ec3f2e2173f36a9679d3b06d3d01121ab9b57c979de1e6a244b98d51fea1b20", size = 4292126, upload-time = "2025-10-01T00:28:00.643Z" }, + { url = "https://files.pythonhosted.org/packages/ed/38/3d9f9359b84c16c49a5a336ee8be8d322072a09fac17e737f3bb11f1ce64/cryptography-46.0.2-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2fafb6aa24e702bbf74de4cb23bfa2c3beb7ab7683a299062b69724c92e0fa73", size = 3993056, upload-time = "2025-10-01T00:28:02.8Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a3/4c44fce0d49a4703cc94bfbe705adebf7ab36efe978053742957bc7ec324/cryptography-46.0.2-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:0c7ffe8c9b1fcbb07a26d7c9fa5e857c2fe80d72d7b9e0353dcf1d2180ae60ee", size = 4967604, upload-time = "2025-10-01T00:28:04.783Z" }, + { url = "https://files.pythonhosted.org/packages/eb/c2/49d73218747c8cac16bb8318a5513fde3129e06a018af3bc4dc722aa4a98/cryptography-46.0.2-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:5840f05518caa86b09d23f8b9405a7b6d5400085aa14a72a98fdf5cf1568c0d2", size = 4465367, upload-time = "2025-10-01T00:28:06.864Z" }, + { url = "https://files.pythonhosted.org/packages/1b/64/9afa7d2ee742f55ca6285a54386ed2778556a4ed8871571cb1c1bfd8db9e/cryptography-46.0.2-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:27c53b4f6a682a1b645fbf1cd5058c72cf2f5aeba7d74314c36838c7cbc06e0f", size = 4291678, upload-time = "2025-10-01T00:28:08.982Z" }, + { url = "https://files.pythonhosted.org/packages/50/48/1696d5ea9623a7b72ace87608f6899ca3c331709ac7ebf80740abb8ac673/cryptography-46.0.2-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:512c0250065e0a6b286b2db4bbcc2e67d810acd53eb81733e71314340366279e", size = 4931366, upload-time = "2025-10-01T00:28:10.74Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3c/9dfc778401a334db3b24435ee0733dd005aefb74afe036e2d154547cb917/cryptography-46.0.2-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:07c0eb6657c0e9cca5891f4e35081dbf985c8131825e21d99b4f440a8f496f36", size = 4464738, upload-time = "2025-10-01T00:28:12.491Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b1/abcde62072b8f3fd414e191a6238ce55a0050e9738090dc6cded24c12036/cryptography-46.0.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:48b983089378f50cba258f7f7aa28198c3f6e13e607eaf10472c26320332ca9a", size = 4419305, upload-time = "2025-10-01T00:28:14.145Z" }, + { url = "https://files.pythonhosted.org/packages/c7/1f/3d2228492f9391395ca34c677e8f2571fb5370fe13dc48c1014f8c509864/cryptography-46.0.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e6f6775eaaa08c0eec73e301f7592f4367ccde5e4e4df8e58320f2ebf161ea2c", size = 4681201, upload-time = "2025-10-01T00:28:15.951Z" }, + { url = "https://files.pythonhosted.org/packages/de/77/b687745804a93a55054f391528fcfc76c3d6bfd082ce9fb62c12f0d29fc1/cryptography-46.0.2-cp314-cp314t-win32.whl", hash = "sha256:e8633996579961f9b5a3008683344c2558d38420029d3c0bc7ff77c17949a4e1", size = 3022492, upload-time = "2025-10-01T00:28:17.643Z" }, + { url = "https://files.pythonhosted.org/packages/60/a5/8d498ef2996e583de0bef1dcc5e70186376f00883ae27bf2133f490adf21/cryptography-46.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:48c01988ecbb32979bb98731f5c2b2f79042a6c58cc9a319c8c2f9987c7f68f9", size = 3496215, upload-time = "2025-10-01T00:28:19.272Z" }, + { url = "https://files.pythonhosted.org/packages/56/db/ee67aaef459a2706bc302b15889a1a8126ebe66877bab1487ae6ad00f33d/cryptography-46.0.2-cp314-cp314t-win_arm64.whl", hash = "sha256:8e2ad4d1a5899b7caa3a450e33ee2734be7cc0689010964703a7c4bcc8dd4fd0", size = 2919255, upload-time = "2025-10-01T00:28:21.115Z" }, + { url = "https://files.pythonhosted.org/packages/d5/bb/fa95abcf147a1b0bb94d95f53fbb09da77b24c776c5d87d36f3d94521d2c/cryptography-46.0.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a08e7401a94c002e79dc3bc5231b6558cd4b2280ee525c4673f650a37e2c7685", size = 7248090, upload-time = "2025-10-01T00:28:22.846Z" }, + { url = "https://files.pythonhosted.org/packages/b7/66/f42071ce0e3ffbfa80a88feadb209c779fda92a23fbc1e14f74ebf72ef6b/cryptography-46.0.2-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d30bc11d35743bf4ddf76674a0a369ec8a21f87aaa09b0661b04c5f6c46e8d7b", size = 4293123, upload-time = "2025-10-01T00:28:25.072Z" }, + { url = "https://files.pythonhosted.org/packages/a8/5d/1fdbd2e5c1ba822828d250e5a966622ef00185e476d1cd2726b6dd135e53/cryptography-46.0.2-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bca3f0ce67e5a2a2cf524e86f44697c4323a86e0fd7ba857de1c30d52c11ede1", size = 4439524, upload-time = "2025-10-01T00:28:26.808Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c1/5e4989a7d102d4306053770d60f978c7b6b1ea2ff8c06e0265e305b23516/cryptography-46.0.2-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ff798ad7a957a5021dcbab78dfff681f0cf15744d0e6af62bd6746984d9c9e9c", size = 4297264, upload-time = "2025-10-01T00:28:29.327Z" }, + { url = "https://files.pythonhosted.org/packages/28/78/b56f847d220cb1d6d6aef5a390e116ad603ce13a0945a3386a33abc80385/cryptography-46.0.2-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:cb5e8daac840e8879407acbe689a174f5ebaf344a062f8918e526824eb5d97af", size = 4011872, upload-time = "2025-10-01T00:28:31.479Z" }, + { url = "https://files.pythonhosted.org/packages/e1/80/2971f214b066b888944f7b57761bf709ee3f2cf805619a18b18cab9b263c/cryptography-46.0.2-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:3f37aa12b2d91e157827d90ce78f6180f0c02319468a0aea86ab5a9566da644b", size = 4978458, upload-time = "2025-10-01T00:28:33.267Z" }, + { url = "https://files.pythonhosted.org/packages/a5/84/0cb0a2beaa4f1cbe63ebec4e97cd7e0e9f835d0ba5ee143ed2523a1e0016/cryptography-46.0.2-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5e38f203160a48b93010b07493c15f2babb4e0f2319bbd001885adb3f3696d21", size = 4472195, upload-time = "2025-10-01T00:28:36.039Z" }, + { url = "https://files.pythonhosted.org/packages/30/8b/2b542ddbf78835c7cd67b6fa79e95560023481213a060b92352a61a10efe/cryptography-46.0.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:d19f5f48883752b5ab34cff9e2f7e4a7f216296f33714e77d1beb03d108632b6", size = 4296791, upload-time = "2025-10-01T00:28:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/78/12/9065b40201b4f4876e93b9b94d91feb18de9150d60bd842a16a21565007f/cryptography-46.0.2-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:04911b149eae142ccd8c9a68892a70c21613864afb47aba92d8c7ed9cc001023", size = 4939629, upload-time = "2025-10-01T00:28:39.654Z" }, + { url = "https://files.pythonhosted.org/packages/f6/9e/6507dc048c1b1530d372c483dfd34e7709fc542765015425f0442b08547f/cryptography-46.0.2-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:8b16c1ede6a937c291d41176934268e4ccac2c6521c69d3f5961c5a1e11e039e", size = 4471988, upload-time = "2025-10-01T00:28:41.822Z" }, + { url = "https://files.pythonhosted.org/packages/b1/86/d025584a5f7d5c5ec8d3633dbcdce83a0cd579f1141ceada7817a4c26934/cryptography-46.0.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:747b6f4a4a23d5a215aadd1d0b12233b4119c4313df83ab4137631d43672cc90", size = 4422989, upload-time = "2025-10-01T00:28:43.608Z" }, + { url = "https://files.pythonhosted.org/packages/4b/39/536370418b38a15a61bbe413006b79dfc3d2b4b0eafceb5581983f973c15/cryptography-46.0.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6b275e398ab3a7905e168c036aad54b5969d63d3d9099a0a66cc147a3cc983be", size = 4685578, upload-time = "2025-10-01T00:28:45.361Z" }, + { url = "https://files.pythonhosted.org/packages/15/52/ea7e2b1910f547baed566c866fbb86de2402e501a89ecb4871ea7f169a81/cryptography-46.0.2-cp38-abi3-win32.whl", hash = "sha256:0b507c8e033307e37af61cb9f7159b416173bdf5b41d11c4df2e499a1d8e007c", size = 3036711, upload-time = "2025-10-01T00:28:47.096Z" }, + { url = "https://files.pythonhosted.org/packages/71/9e/171f40f9c70a873e73c2efcdbe91e1d4b1777a03398fa1c4af3c56a2477a/cryptography-46.0.2-cp38-abi3-win_amd64.whl", hash = "sha256:f9b2dc7668418fb6f221e4bf701f716e05e8eadb4f1988a2487b11aedf8abe62", size = 3500007, upload-time = "2025-10-01T00:28:48.967Z" }, + { url = "https://files.pythonhosted.org/packages/3e/7c/15ad426257615f9be8caf7f97990cf3dcbb5b8dd7ed7e0db581a1c4759dd/cryptography-46.0.2-cp38-abi3-win_arm64.whl", hash = "sha256:91447f2b17e83c9e0c89f133119d83f94ce6e0fb55dd47da0a959316e6e9cfa1", size = 2918153, upload-time = "2025-10-01T00:28:51.003Z" }, +] + +[[package]] +name = "deprecated" +version = "1.2.18" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/97/06afe62762c9a8a86af0cfb7bfdab22a43ad17138b07af5b1a58442690a2/deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d", size = 2928744, upload-time = "2025-01-27T10:46:25.7Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/c6/ac0b6c1e2d138f1002bcf799d330bd6d85084fece321e662a14223794041/Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec", size = 9998, upload-time = "2025-01-27T10:46:09.186Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "docstring-parser" +version = "0.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/f5/c831fac6cc817d26fd54c7eaccd04ef7e0288806943f7cc5bbf69f3ac1f0/frozenlist-1.8.0.tar.gz", hash = "sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad", size = 45875, upload-time = "2025-10-06T05:38:17.865Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/40/0832c31a37d60f60ed79e9dfb5a92e1e2af4f40a16a29abcc7992af9edff/frozenlist-1.8.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8d92f1a84bb12d9e56f818b3a746f3efba93c1b63c8387a73dde655e1e42282a", size = 85717, upload-time = "2025-10-06T05:36:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/30/ba/b0b3de23f40bc55a7057bd38434e25c34fa48e17f20ee273bbde5e0650f3/frozenlist-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96153e77a591c8adc2ee805756c61f59fef4cf4073a9275ee86fe8cba41241f7", size = 49651, upload-time = "2025-10-06T05:36:28.855Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ab/6e5080ee374f875296c4243c381bbdef97a9ac39c6e3ce1d5f7d42cb78d6/frozenlist-1.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f21f00a91358803399890ab167098c131ec2ddd5f8f5fd5fe9c9f2c6fcd91e40", size = 49417, upload-time = "2025-10-06T05:36:29.877Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4e/e4691508f9477ce67da2015d8c00acd751e6287739123113a9fca6f1604e/frozenlist-1.8.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fb30f9626572a76dfe4293c7194a09fb1fe93ba94c7d4f720dfae3b646b45027", size = 234391, upload-time = "2025-10-06T05:36:31.301Z" }, + { url = "https://files.pythonhosted.org/packages/40/76/c202df58e3acdf12969a7895fd6f3bc016c642e6726aa63bd3025e0fc71c/frozenlist-1.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaa352d7047a31d87dafcacbabe89df0aa506abb5b1b85a2fb91bc3faa02d822", size = 233048, upload-time = "2025-10-06T05:36:32.531Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c0/8746afb90f17b73ca5979c7a3958116e105ff796e718575175319b5bb4ce/frozenlist-1.8.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:03ae967b4e297f58f8c774c7eabcce57fe3c2434817d4385c50661845a058121", size = 226549, upload-time = "2025-10-06T05:36:33.706Z" }, + { url = "https://files.pythonhosted.org/packages/7e/eb/4c7eefc718ff72f9b6c4893291abaae5fbc0c82226a32dcd8ef4f7a5dbef/frozenlist-1.8.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6292f1de555ffcc675941d65fffffb0a5bcd992905015f85d0592201793e0e5", size = 239833, upload-time = "2025-10-06T05:36:34.947Z" }, + { url = "https://files.pythonhosted.org/packages/c2/4e/e5c02187cf704224f8b21bee886f3d713ca379535f16893233b9d672ea71/frozenlist-1.8.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29548f9b5b5e3460ce7378144c3010363d8035cea44bc0bf02d57f5a685e084e", size = 245363, upload-time = "2025-10-06T05:36:36.534Z" }, + { url = "https://files.pythonhosted.org/packages/1f/96/cb85ec608464472e82ad37a17f844889c36100eed57bea094518bf270692/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ec3cc8c5d4084591b4237c0a272cc4f50a5b03396a47d9caaf76f5d7b38a4f11", size = 229314, upload-time = "2025-10-06T05:36:38.582Z" }, + { url = "https://files.pythonhosted.org/packages/5d/6f/4ae69c550e4cee66b57887daeebe006fe985917c01d0fff9caab9883f6d0/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:517279f58009d0b1f2e7c1b130b377a349405da3f7621ed6bfae50b10adf20c1", size = 243365, upload-time = "2025-10-06T05:36:40.152Z" }, + { url = "https://files.pythonhosted.org/packages/7a/58/afd56de246cf11780a40a2c28dc7cbabbf06337cc8ddb1c780a2d97e88d8/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:db1e72ede2d0d7ccb213f218df6a078a9c09a7de257c2fe8fcef16d5925230b1", size = 237763, upload-time = "2025-10-06T05:36:41.355Z" }, + { url = "https://files.pythonhosted.org/packages/cb/36/cdfaf6ed42e2644740d4a10452d8e97fa1c062e2a8006e4b09f1b5fd7d63/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b4dec9482a65c54a5044486847b8a66bf10c9cb4926d42927ec4e8fd5db7fed8", size = 240110, upload-time = "2025-10-06T05:36:42.716Z" }, + { url = "https://files.pythonhosted.org/packages/03/a8/9ea226fbefad669f11b52e864c55f0bd57d3c8d7eb07e9f2e9a0b39502e1/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:21900c48ae04d13d416f0e1e0c4d81f7931f73a9dfa0b7a8746fb2fe7dd970ed", size = 233717, upload-time = "2025-10-06T05:36:44.251Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0b/1b5531611e83ba7d13ccc9988967ea1b51186af64c42b7a7af465dcc9568/frozenlist-1.8.0-cp313-cp313-win32.whl", hash = "sha256:8b7b94a067d1c504ee0b16def57ad5738701e4ba10cec90529f13fa03c833496", size = 39628, upload-time = "2025-10-06T05:36:45.423Z" }, + { url = "https://files.pythonhosted.org/packages/d8/cf/174c91dbc9cc49bc7b7aab74d8b734e974d1faa8f191c74af9b7e80848e6/frozenlist-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:878be833caa6a3821caf85eb39c5ba92d28e85df26d57afb06b35b2efd937231", size = 43882, upload-time = "2025-10-06T05:36:46.796Z" }, + { url = "https://files.pythonhosted.org/packages/c1/17/502cd212cbfa96eb1388614fe39a3fc9ab87dbbe042b66f97acb57474834/frozenlist-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:44389d135b3ff43ba8cc89ff7f51f5a0bb6b63d829c8300f79a2fe4fe61bcc62", size = 39676, upload-time = "2025-10-06T05:36:47.8Z" }, + { url = "https://files.pythonhosted.org/packages/d2/5c/3bbfaa920dfab09e76946a5d2833a7cbdf7b9b4a91c714666ac4855b88b4/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e25ac20a2ef37e91c1b39938b591457666a0fa835c7783c3a8f33ea42870db94", size = 89235, upload-time = "2025-10-06T05:36:48.78Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d6/f03961ef72166cec1687e84e8925838442b615bd0b8854b54923ce5b7b8a/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07cdca25a91a4386d2e76ad992916a85038a9b97561bf7a3fd12d5d9ce31870c", size = 50742, upload-time = "2025-10-06T05:36:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/1e/bb/a6d12b7ba4c3337667d0e421f7181c82dda448ce4e7ad7ecd249a16fa806/frozenlist-1.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e0c11f2cc6717e0a741f84a527c52616140741cd812a50422f83dc31749fb52", size = 51725, upload-time = "2025-10-06T05:36:50.851Z" }, + { url = "https://files.pythonhosted.org/packages/bc/71/d1fed0ffe2c2ccd70b43714c6cab0f4188f09f8a67a7914a6b46ee30f274/frozenlist-1.8.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b3210649ee28062ea6099cfda39e147fa1bc039583c8ee4481cb7811e2448c51", size = 284533, upload-time = "2025-10-06T05:36:51.898Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/fb1685a7b009d89f9bf78a42d94461bc06581f6e718c39344754a5d9bada/frozenlist-1.8.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:581ef5194c48035a7de2aefc72ac6539823bb71508189e5de01d60c9dcd5fa65", size = 292506, upload-time = "2025-10-06T05:36:53.101Z" }, + { url = "https://files.pythonhosted.org/packages/e6/3b/b991fe1612703f7e0d05c0cf734c1b77aaf7c7d321df4572e8d36e7048c8/frozenlist-1.8.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3ef2d026f16a2b1866e1d86fc4e1291e1ed8a387b2c333809419a2f8b3a77b82", size = 274161, upload-time = "2025-10-06T05:36:54.309Z" }, + { url = "https://files.pythonhosted.org/packages/ca/ec/c5c618767bcdf66e88945ec0157d7f6c4a1322f1473392319b7a2501ded7/frozenlist-1.8.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5500ef82073f599ac84d888e3a8c1f77ac831183244bfd7f11eaa0289fb30714", size = 294676, upload-time = "2025-10-06T05:36:55.566Z" }, + { url = "https://files.pythonhosted.org/packages/7c/ce/3934758637d8f8a88d11f0585d6495ef54b2044ed6ec84492a91fa3b27aa/frozenlist-1.8.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:50066c3997d0091c411a66e710f4e11752251e6d2d73d70d8d5d4c76442a199d", size = 300638, upload-time = "2025-10-06T05:36:56.758Z" }, + { url = "https://files.pythonhosted.org/packages/fc/4f/a7e4d0d467298f42de4b41cbc7ddaf19d3cfeabaf9ff97c20c6c7ee409f9/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:5c1c8e78426e59b3f8005e9b19f6ff46e5845895adbde20ece9218319eca6506", size = 283067, upload-time = "2025-10-06T05:36:57.965Z" }, + { url = "https://files.pythonhosted.org/packages/dc/48/c7b163063d55a83772b268e6d1affb960771b0e203b632cfe09522d67ea5/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:eefdba20de0d938cec6a89bd4d70f346a03108a19b9df4248d3cf0d88f1b0f51", size = 292101, upload-time = "2025-10-06T05:36:59.237Z" }, + { url = "https://files.pythonhosted.org/packages/9f/d0/2366d3c4ecdc2fd391e0afa6e11500bfba0ea772764d631bbf82f0136c9d/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cf253e0e1c3ceb4aaff6df637ce033ff6535fb8c70a764a8f46aafd3d6ab798e", size = 289901, upload-time = "2025-10-06T05:37:00.811Z" }, + { url = "https://files.pythonhosted.org/packages/b8/94/daff920e82c1b70e3618a2ac39fbc01ae3e2ff6124e80739ce5d71c9b920/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:032efa2674356903cd0261c4317a561a6850f3ac864a63fc1583147fb05a79b0", size = 289395, upload-time = "2025-10-06T05:37:02.115Z" }, + { url = "https://files.pythonhosted.org/packages/e3/20/bba307ab4235a09fdcd3cc5508dbabd17c4634a1af4b96e0f69bfe551ebd/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6da155091429aeba16851ecb10a9104a108bcd32f6c1642867eadaee401c1c41", size = 283659, upload-time = "2025-10-06T05:37:03.711Z" }, + { url = "https://files.pythonhosted.org/packages/fd/00/04ca1c3a7a124b6de4f8a9a17cc2fcad138b4608e7a3fc5877804b8715d7/frozenlist-1.8.0-cp313-cp313t-win32.whl", hash = "sha256:0f96534f8bfebc1a394209427d0f8a63d343c9779cda6fc25e8e121b5fd8555b", size = 43492, upload-time = "2025-10-06T05:37:04.915Z" }, + { url = "https://files.pythonhosted.org/packages/59/5e/c69f733a86a94ab10f68e496dc6b7e8bc078ebb415281d5698313e3af3a1/frozenlist-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5d63a068f978fc69421fb0e6eb91a9603187527c86b7cd3f534a5b77a592b888", size = 48034, upload-time = "2025-10-06T05:37:06.343Z" }, + { url = "https://files.pythonhosted.org/packages/16/6c/be9d79775d8abe79b05fa6d23da99ad6e7763a1d080fbae7290b286093fd/frozenlist-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf0a7e10b077bf5fb9380ad3ae8ce20ef919a6ad93b4552896419ac7e1d8e042", size = 41749, upload-time = "2025-10-06T05:37:07.431Z" }, + { url = "https://files.pythonhosted.org/packages/f1/c8/85da824b7e7b9b6e7f7705b2ecaf9591ba6f79c1177f324c2735e41d36a2/frozenlist-1.8.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cee686f1f4cadeb2136007ddedd0aaf928ab95216e7691c63e50a8ec066336d0", size = 86127, upload-time = "2025-10-06T05:37:08.438Z" }, + { url = "https://files.pythonhosted.org/packages/8e/e8/a1185e236ec66c20afd72399522f142c3724c785789255202d27ae992818/frozenlist-1.8.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:119fb2a1bd47307e899c2fac7f28e85b9a543864df47aa7ec9d3c1b4545f096f", size = 49698, upload-time = "2025-10-06T05:37:09.48Z" }, + { url = "https://files.pythonhosted.org/packages/a1/93/72b1736d68f03fda5fdf0f2180fb6caaae3894f1b854d006ac61ecc727ee/frozenlist-1.8.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4970ece02dbc8c3a92fcc5228e36a3e933a01a999f7094ff7c23fbd2beeaa67c", size = 49749, upload-time = "2025-10-06T05:37:10.569Z" }, + { url = "https://files.pythonhosted.org/packages/a7/b2/fabede9fafd976b991e9f1b9c8c873ed86f202889b864756f240ce6dd855/frozenlist-1.8.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:cba69cb73723c3f329622e34bdbf5ce1f80c21c290ff04256cff1cd3c2036ed2", size = 231298, upload-time = "2025-10-06T05:37:11.993Z" }, + { url = "https://files.pythonhosted.org/packages/3a/3b/d9b1e0b0eed36e70477ffb8360c49c85c8ca8ef9700a4e6711f39a6e8b45/frozenlist-1.8.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:778a11b15673f6f1df23d9586f83c4846c471a8af693a22e066508b77d201ec8", size = 232015, upload-time = "2025-10-06T05:37:13.194Z" }, + { url = "https://files.pythonhosted.org/packages/dc/94/be719d2766c1138148564a3960fc2c06eb688da592bdc25adcf856101be7/frozenlist-1.8.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0325024fe97f94c41c08872db482cf8ac4800d80e79222c6b0b7b162d5b13686", size = 225038, upload-time = "2025-10-06T05:37:14.577Z" }, + { url = "https://files.pythonhosted.org/packages/e4/09/6712b6c5465f083f52f50cf74167b92d4ea2f50e46a9eea0523d658454ae/frozenlist-1.8.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:97260ff46b207a82a7567b581ab4190bd4dfa09f4db8a8b49d1a958f6aa4940e", size = 240130, upload-time = "2025-10-06T05:37:15.781Z" }, + { url = "https://files.pythonhosted.org/packages/f8/d4/cd065cdcf21550b54f3ce6a22e143ac9e4836ca42a0de1022da8498eac89/frozenlist-1.8.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:54b2077180eb7f83dd52c40b2750d0a9f175e06a42e3213ce047219de902717a", size = 242845, upload-time = "2025-10-06T05:37:17.037Z" }, + { url = "https://files.pythonhosted.org/packages/62/c3/f57a5c8c70cd1ead3d5d5f776f89d33110b1addae0ab010ad774d9a44fb9/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2f05983daecab868a31e1da44462873306d3cbfd76d1f0b5b69c473d21dbb128", size = 229131, upload-time = "2025-10-06T05:37:18.221Z" }, + { url = "https://files.pythonhosted.org/packages/6c/52/232476fe9cb64f0742f3fde2b7d26c1dac18b6d62071c74d4ded55e0ef94/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:33f48f51a446114bc5d251fb2954ab0164d5be02ad3382abcbfe07e2531d650f", size = 240542, upload-time = "2025-10-06T05:37:19.771Z" }, + { url = "https://files.pythonhosted.org/packages/5f/85/07bf3f5d0fb5414aee5f47d33c6f5c77bfe49aac680bfece33d4fdf6a246/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:154e55ec0655291b5dd1b8731c637ecdb50975a2ae70c606d100750a540082f7", size = 237308, upload-time = "2025-10-06T05:37:20.969Z" }, + { url = "https://files.pythonhosted.org/packages/11/99/ae3a33d5befd41ac0ca2cc7fd3aa707c9c324de2e89db0e0f45db9a64c26/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:4314debad13beb564b708b4a496020e5306c7333fa9a3ab90374169a20ffab30", size = 238210, upload-time = "2025-10-06T05:37:22.252Z" }, + { url = "https://files.pythonhosted.org/packages/b2/60/b1d2da22f4970e7a155f0adde9b1435712ece01b3cd45ba63702aea33938/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:073f8bf8becba60aa931eb3bc420b217bb7d5b8f4750e6f8b3be7f3da85d38b7", size = 231972, upload-time = "2025-10-06T05:37:23.5Z" }, + { url = "https://files.pythonhosted.org/packages/3f/ab/945b2f32de889993b9c9133216c068b7fcf257d8595a0ac420ac8677cab0/frozenlist-1.8.0-cp314-cp314-win32.whl", hash = "sha256:bac9c42ba2ac65ddc115d930c78d24ab8d4f465fd3fc473cdedfccadb9429806", size = 40536, upload-time = "2025-10-06T05:37:25.581Z" }, + { url = "https://files.pythonhosted.org/packages/59/ad/9caa9b9c836d9ad6f067157a531ac48b7d36499f5036d4141ce78c230b1b/frozenlist-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:3e0761f4d1a44f1d1a47996511752cf3dcec5bbdd9cc2b4fe595caf97754b7a0", size = 44330, upload-time = "2025-10-06T05:37:26.928Z" }, + { url = "https://files.pythonhosted.org/packages/82/13/e6950121764f2676f43534c555249f57030150260aee9dcf7d64efda11dd/frozenlist-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:d1eaff1d00c7751b7c6662e9c5ba6eb2c17a2306ba5e2a37f24ddf3cc953402b", size = 40627, upload-time = "2025-10-06T05:37:28.075Z" }, + { url = "https://files.pythonhosted.org/packages/c0/c7/43200656ecc4e02d3f8bc248df68256cd9572b3f0017f0a0c4e93440ae23/frozenlist-1.8.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:d3bb933317c52d7ea5004a1c442eef86f426886fba134ef8cf4226ea6ee1821d", size = 89238, upload-time = "2025-10-06T05:37:29.373Z" }, + { url = "https://files.pythonhosted.org/packages/d1/29/55c5f0689b9c0fb765055629f472c0de484dcaf0acee2f7707266ae3583c/frozenlist-1.8.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8009897cdef112072f93a0efdce29cd819e717fd2f649ee3016efd3cd885a7ed", size = 50738, upload-time = "2025-10-06T05:37:30.792Z" }, + { url = "https://files.pythonhosted.org/packages/ba/7d/b7282a445956506fa11da8c2db7d276adcbf2b17d8bb8407a47685263f90/frozenlist-1.8.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2c5dcbbc55383e5883246d11fd179782a9d07a986c40f49abe89ddf865913930", size = 51739, upload-time = "2025-10-06T05:37:32.127Z" }, + { url = "https://files.pythonhosted.org/packages/62/1c/3d8622e60d0b767a5510d1d3cf21065b9db874696a51ea6d7a43180a259c/frozenlist-1.8.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:39ecbc32f1390387d2aa4f5a995e465e9e2f79ba3adcac92d68e3e0afae6657c", size = 284186, upload-time = "2025-10-06T05:37:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/2d/14/aa36d5f85a89679a85a1d44cd7a6657e0b1c75f61e7cad987b203d2daca8/frozenlist-1.8.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92db2bf818d5cc8d9c1f1fc56b897662e24ea5adb36ad1f1d82875bd64e03c24", size = 292196, upload-time = "2025-10-06T05:37:36.107Z" }, + { url = "https://files.pythonhosted.org/packages/05/23/6bde59eb55abd407d34f77d39a5126fb7b4f109a3f611d3929f14b700c66/frozenlist-1.8.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dc43a022e555de94c3b68a4ef0b11c4f747d12c024a520c7101709a2144fb37", size = 273830, upload-time = "2025-10-06T05:37:37.663Z" }, + { url = "https://files.pythonhosted.org/packages/d2/3f/22cff331bfad7a8afa616289000ba793347fcd7bc275f3b28ecea2a27909/frozenlist-1.8.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb89a7f2de3602cfed448095bab3f178399646ab7c61454315089787df07733a", size = 294289, upload-time = "2025-10-06T05:37:39.261Z" }, + { url = "https://files.pythonhosted.org/packages/a4/89/5b057c799de4838b6c69aa82b79705f2027615e01be996d2486a69ca99c4/frozenlist-1.8.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:33139dc858c580ea50e7e60a1b0ea003efa1fd42e6ec7fdbad78fff65fad2fd2", size = 300318, upload-time = "2025-10-06T05:37:43.213Z" }, + { url = "https://files.pythonhosted.org/packages/30/de/2c22ab3eb2a8af6d69dc799e48455813bab3690c760de58e1bf43b36da3e/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:168c0969a329b416119507ba30b9ea13688fafffac1b7822802537569a1cb0ef", size = 282814, upload-time = "2025-10-06T05:37:45.337Z" }, + { url = "https://files.pythonhosted.org/packages/59/f7/970141a6a8dbd7f556d94977858cfb36fa9b66e0892c6dd780d2219d8cd8/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:28bd570e8e189d7f7b001966435f9dac6718324b5be2990ac496cf1ea9ddb7fe", size = 291762, upload-time = "2025-10-06T05:37:46.657Z" }, + { url = "https://files.pythonhosted.org/packages/c1/15/ca1adae83a719f82df9116d66f5bb28bb95557b3951903d39135620ef157/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b2a095d45c5d46e5e79ba1e5b9cb787f541a8dee0433836cea4b96a2c439dcd8", size = 289470, upload-time = "2025-10-06T05:37:47.946Z" }, + { url = "https://files.pythonhosted.org/packages/ac/83/dca6dc53bf657d371fbc88ddeb21b79891e747189c5de990b9dfff2ccba1/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:eab8145831a0d56ec9c4139b6c3e594c7a83c2c8be25d5bcf2d86136a532287a", size = 289042, upload-time = "2025-10-06T05:37:49.499Z" }, + { url = "https://files.pythonhosted.org/packages/96/52/abddd34ca99be142f354398700536c5bd315880ed0a213812bc491cff5e4/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:974b28cf63cc99dfb2188d8d222bc6843656188164848c4f679e63dae4b0708e", size = 283148, upload-time = "2025-10-06T05:37:50.745Z" }, + { url = "https://files.pythonhosted.org/packages/af/d3/76bd4ed4317e7119c2b7f57c3f6934aba26d277acc6309f873341640e21f/frozenlist-1.8.0-cp314-cp314t-win32.whl", hash = "sha256:342c97bf697ac5480c0a7ec73cd700ecfa5a8a40ac923bd035484616efecc2df", size = 44676, upload-time = "2025-10-06T05:37:52.222Z" }, + { url = "https://files.pythonhosted.org/packages/89/76/c615883b7b521ead2944bb3480398cbb07e12b7b4e4d073d3752eb721558/frozenlist-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:06be8f67f39c8b1dc671f5d83aaefd3358ae5cdcf8314552c57e7ed3e6475bdd", size = 49451, upload-time = "2025-10-06T05:37:53.425Z" }, + { url = "https://files.pythonhosted.org/packages/e0/a3/5982da14e113d07b325230f95060e2169f5311b1017ea8af2a29b374c289/frozenlist-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:102e6314ca4da683dca92e3b1355490fed5f313b768500084fbe6371fddfdb79", size = 42507, upload-time = "2025-10-06T05:37:54.513Z" }, + { url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" }, +] + +[[package]] +name = "gaab-strands-agent" +version = "4.0.0" +source = { editable = "." } +dependencies = [ + { name = "aws-opentelemetry-distro" }, + { name = "bedrock-agentcore" }, + { name = "boto3" }, + { name = "gaab-strands-common" }, + { name = "pip" }, + { name = "pydantic" }, + { name = "setuptools" }, + { name = "strands-agents", extra = ["otel"] }, + { name = "strands-agents-tools" }, + { name = "wheel" }, +] + +[package.optional-dependencies] +dev = [ + { name = "black" }, + { name = "isort" }, + { name = "mypy" }, + { name = "pytest" }, + { name = "pytest-cov" }, +] +test = [ + { name = "moto" }, + { name = "pytest" }, + { name = "pytest-cov" }, + { name = "pytest-mock" }, +] + +[package.dev-dependencies] +dev = [ + { name = "black" }, + { name = "isort" }, + { name = "moto" }, + { name = "mypy" }, + { name = "pytest" }, + { name = "pytest-cov" }, + { name = "pytest-mock" }, +] + +[package.metadata] +requires-dist = [ + { name = "aws-opentelemetry-distro", specifier = ">=0.12.1" }, + { name = "bedrock-agentcore", specifier = ">=0.1.5" }, + { name = "black", marker = "extra == 'dev'", specifier = ">=24.0.0" }, + { name = "boto3", specifier = ">=1.35.0" }, + { name = "gaab-strands-common", editable = "../gaab-strands-common" }, + { name = "isort", marker = "extra == 'dev'", specifier = ">=5.12.0" }, + { name = "moto", marker = "extra == 'test'", specifier = ">=5.0.0" }, + { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.8.0" }, + { name = "pip", specifier = ">=25.0" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "pytest", marker = "extra == 'dev'", specifier = ">=8.0.0" }, + { name = "pytest", marker = "extra == 'test'", specifier = ">=8.0.0" }, + { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=5.0.0" }, + { name = "pytest-cov", marker = "extra == 'test'", specifier = ">=5.0.0" }, + { name = "pytest-mock", marker = "extra == 'test'", specifier = ">=3.12.0" }, + { name = "setuptools", specifier = ">=70.0.0" }, + { name = "strands-agents", extras = ["otel"], specifier = ">=1.10.0" }, + { name = "strands-agents-tools", specifier = ">=0.2.9" }, + { name = "wheel", specifier = ">=0.42.0" }, +] +provides-extras = ["dev", "test"] + +[package.metadata.requires-dev] +dev = [ + { name = "black", specifier = ">=24.0.0" }, + { name = "isort", specifier = ">=5.12.0" }, + { name = "moto", specifier = ">=5.0.0" }, + { name = "mypy", specifier = ">=1.8.0" }, + { name = "pytest", specifier = ">=8.0.0" }, + { name = "pytest-cov", specifier = ">=5.0.0" }, + { name = "pytest-mock", specifier = ">=3.12.0" }, +] + +[[package]] +name = "gaab-strands-common" +version = "0.1.0" +source = { editable = "../gaab-strands-common" } +dependencies = [ + { name = "bedrock-agentcore" }, + { name = "boto3" }, + { name = "pydantic" }, + { name = "strands-agents" }, +] + +[package.metadata] +requires-dist = [ + { name = "bedrock-agentcore", specifier = ">=0.1.0" }, + { name = "boto3", specifier = ">=1.34.0" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "strands-agents", specifier = ">=1.10.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "black", specifier = ">=24.0.0" }, + { name = "isort", specifier = ">=5.13.0" }, + { name = "moto", specifier = ">=5.1.0" }, + { name = "pytest", specifier = ">=8.0.0" }, + { name = "pytest-asyncio", specifier = ">=0.23.0" }, + { name = "pytest-cov", specifier = ">=4.1.0" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.70.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/24/33db22342cf4a2ea27c9955e6713140fedd51e8b141b5ce5260897020f1a/googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257", size = 145903, upload-time = "2025-04-14T10:17:02.924Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/f1/62a193f0227cf15a920390abe675f386dec35f7ae3ffe6da582d3ade42c7/googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8", size = 294530, upload-time = "2025-04-14T10:17:01.271Z" }, +] + +[[package]] +name = "grpcio" +version = "1.75.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/f7/8963848164c7604efb3a3e6ee457fdb3a469653e19002bd24742473254f8/grpcio-1.75.1.tar.gz", hash = "sha256:3e81d89ece99b9ace23a6916880baca613c03a799925afb2857887efa8b1b3d2", size = 12731327, upload-time = "2025-09-26T09:03:36.887Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/74/bac4ab9f7722164afdf263ae31ba97b8174c667153510322a5eba4194c32/grpcio-1.75.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:3bed22e750d91d53d9e31e0af35a7b0b51367e974e14a4ff229db5b207647884", size = 5672779, upload-time = "2025-09-26T09:02:19.11Z" }, + { url = "https://files.pythonhosted.org/packages/a6/52/d0483cfa667cddaa294e3ab88fd2c2a6e9dc1a1928c0e5911e2e54bd5b50/grpcio-1.75.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:5b8f381eadcd6ecaa143a21e9e80a26424c76a0a9b3d546febe6648f3a36a5ac", size = 11470623, upload-time = "2025-09-26T09:02:22.117Z" }, + { url = "https://files.pythonhosted.org/packages/cf/e4/d1954dce2972e32384db6a30273275e8c8ea5a44b80347f9055589333b3f/grpcio-1.75.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5bf4001d3293e3414d0cf99ff9b1139106e57c3a66dfff0c5f60b2a6286ec133", size = 6248838, upload-time = "2025-09-26T09:02:26.426Z" }, + { url = "https://files.pythonhosted.org/packages/06/43/073363bf63826ba8077c335d797a8d026f129dc0912b69c42feaf8f0cd26/grpcio-1.75.1-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:9f82ff474103e26351dacfe8d50214e7c9322960d8d07ba7fa1d05ff981c8b2d", size = 6922663, upload-time = "2025-09-26T09:02:28.724Z" }, + { url = "https://files.pythonhosted.org/packages/c2/6f/076ac0df6c359117676cacfa8a377e2abcecec6a6599a15a672d331f6680/grpcio-1.75.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0ee119f4f88d9f75414217823d21d75bfe0e6ed40135b0cbbfc6376bc9f7757d", size = 6436149, upload-time = "2025-09-26T09:02:30.971Z" }, + { url = "https://files.pythonhosted.org/packages/6b/27/1d08824f1d573fcb1fa35ede40d6020e68a04391709939e1c6f4193b445f/grpcio-1.75.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:664eecc3abe6d916fa6cf8dd6b778e62fb264a70f3430a3180995bf2da935446", size = 7067989, upload-time = "2025-09-26T09:02:33.233Z" }, + { url = "https://files.pythonhosted.org/packages/c6/98/98594cf97b8713feb06a8cb04eeef60b4757e3e2fb91aa0d9161da769843/grpcio-1.75.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c32193fa08b2fbebf08fe08e84f8a0aad32d87c3ad42999c65e9449871b1c66e", size = 8010717, upload-time = "2025-09-26T09:02:36.011Z" }, + { url = "https://files.pythonhosted.org/packages/8c/7e/bb80b1bba03c12158f9254762cdf5cced4a9bc2e8ed51ed335915a5a06ef/grpcio-1.75.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5cebe13088b9254f6e615bcf1da9131d46cfa4e88039454aca9cb65f639bd3bc", size = 7463822, upload-time = "2025-09-26T09:02:38.26Z" }, + { url = "https://files.pythonhosted.org/packages/23/1c/1ea57fdc06927eb5640f6750c697f596f26183573069189eeaf6ef86ba2d/grpcio-1.75.1-cp313-cp313-win32.whl", hash = "sha256:4b4c678e7ed50f8ae8b8dbad15a865ee73ce12668b6aaf411bf3258b5bc3f970", size = 3938490, upload-time = "2025-09-26T09:02:40.268Z" }, + { url = "https://files.pythonhosted.org/packages/4b/24/fbb8ff1ccadfbf78ad2401c41aceaf02b0d782c084530d8871ddd69a2d49/grpcio-1.75.1-cp313-cp313-win_amd64.whl", hash = "sha256:5573f51e3f296a1bcf71e7a690c092845fb223072120f4bdb7a5b48e111def66", size = 4642538, upload-time = "2025-09-26T09:02:42.519Z" }, + { url = "https://files.pythonhosted.org/packages/f2/1b/9a0a5cecd24302b9fdbcd55d15ed6267e5f3d5b898ff9ac8cbe17ee76129/grpcio-1.75.1-cp314-cp314-linux_armv7l.whl", hash = "sha256:c05da79068dd96723793bffc8d0e64c45f316248417515f28d22204d9dae51c7", size = 5673319, upload-time = "2025-09-26T09:02:44.742Z" }, + { url = "https://files.pythonhosted.org/packages/c6/ec/9d6959429a83fbf5df8549c591a8a52bb313976f6646b79852c4884e3225/grpcio-1.75.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:06373a94fd16ec287116a825161dca179a0402d0c60674ceeec8c9fba344fe66", size = 11480347, upload-time = "2025-09-26T09:02:47.539Z" }, + { url = "https://files.pythonhosted.org/packages/09/7a/26da709e42c4565c3d7bf999a9569da96243ce34a8271a968dee810a7cf1/grpcio-1.75.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4484f4b7287bdaa7a5b3980f3c7224c3c622669405d20f69549f5fb956ad0421", size = 6254706, upload-time = "2025-09-26T09:02:50.4Z" }, + { url = "https://files.pythonhosted.org/packages/f1/08/dcb26a319d3725f199c97e671d904d84ee5680de57d74c566a991cfab632/grpcio-1.75.1-cp314-cp314-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:2720c239c1180eee69f7883c1d4c83fc1a495a2535b5fa322887c70bf02b16e8", size = 6922501, upload-time = "2025-09-26T09:02:52.711Z" }, + { url = "https://files.pythonhosted.org/packages/78/66/044d412c98408a5e23cb348845979a2d17a2e2b6c3c34c1ec91b920f49d0/grpcio-1.75.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:07a554fa31c668cf0e7a188678ceeca3cb8fead29bbe455352e712ec33ca701c", size = 6437492, upload-time = "2025-09-26T09:02:55.542Z" }, + { url = "https://files.pythonhosted.org/packages/4e/9d/5e3e362815152aa1afd8b26ea613effa005962f9da0eec6e0e4527e7a7d1/grpcio-1.75.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:3e71a2105210366bfc398eef7f57a664df99194f3520edb88b9c3a7e46ee0d64", size = 7081061, upload-time = "2025-09-26T09:02:58.261Z" }, + { url = "https://files.pythonhosted.org/packages/1e/1a/46615682a19e100f46e31ddba9ebc297c5a5ab9ddb47b35443ffadb8776c/grpcio-1.75.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:8679aa8a5b67976776d3c6b0521e99d1c34db8a312a12bcfd78a7085cb9b604e", size = 8010849, upload-time = "2025-09-26T09:03:00.548Z" }, + { url = "https://files.pythonhosted.org/packages/67/8e/3204b94ac30b0f675ab1c06540ab5578660dc8b690db71854d3116f20d00/grpcio-1.75.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:aad1c774f4ebf0696a7f148a56d39a3432550612597331792528895258966dc0", size = 7464478, upload-time = "2025-09-26T09:03:03.096Z" }, + { url = "https://files.pythonhosted.org/packages/b7/97/2d90652b213863b2cf466d9c1260ca7e7b67a16780431b3eb1d0420e3d5b/grpcio-1.75.1-cp314-cp314-win32.whl", hash = "sha256:62ce42d9994446b307649cb2a23335fa8e927f7ab2cbf5fcb844d6acb4d85f9c", size = 4012672, upload-time = "2025-09-26T09:03:05.477Z" }, + { url = "https://files.pythonhosted.org/packages/f9/df/e2e6e9fc1c985cd1a59e6996a05647c720fe8a03b92f5ec2d60d366c531e/grpcio-1.75.1-cp314-cp314-win_amd64.whl", hash = "sha256:f86e92275710bea3000cb79feca1762dc0ad3b27830dd1a74e82ab321d4ee464", size = 4772475, upload-time = "2025-09-26T09:03:07.661Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/4c/751061ffa58615a32c31b2d82e8482be8dd4a89154f003147acee90f2be9/httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d", size = 15943, upload-time = "2025-10-10T21:48:22.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/33/08/c1395a292bb23fd03bdf572a1357c5a733d3eecbab877641ceacab23db6e/importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580", size = 55767, upload-time = "2025-01-20T22:21:30.429Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/9d/0fb148dc4d6fa4a7dd1d8378168d9b4cd8d4560a6fbf6f0121c5fc34eb68/importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e", size = 26971, upload-time = "2025-01-20T22:21:29.177Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "isort" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/53/4f3c058e3bace40282876f9b553343376ee687f3c35a525dc79dbd450f88/isort-7.0.0.tar.gz", hash = "sha256:5513527951aadb3ac4292a41a16cbc50dd1642432f5e8c20057d414bdafb4187", size = 805049, upload-time = "2025-10-11T13:30:59.107Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/ed/e3705d6d02b4f7aea715a353c8ce193efd0b5db13e204df895d38734c244/isort-7.0.0-py3-none-any.whl", hash = "sha256:1bcabac8bc3c36c7fb7b98a76c8abb18e0f841a3ba81decac7691008592499c1", size = 94672, upload-time = "2025-10-11T13:30:57.665Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, +] + +[[package]] +name = "markdownify" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/1b/6f2697b51eaca81f08852fd2734745af15718fea10222a1d40f8a239c4ea/markdownify-1.2.0.tar.gz", hash = "sha256:f6c367c54eb24ee953921804dfe6d6575c5e5b42c643955e7242034435de634c", size = 18771, upload-time = "2025-08-09T17:44:15.302Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/e2/7af643acb4cae0741dffffaa7f3f7c9e7ab4046724543ba1777c401d821c/markdownify-1.2.0-py3-none-any.whl", hash = "sha256:48e150a1c4993d4d50f282f725c0111bd9eb25645d41fa2f543708fd44161351", size = 15561, upload-time = "2025-08-09T17:44:14.074Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, + { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, + { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" }, + { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, + { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, + { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, + { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, + { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, + { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, + { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, + { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, + { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, + { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, + { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, + { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, + { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, + { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, + { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, + { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, + { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, + { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" }, + { url = "https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" }, + { url = "https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" }, + { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" }, + { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" }, + { url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" }, + { url = "https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" }, + { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" }, + { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" }, + { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" }, + { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" }, + { url = "https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" }, + { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" }, + { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" }, + { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" }, + { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" }, + { url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" }, + { url = "https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, +] + +[[package]] +name = "mcp" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/79/5724a540df19e192e8606c543cdcf162de8eb435077520cca150f7365ec0/mcp-1.17.0.tar.gz", hash = "sha256:1b57fabf3203240ccc48e39859faf3ae1ccb0b571ff798bbedae800c73c6df90", size = 477951, upload-time = "2025-10-10T12:16:44.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/72/3751feae343a5ad07959df713907b5c3fbaed269d697a14b0c449080cf2e/mcp-1.17.0-py3-none-any.whl", hash = "sha256:0660ef275cada7a545af154db3082f176cf1d2681d5e35ae63e014faf0a35d40", size = 167737, upload-time = "2025-10-10T12:16:42.863Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "moto" +version = "5.1.14" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "cryptography" }, + { name = "jinja2" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "responses" }, + { name = "werkzeug" }, + { name = "xmltodict" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/d9/ec94955a1b14ef45ccbda81f2256b30bf1f21ae5c5739fca14130bb1f048/moto-5.1.14.tar.gz", hash = "sha256:450690abb0b152fea7f93e497ac2172f15d8a838b15f22b514db801a6b857ae4", size = 7264025, upload-time = "2025-10-05T13:32:38.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/a0/4c5955187853536c7d337709074a5f3ef391654a32a3379096b2d16bfd9b/moto-5.1.14-py3-none-any.whl", hash = "sha256:b9767848953beaf6650f1fd91615a3bcef84d93bd00603fa64dae38c656548e8", size = 5384022, upload-time = "2025-10-05T13:32:35.763Z" }, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, +] + +[[package]] +name = "multidict" +version = "6.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/86/33272a544eeb36d66e4d9a920602d1a2f57d4ebea4ef3cdfe5a912574c95/multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6", size = 76135, upload-time = "2025-10-06T14:49:54.26Z" }, + { url = "https://files.pythonhosted.org/packages/91/1c/eb97db117a1ebe46d457a3d235a7b9d2e6dcab174f42d1b67663dd9e5371/multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159", size = 45117, upload-time = "2025-10-06T14:49:55.82Z" }, + { url = "https://files.pythonhosted.org/packages/f1/d8/6c3442322e41fb1dd4de8bd67bfd11cd72352ac131f6368315617de752f1/multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca", size = 43472, upload-time = "2025-10-06T14:49:57.048Z" }, + { url = "https://files.pythonhosted.org/packages/75/3f/e2639e80325af0b6c6febdf8e57cc07043ff15f57fa1ef808f4ccb5ac4cd/multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8", size = 249342, upload-time = "2025-10-06T14:49:58.368Z" }, + { url = "https://files.pythonhosted.org/packages/5d/cc/84e0585f805cbeaa9cbdaa95f9a3d6aed745b9d25700623ac89a6ecff400/multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60", size = 257082, upload-time = "2025-10-06T14:49:59.89Z" }, + { url = "https://files.pythonhosted.org/packages/b0/9c/ac851c107c92289acbbf5cfb485694084690c1b17e555f44952c26ddc5bd/multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4", size = 240704, upload-time = "2025-10-06T14:50:01.485Z" }, + { url = "https://files.pythonhosted.org/packages/50/cc/5f93e99427248c09da95b62d64b25748a5f5c98c7c2ab09825a1d6af0e15/multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f", size = 266355, upload-time = "2025-10-06T14:50:02.955Z" }, + { url = "https://files.pythonhosted.org/packages/ec/0c/2ec1d883ceb79c6f7f6d7ad90c919c898f5d1c6ea96d322751420211e072/multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf", size = 267259, upload-time = "2025-10-06T14:50:04.446Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2d/f0b184fa88d6630aa267680bdb8623fb69cb0d024b8c6f0d23f9a0f406d3/multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32", size = 254903, upload-time = "2025-10-06T14:50:05.98Z" }, + { url = "https://files.pythonhosted.org/packages/06/c9/11ea263ad0df7dfabcad404feb3c0dd40b131bc7f232d5537f2fb1356951/multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036", size = 252365, upload-time = "2025-10-06T14:50:07.511Z" }, + { url = "https://files.pythonhosted.org/packages/41/88/d714b86ee2c17d6e09850c70c9d310abac3d808ab49dfa16b43aba9d53fd/multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec", size = 250062, upload-time = "2025-10-06T14:50:09.074Z" }, + { url = "https://files.pythonhosted.org/packages/15/fe/ad407bb9e818c2b31383f6131ca19ea7e35ce93cf1310fce69f12e89de75/multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e", size = 249683, upload-time = "2025-10-06T14:50:10.714Z" }, + { url = "https://files.pythonhosted.org/packages/8c/a4/a89abdb0229e533fb925e7c6e5c40201c2873efebc9abaf14046a4536ee6/multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64", size = 261254, upload-time = "2025-10-06T14:50:12.28Z" }, + { url = "https://files.pythonhosted.org/packages/8d/aa/0e2b27bd88b40a4fb8dc53dd74eecac70edaa4c1dd0707eb2164da3675b3/multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd", size = 257967, upload-time = "2025-10-06T14:50:14.16Z" }, + { url = "https://files.pythonhosted.org/packages/d0/8e/0c67b7120d5d5f6d874ed85a085f9dc770a7f9d8813e80f44a9fec820bb7/multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288", size = 250085, upload-time = "2025-10-06T14:50:15.639Z" }, + { url = "https://files.pythonhosted.org/packages/ba/55/b73e1d624ea4b8fd4dd07a3bb70f6e4c7c6c5d9d640a41c6ffe5cdbd2a55/multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17", size = 41713, upload-time = "2025-10-06T14:50:17.066Z" }, + { url = "https://files.pythonhosted.org/packages/32/31/75c59e7d3b4205075b4c183fa4ca398a2daf2303ddf616b04ae6ef55cffe/multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390", size = 45915, upload-time = "2025-10-06T14:50:18.264Z" }, + { url = "https://files.pythonhosted.org/packages/31/2a/8987831e811f1184c22bc2e45844934385363ee61c0a2dcfa8f71b87e608/multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e", size = 43077, upload-time = "2025-10-06T14:50:19.853Z" }, + { url = "https://files.pythonhosted.org/packages/e8/68/7b3a5170a382a340147337b300b9eb25a9ddb573bcdfff19c0fa3f31ffba/multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00", size = 83114, upload-time = "2025-10-06T14:50:21.223Z" }, + { url = "https://files.pythonhosted.org/packages/55/5c/3fa2d07c84df4e302060f555bbf539310980362236ad49f50eeb0a1c1eb9/multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb", size = 48442, upload-time = "2025-10-06T14:50:22.871Z" }, + { url = "https://files.pythonhosted.org/packages/fc/56/67212d33239797f9bd91962bb899d72bb0f4c35a8652dcdb8ed049bef878/multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b", size = 46885, upload-time = "2025-10-06T14:50:24.258Z" }, + { url = "https://files.pythonhosted.org/packages/46/d1/908f896224290350721597a61a69cd19b89ad8ee0ae1f38b3f5cd12ea2ac/multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c", size = 242588, upload-time = "2025-10-06T14:50:25.716Z" }, + { url = "https://files.pythonhosted.org/packages/ab/67/8604288bbd68680eee0ab568fdcb56171d8b23a01bcd5cb0c8fedf6e5d99/multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1", size = 249966, upload-time = "2025-10-06T14:50:28.192Z" }, + { url = "https://files.pythonhosted.org/packages/20/33/9228d76339f1ba51e3efef7da3ebd91964d3006217aae13211653193c3ff/multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b", size = 228618, upload-time = "2025-10-06T14:50:29.82Z" }, + { url = "https://files.pythonhosted.org/packages/f8/2d/25d9b566d10cab1c42b3b9e5b11ef79c9111eaf4463b8c257a3bd89e0ead/multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5", size = 257539, upload-time = "2025-10-06T14:50:31.731Z" }, + { url = "https://files.pythonhosted.org/packages/b6/b1/8d1a965e6637fc33de3c0d8f414485c2b7e4af00f42cab3d84e7b955c222/multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad", size = 256345, upload-time = "2025-10-06T14:50:33.26Z" }, + { url = "https://files.pythonhosted.org/packages/ba/0c/06b5a8adbdeedada6f4fb8d8f193d44a347223b11939b42953eeb6530b6b/multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c", size = 247934, upload-time = "2025-10-06T14:50:34.808Z" }, + { url = "https://files.pythonhosted.org/packages/8f/31/b2491b5fe167ca044c6eb4b8f2c9f3b8a00b24c432c365358eadac5d7625/multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5", size = 245243, upload-time = "2025-10-06T14:50:36.436Z" }, + { url = "https://files.pythonhosted.org/packages/61/1a/982913957cb90406c8c94f53001abd9eafc271cb3e70ff6371590bec478e/multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10", size = 235878, upload-time = "2025-10-06T14:50:37.953Z" }, + { url = "https://files.pythonhosted.org/packages/be/c0/21435d804c1a1cf7a2608593f4d19bca5bcbd7a81a70b253fdd1c12af9c0/multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754", size = 243452, upload-time = "2025-10-06T14:50:39.574Z" }, + { url = "https://files.pythonhosted.org/packages/54/0a/4349d540d4a883863191be6eb9a928846d4ec0ea007d3dcd36323bb058ac/multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c", size = 252312, upload-time = "2025-10-06T14:50:41.612Z" }, + { url = "https://files.pythonhosted.org/packages/26/64/d5416038dbda1488daf16b676e4dbfd9674dde10a0cc8f4fc2b502d8125d/multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762", size = 246935, upload-time = "2025-10-06T14:50:43.972Z" }, + { url = "https://files.pythonhosted.org/packages/9f/8c/8290c50d14e49f35e0bd4abc25e1bc7711149ca9588ab7d04f886cdf03d9/multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6", size = 243385, upload-time = "2025-10-06T14:50:45.648Z" }, + { url = "https://files.pythonhosted.org/packages/ef/a0/f83ae75e42d694b3fbad3e047670e511c138be747bc713cf1b10d5096416/multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d", size = 47777, upload-time = "2025-10-06T14:50:47.154Z" }, + { url = "https://files.pythonhosted.org/packages/dc/80/9b174a92814a3830b7357307a792300f42c9e94664b01dee8e457551fa66/multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6", size = 53104, upload-time = "2025-10-06T14:50:48.851Z" }, + { url = "https://files.pythonhosted.org/packages/cc/28/04baeaf0428d95bb7a7bea0e691ba2f31394338ba424fb0679a9ed0f4c09/multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792", size = 45503, upload-time = "2025-10-06T14:50:50.16Z" }, + { url = "https://files.pythonhosted.org/packages/e2/b1/3da6934455dd4b261d4c72f897e3a5728eba81db59959f3a639245891baa/multidict-6.7.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bab1e4aff7adaa34410f93b1f8e57c4b36b9af0426a76003f441ee1d3c7e842", size = 75128, upload-time = "2025-10-06T14:50:51.92Z" }, + { url = "https://files.pythonhosted.org/packages/14/2c/f069cab5b51d175a1a2cb4ccdf7a2c2dabd58aa5bd933fa036a8d15e2404/multidict-6.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b8512bac933afc3e45fb2b18da8e59b78d4f408399a960339598374d4ae3b56b", size = 44410, upload-time = "2025-10-06T14:50:53.275Z" }, + { url = "https://files.pythonhosted.org/packages/42/e2/64bb41266427af6642b6b128e8774ed84c11b80a90702c13ac0a86bb10cc/multidict-6.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:79dcf9e477bc65414ebfea98ffd013cb39552b5ecd62908752e0e413d6d06e38", size = 43205, upload-time = "2025-10-06T14:50:54.911Z" }, + { url = "https://files.pythonhosted.org/packages/02/68/6b086fef8a3f1a8541b9236c594f0c9245617c29841f2e0395d979485cde/multidict-6.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:31bae522710064b5cbeddaf2e9f32b1abab70ac6ac91d42572502299e9953128", size = 245084, upload-time = "2025-10-06T14:50:56.369Z" }, + { url = "https://files.pythonhosted.org/packages/15/ee/f524093232007cd7a75c1d132df70f235cfd590a7c9eaccd7ff422ef4ae8/multidict-6.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a0df7ff02397bb63e2fd22af2c87dfa39e8c7f12947bc524dbdc528282c7e34", size = 252667, upload-time = "2025-10-06T14:50:57.991Z" }, + { url = "https://files.pythonhosted.org/packages/02/a5/eeb3f43ab45878f1895118c3ef157a480db58ede3f248e29b5354139c2c9/multidict-6.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a0222514e8e4c514660e182d5156a415c13ef0aabbd71682fc714e327b95e99", size = 233590, upload-time = "2025-10-06T14:50:59.589Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1e/76d02f8270b97269d7e3dbd45644b1785bda457b474315f8cf999525a193/multidict-6.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2397ab4daaf2698eb51a76721e98db21ce4f52339e535725de03ea962b5a3202", size = 264112, upload-time = "2025-10-06T14:51:01.183Z" }, + { url = "https://files.pythonhosted.org/packages/76/0b/c28a70ecb58963847c2a8efe334904cd254812b10e535aefb3bcce513918/multidict-6.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8891681594162635948a636c9fe0ff21746aeb3dd5463f6e25d9bea3a8a39ca1", size = 261194, upload-time = "2025-10-06T14:51:02.794Z" }, + { url = "https://files.pythonhosted.org/packages/b4/63/2ab26e4209773223159b83aa32721b4021ffb08102f8ac7d689c943fded1/multidict-6.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18706cc31dbf402a7945916dd5cddf160251b6dab8a2c5f3d6d5a55949f676b3", size = 248510, upload-time = "2025-10-06T14:51:04.724Z" }, + { url = "https://files.pythonhosted.org/packages/93/cd/06c1fa8282af1d1c46fd55c10a7930af652afdce43999501d4d68664170c/multidict-6.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f844a1bbf1d207dd311a56f383f7eda2d0e134921d45751842d8235e7778965d", size = 248395, upload-time = "2025-10-06T14:51:06.306Z" }, + { url = "https://files.pythonhosted.org/packages/99/ac/82cb419dd6b04ccf9e7e61befc00c77614fc8134362488b553402ecd55ce/multidict-6.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d4393e3581e84e5645506923816b9cc81f5609a778c7e7534054091acc64d1c6", size = 239520, upload-time = "2025-10-06T14:51:08.091Z" }, + { url = "https://files.pythonhosted.org/packages/fa/f3/a0f9bf09493421bd8716a362e0cd1d244f5a6550f5beffdd6b47e885b331/multidict-6.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:fbd18dc82d7bf274b37aa48d664534330af744e03bccf696d6f4c6042e7d19e7", size = 245479, upload-time = "2025-10-06T14:51:10.365Z" }, + { url = "https://files.pythonhosted.org/packages/8d/01/476d38fc73a212843f43c852b0eee266b6971f0e28329c2184a8df90c376/multidict-6.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b6234e14f9314731ec45c42fc4554b88133ad53a09092cc48a88e771c125dadb", size = 258903, upload-time = "2025-10-06T14:51:12.466Z" }, + { url = "https://files.pythonhosted.org/packages/49/6d/23faeb0868adba613b817d0e69c5f15531b24d462af8012c4f6de4fa8dc3/multidict-6.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:08d4379f9744d8f78d98c8673c06e202ffa88296f009c71bbafe8a6bf847d01f", size = 252333, upload-time = "2025-10-06T14:51:14.48Z" }, + { url = "https://files.pythonhosted.org/packages/1e/cc/48d02ac22b30fa247f7dad82866e4b1015431092f4ba6ebc7e77596e0b18/multidict-6.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9fe04da3f79387f450fd0061d4dd2e45a72749d31bf634aecc9e27f24fdc4b3f", size = 243411, upload-time = "2025-10-06T14:51:16.072Z" }, + { url = "https://files.pythonhosted.org/packages/4a/03/29a8bf5a18abf1fe34535c88adbdfa88c9fb869b5a3b120692c64abe8284/multidict-6.7.0-cp314-cp314-win32.whl", hash = "sha256:fbafe31d191dfa7c4c51f7a6149c9fb7e914dcf9ffead27dcfd9f1ae382b3885", size = 40940, upload-time = "2025-10-06T14:51:17.544Z" }, + { url = "https://files.pythonhosted.org/packages/82/16/7ed27b680791b939de138f906d5cf2b4657b0d45ca6f5dd6236fdddafb1a/multidict-6.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2f67396ec0310764b9222a1728ced1ab638f61aadc6226f17a71dd9324f9a99c", size = 45087, upload-time = "2025-10-06T14:51:18.875Z" }, + { url = "https://files.pythonhosted.org/packages/cd/3c/e3e62eb35a1950292fe39315d3c89941e30a9d07d5d2df42965ab041da43/multidict-6.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:ba672b26069957ee369cfa7fc180dde1fc6f176eaf1e6beaf61fbebbd3d9c000", size = 42368, upload-time = "2025-10-06T14:51:20.225Z" }, + { url = "https://files.pythonhosted.org/packages/8b/40/cd499bd0dbc5f1136726db3153042a735fffd0d77268e2ee20d5f33c010f/multidict-6.7.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:c1dcc7524066fa918c6a27d61444d4ee7900ec635779058571f70d042d86ed63", size = 82326, upload-time = "2025-10-06T14:51:21.588Z" }, + { url = "https://files.pythonhosted.org/packages/13/8a/18e031eca251c8df76daf0288e6790561806e439f5ce99a170b4af30676b/multidict-6.7.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e0b36c2d388dc7b6ced3406671b401e84ad7eb0656b8f3a2f46ed0ce483718", size = 48065, upload-time = "2025-10-06T14:51:22.93Z" }, + { url = "https://files.pythonhosted.org/packages/40/71/5e6701277470a87d234e433fb0a3a7deaf3bcd92566e421e7ae9776319de/multidict-6.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a7baa46a22e77f0988e3b23d4ede5513ebec1929e34ee9495be535662c0dfe2", size = 46475, upload-time = "2025-10-06T14:51:24.352Z" }, + { url = "https://files.pythonhosted.org/packages/fe/6a/bab00cbab6d9cfb57afe1663318f72ec28289ea03fd4e8236bb78429893a/multidict-6.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bf77f54997a9166a2f5675d1201520586439424c2511723a7312bdb4bcc034e", size = 239324, upload-time = "2025-10-06T14:51:25.822Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5f/8de95f629fc22a7769ade8b41028e3e5a822c1f8904f618d175945a81ad3/multidict-6.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e011555abada53f1578d63389610ac8a5400fc70ce71156b0aa30d326f1a5064", size = 246877, upload-time = "2025-10-06T14:51:27.604Z" }, + { url = "https://files.pythonhosted.org/packages/23/b4/38881a960458f25b89e9f4a4fdcb02ac101cfa710190db6e5528841e67de/multidict-6.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:28b37063541b897fd6a318007373930a75ca6d6ac7c940dbe14731ffdd8d498e", size = 225824, upload-time = "2025-10-06T14:51:29.664Z" }, + { url = "https://files.pythonhosted.org/packages/1e/39/6566210c83f8a261575f18e7144736059f0c460b362e96e9cf797a24b8e7/multidict-6.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05047ada7a2fde2631a0ed706f1fd68b169a681dfe5e4cf0f8e4cb6618bbc2cd", size = 253558, upload-time = "2025-10-06T14:51:31.684Z" }, + { url = "https://files.pythonhosted.org/packages/00/a3/67f18315100f64c269f46e6c0319fa87ba68f0f64f2b8e7fd7c72b913a0b/multidict-6.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:716133f7d1d946a4e1b91b1756b23c088881e70ff180c24e864c26192ad7534a", size = 252339, upload-time = "2025-10-06T14:51:33.699Z" }, + { url = "https://files.pythonhosted.org/packages/c8/2a/1cb77266afee2458d82f50da41beba02159b1d6b1f7973afc9a1cad1499b/multidict-6.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d1bed1b467ef657f2a0ae62844a607909ef1c6889562de5e1d505f74457d0b96", size = 244895, upload-time = "2025-10-06T14:51:36.189Z" }, + { url = "https://files.pythonhosted.org/packages/dd/72/09fa7dd487f119b2eb9524946ddd36e2067c08510576d43ff68469563b3b/multidict-6.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ca43bdfa5d37bd6aee89d85e1d0831fb86e25541be7e9d376ead1b28974f8e5e", size = 241862, upload-time = "2025-10-06T14:51:41.291Z" }, + { url = "https://files.pythonhosted.org/packages/65/92/bc1f8bd0853d8669300f732c801974dfc3702c3eeadae2f60cef54dc69d7/multidict-6.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:44b546bd3eb645fd26fb949e43c02a25a2e632e2ca21a35e2e132c8105dc8599", size = 232376, upload-time = "2025-10-06T14:51:43.55Z" }, + { url = "https://files.pythonhosted.org/packages/09/86/ac39399e5cb9d0c2ac8ef6e10a768e4d3bc933ac808d49c41f9dc23337eb/multidict-6.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a6ef16328011d3f468e7ebc326f24c1445f001ca1dec335b2f8e66bed3006394", size = 240272, upload-time = "2025-10-06T14:51:45.265Z" }, + { url = "https://files.pythonhosted.org/packages/3d/b6/fed5ac6b8563ec72df6cb1ea8dac6d17f0a4a1f65045f66b6d3bf1497c02/multidict-6.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5aa873cbc8e593d361ae65c68f85faadd755c3295ea2c12040ee146802f23b38", size = 248774, upload-time = "2025-10-06T14:51:46.836Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8d/b954d8c0dc132b68f760aefd45870978deec6818897389dace00fcde32ff/multidict-6.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:3d7b6ccce016e29df4b7ca819659f516f0bc7a4b3efa3bb2012ba06431b044f9", size = 242731, upload-time = "2025-10-06T14:51:48.541Z" }, + { url = "https://files.pythonhosted.org/packages/16/9d/a2dac7009125d3540c2f54e194829ea18ac53716c61b655d8ed300120b0f/multidict-6.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:171b73bd4ee683d307599b66793ac80981b06f069b62eea1c9e29c9241aa66b0", size = 240193, upload-time = "2025-10-06T14:51:50.355Z" }, + { url = "https://files.pythonhosted.org/packages/39/ca/c05f144128ea232ae2178b008d5011d4e2cea86e4ee8c85c2631b1b94802/multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13", size = 48023, upload-time = "2025-10-06T14:51:51.883Z" }, + { url = "https://files.pythonhosted.org/packages/ba/8f/0a60e501584145588be1af5cc829265701ba3c35a64aec8e07cbb71d39bb/multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd", size = 53507, upload-time = "2025-10-06T14:51:53.672Z" }, + { url = "https://files.pythonhosted.org/packages/7f/ae/3148b988a9c6239903e786eac19c889fab607c31d6efa7fb2147e5680f23/multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827", size = 44804, upload-time = "2025-10-06T14:51:55.415Z" }, + { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, +] + +[[package]] +name = "mypy" +version = "1.18.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/77/8f0d0001ffad290cef2f7f216f96c814866248a0b92a722365ed54648e7e/mypy-1.18.2.tar.gz", hash = "sha256:06a398102a5f203d7477b2923dda3634c36727fa5c237d8f859ef90c42a9924b", size = 3448846, upload-time = "2025-09-19T00:11:10.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/04/7f462e6fbba87a72bc8097b93f6842499c428a6ff0c81dd46948d175afe8/mypy-1.18.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:07b8b0f580ca6d289e69209ec9d3911b4a26e5abfde32228a288eb79df129fcc", size = 12898728, upload-time = "2025-09-19T00:10:01.33Z" }, + { url = "https://files.pythonhosted.org/packages/99/5b/61ed4efb64f1871b41fd0b82d29a64640f3516078f6c7905b68ab1ad8b13/mypy-1.18.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ed4482847168439651d3feee5833ccedbf6657e964572706a2adb1f7fa4dfe2e", size = 11910758, upload-time = "2025-09-19T00:10:42.607Z" }, + { url = "https://files.pythonhosted.org/packages/3c/46/d297d4b683cc89a6e4108c4250a6a6b717f5fa96e1a30a7944a6da44da35/mypy-1.18.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ad2afadd1e9fea5cf99a45a822346971ede8685cc581ed9cd4d42eaf940986", size = 12475342, upload-time = "2025-09-19T00:11:00.371Z" }, + { url = "https://files.pythonhosted.org/packages/83/45/4798f4d00df13eae3bfdf726c9244bcb495ab5bd588c0eed93a2f2dd67f3/mypy-1.18.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a431a6f1ef14cf8c144c6b14793a23ec4eae3db28277c358136e79d7d062f62d", size = 13338709, upload-time = "2025-09-19T00:11:03.358Z" }, + { url = "https://files.pythonhosted.org/packages/d7/09/479f7358d9625172521a87a9271ddd2441e1dab16a09708f056e97007207/mypy-1.18.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7ab28cc197f1dd77a67e1c6f35cd1f8e8b73ed2217e4fc005f9e6a504e46e7ba", size = 13529806, upload-time = "2025-09-19T00:10:26.073Z" }, + { url = "https://files.pythonhosted.org/packages/71/cf/ac0f2c7e9d0ea3c75cd99dff7aec1c9df4a1376537cb90e4c882267ee7e9/mypy-1.18.2-cp313-cp313-win_amd64.whl", hash = "sha256:0e2785a84b34a72ba55fb5daf079a1003a34c05b22238da94fcae2bbe46f3544", size = 9833262, upload-time = "2025-09-19T00:10:40.035Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0c/7d5300883da16f0063ae53996358758b2a2df2a09c72a5061fa79a1f5006/mypy-1.18.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:62f0e1e988ad41c2a110edde6c398383a889d95b36b3e60bcf155f5164c4fdce", size = 12893775, upload-time = "2025-09-19T00:10:03.814Z" }, + { url = "https://files.pythonhosted.org/packages/50/df/2cffbf25737bdb236f60c973edf62e3e7b4ee1c25b6878629e88e2cde967/mypy-1.18.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8795a039bab805ff0c1dfdb8cd3344642c2b99b8e439d057aba30850b8d3423d", size = 11936852, upload-time = "2025-09-19T00:10:51.631Z" }, + { url = "https://files.pythonhosted.org/packages/be/50/34059de13dd269227fb4a03be1faee6e2a4b04a2051c82ac0a0b5a773c9a/mypy-1.18.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6ca1e64b24a700ab5ce10133f7ccd956a04715463d30498e64ea8715236f9c9c", size = 12480242, upload-time = "2025-09-19T00:11:07.955Z" }, + { url = "https://files.pythonhosted.org/packages/5b/11/040983fad5132d85914c874a2836252bbc57832065548885b5bb5b0d4359/mypy-1.18.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d924eef3795cc89fecf6bedc6ed32b33ac13e8321344f6ddbf8ee89f706c05cb", size = 13326683, upload-time = "2025-09-19T00:09:55.572Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ba/89b2901dd77414dd7a8c8729985832a5735053be15b744c18e4586e506ef/mypy-1.18.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:20c02215a080e3a2be3aa50506c67242df1c151eaba0dcbc1e4e557922a26075", size = 13514749, upload-time = "2025-09-19T00:10:44.827Z" }, + { url = "https://files.pythonhosted.org/packages/25/bc/cc98767cffd6b2928ba680f3e5bc969c4152bf7c2d83f92f5a504b92b0eb/mypy-1.18.2-cp314-cp314-win_amd64.whl", hash = "sha256:749b5f83198f1ca64345603118a6f01a4e99ad4bf9d103ddc5a3200cc4614adf", size = 9982959, upload-time = "2025-09-19T00:10:37.344Z" }, + { url = "https://files.pythonhosted.org/packages/87/e3/be76d87158ebafa0309946c4a73831974d4d6ab4f4ef40c3b53a385a66fd/mypy-1.18.2-py3-none-any.whl", hash = "sha256:22a1748707dd62b58d2ae53562ffc4d7f8bcc727e8ac7cbc69c053ddc874d47e", size = 2352367, upload-time = "2025-09-19T00:10:15.489Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "importlib-metadata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/8d/1f5a45fbcb9a7d87809d460f09dc3399e3fbd31d7f3e14888345e9d29951/opentelemetry_api-1.33.1.tar.gz", hash = "sha256:1c6055fc0a2d3f23a50c7e17e16ef75ad489345fd3df1f8b8af7c0bbf8a109e8", size = 65002, upload-time = "2025-05-16T18:52:41.146Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/44/4c45a34def3506122ae61ad684139f0bbc4e00c39555d4f7e20e0e001c8a/opentelemetry_api-1.33.1-py3-none-any.whl", hash = "sha256:4db83ebcf7ea93e64637ec6ee6fabee45c5cbe4abd9cf3da95c43828ddb50b83", size = 65771, upload-time = "2025-05-16T18:52:17.419Z" }, +] + +[[package]] +name = "opentelemetry-distro" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dd/0b/0012cb5947c255d6755cb91e3b9fd9bb1876b7e14d5ab67131c030fd90b2/opentelemetry_distro-0.54b1.tar.gz", hash = "sha256:61d6b97bb7a245fddbb829345bb4ad18be39eb52f770fab89a127107fca3149f", size = 2593, upload-time = "2025-05-16T19:03:19.71Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b1/5f008a2909d59c02c7b88aa595502d438ca21c15e88edd7620c697a56ce8/opentelemetry_distro-0.54b1-py3-none-any.whl", hash = "sha256:009486513b32b703e275bb2f9ccaf5791676bbf5e2dcfdd90201ddc8f56f122b", size = 3348, upload-time = "2025-05-16T19:02:11.624Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7a/18/a1ec9dcb6713a48b4bdd10f1c1e4d5d2489d3912b80d2bcc059a9a842836/opentelemetry_exporter_otlp_proto_common-1.33.1.tar.gz", hash = "sha256:c57b3fa2d0595a21c4ed586f74f948d259d9949b58258f11edb398f246bec131", size = 20828, upload-time = "2025-05-16T18:52:43.795Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/09/52/9bcb17e2c29c1194a28e521b9d3f2ced09028934c3c52a8205884c94b2df/opentelemetry_exporter_otlp_proto_common-1.33.1-py3-none-any.whl", hash = "sha256:b81c1de1ad349785e601d02715b2d29d6818aed2c809c20219f3d1f20b038c36", size = 18839, upload-time = "2025-05-16T18:52:22.447Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/5f/75ef5a2a917bd0e6e7b83d3fb04c99236ee958f6352ba3019ea9109ae1a6/opentelemetry_exporter_otlp_proto_grpc-1.33.1.tar.gz", hash = "sha256:345696af8dc19785fac268c8063f3dc3d5e274c774b308c634f39d9c21955728", size = 22556, upload-time = "2025-05-16T18:52:44.76Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/ec/6047e230bb6d092c304511315b13893b1c9d9260044dd1228c9d48b6ae0e/opentelemetry_exporter_otlp_proto_grpc-1.33.1-py3-none-any.whl", hash = "sha256:7e8da32c7552b756e75b4f9e9c768a61eb47dee60b6550b37af541858d669ce1", size = 18591, upload-time = "2025-05-16T18:52:23.772Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/48/e4314ac0ed2ad043c07693d08c9c4bf5633857f5b72f2fefc64fd2b114f6/opentelemetry_exporter_otlp_proto_http-1.33.1.tar.gz", hash = "sha256:46622d964a441acb46f463ebdc26929d9dec9efb2e54ef06acdc7305e8593c38", size = 15353, upload-time = "2025-05-16T18:52:45.522Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/ba/5a4ad007588016fe37f8d36bf08f325fe684494cc1e88ca8fa064a4c8f57/opentelemetry_exporter_otlp_proto_http-1.33.1-py3-none-any.whl", hash = "sha256:ebd6c523b89a2ecba0549adb92537cc2bf647b4ee61afbbd5a4c6535aa3da7cf", size = 17733, upload-time = "2025-05-16T18:52:25.137Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/fd/5756aea3fdc5651b572d8aef7d94d22a0a36e49c8b12fcb78cb905ba8896/opentelemetry_instrumentation-0.54b1.tar.gz", hash = "sha256:7658bf2ff914b02f246ec14779b66671508125c0e4227361e56b5ebf6cef0aec", size = 28436, upload-time = "2025-05-16T19:03:22.223Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/89/0790abc5d9c4fc74bd3e03cb87afe2c820b1d1a112a723c1163ef32453ee/opentelemetry_instrumentation-0.54b1-py3-none-any.whl", hash = "sha256:a4ae45f4a90c78d7006c51524f57cd5aa1231aef031eae905ee34d5423f5b198", size = 31019, upload-time = "2025-05-16T19:02:15.611Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aio-pika" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/79/e7/b92741e7dc1c38d512fcd0c3d6b3270cbbe3f3965f4280810c3f48688b1f/opentelemetry_instrumentation_aio_pika-0.54b1.tar.gz", hash = "sha256:a1b9f2d2735f1e9808bac263776f445c446c19580c3a24d0ecc02e289b55b21d", size = 10092, upload-time = "2025-05-16T19:03:25.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/46/b77e99e0e3a4f473e8a38e46d12269a5ef28ed0f7d52306a06c6b82f2aff/opentelemetry_instrumentation_aio_pika-0.54b1-py3-none-any.whl", hash = "sha256:c1d1a52296937e54a8c69878434c86bdc038d53c1eba6f133c0e63f479484990", size = 13462, upload-time = "2025-05-16T19:02:16.816Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aiohttp-client" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/fe/535efdb090543cb8e23149271c3ef27e37d3862865c52e2b2b58f7b5cb8d/opentelemetry_instrumentation_aiohttp_client-0.54b1.tar.gz", hash = "sha256:c51c643a5587b9efce6c4cae0f5e2202a25fac69caa89643465f57d5d8ba3789", size = 13643, upload-time = "2025-05-16T19:03:27.156Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/de/07f25301d57bb83f29ee1eb5503871bddc132d4362ff9897c605e8c54c04/opentelemetry_instrumentation_aiohttp_client-0.54b1-py3-none-any.whl", hash = "sha256:d9b53c04865e8a4c984c1330e4f1d5570bc28543833a4718cbe4265091ee0e71", size = 11661, upload-time = "2025-05-16T19:02:17.827Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aiopg" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/4b/ef14e66e9b7b8bf859844c08d78bbb921c7ec41e2008bd657942a15a5797/opentelemetry_instrumentation_aiopg-0.54b1.tar.gz", hash = "sha256:d00a6845bb8f8d45e81d42bc8ba38df88bb7efdc2cd0e572968dc5359f5b8355", size = 11808, upload-time = "2025-05-16T19:03:29.548Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/eb/1b7d0ff786ec1734766b082ebceea729c33b5f7d986816411fb8feb74373/opentelemetry_instrumentation_aiopg-0.54b1-py3-none-any.whl", hash = "sha256:1d162793c4dee9db469d89c962f161801027abc55002eeb23c076ab5f1f334d4", size = 12455, upload-time = "2025-05-16T19:02:21.718Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-asgi" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asgiref" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/f7/a3377f9771947f4d3d59c96841d3909274f446c030dbe8e4af871695ddee/opentelemetry_instrumentation_asgi-0.54b1.tar.gz", hash = "sha256:ab4df9776b5f6d56a78413c2e8bbe44c90694c67c844a1297865dc1bd926ed3c", size = 24230, upload-time = "2025-05-16T19:03:30.234Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/24/7a6f0ae79cae49927f528ecee2db55a5bddd87b550e310ce03451eae7491/opentelemetry_instrumentation_asgi-0.54b1-py3-none-any.whl", hash = "sha256:84674e822b89af563b283a5283c2ebb9ed585d1b80a1c27fb3ac20b562e9f9fc", size = 16338, upload-time = "2025-05-16T19:02:22.808Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-asyncpg" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/66/d2e2ccbb13cf6d6f6c7c8d907021e9bd8b56585c59e28d99ebc74138c3d1/opentelemetry_instrumentation_asyncpg-0.54b1.tar.gz", hash = "sha256:58e50de68b40221c2d6e22d626e5d03d9d6b950ba59504a5fc060c95cdc7c4fb", size = 8717, upload-time = "2025-05-16T19:03:32.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/93/c17ef16b63d6e073f875bfe4624b9711269a3d208ee11cdfc5cc1b3537d8/opentelemetry_instrumentation_asyncpg-0.54b1-py3-none-any.whl", hash = "sha256:2348843f0c6f0cefb0badc974cbeae244ee89c57e1ae2a587e5f641c23e16fdc", size = 10062, upload-time = "2025-05-16T19:02:26.371Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aws-lambda" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-propagator-aws-xray" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/77/fd/57a1360203efa8410637679b00b61603782dd84ca9c0b3619192c07e0d1f/opentelemetry_instrumentation_aws_lambda-0.54b1.tar.gz", hash = "sha256:c40f011581abf3cd28d8833fb6218bac75eec3adda7774ff2685f41b279a9fdd", size = 17904, upload-time = "2025-05-16T19:03:33.658Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/f3/c08fee6ae3f2d2b461ee7e7c2b3ac8de52281b236f3593146ba456cd0db7/opentelemetry_instrumentation_aws_lambda-0.54b1-py3-none-any.whl", hash = "sha256:51bc4301b9733fcda616d68197ee5f15108175a217f5fd8db349d53ba14cc172", size = 12484, upload-time = "2025-05-16T19:02:27.421Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-boto" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/b5/5b777b6b1f3ce586141485584a52f0fdd3d63398011b0d02feb822f46f0a/opentelemetry_instrumentation_boto-0.54b1.tar.gz", hash = "sha256:83407a5f6f69cd0bebff802da0d228eb13196a1de713b43e1348b77f80033c6a", size = 9716, upload-time = "2025-05-16T19:03:34.364Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/5e/8f8bfb5fa1c51aa66b6af7e4a64d9be9dc9aba6ff2d8c0f405204a5069ea/opentelemetry_instrumentation_boto-0.54b1-py3-none-any.whl", hash = "sha256:b52b1216bee095858bcd0d992360911b6e870acc4f4c9090f8ca1081d9fdede6", size = 10146, upload-time = "2025-05-16T19:02:28.417Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-boto3sqs" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/44/232d566fb06a640f386ce2bdd271e64ecaaae9bdcc5c68f84f2552c5e585/opentelemetry_instrumentation_boto3sqs-0.54b1.tar.gz", hash = "sha256:c8bf67bc836bb66da6a1b000e6c1b07229481c75731ea6a0ed0b59b256e035b9", size = 11715, upload-time = "2025-05-16T19:03:35.028Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/db/62ebd5d172eb3997038f24a238792b5ebe604bc70dbda1cba91c3d36a655/opentelemetry_instrumentation_boto3sqs-0.54b1-py3-none-any.whl", hash = "sha256:40ae98fe53584e5b1d61725fc8e153a1be2d6b308f65f56deb4f276a23b43cf4", size = 11672, upload-time = "2025-05-16T19:02:29.62Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-botocore" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-propagator-aws-xray" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/c9/88127b0714881e6801e4921bca445de634b0b3568e607ccc4a606f711ea7/opentelemetry_instrumentation_botocore-0.54b1.tar.gz", hash = "sha256:54f7b0b48398dfc8b8e98deec89df5b4c8c359d803a0d6c8ce4bd972d50c03dd", size = 110252, upload-time = "2025-05-16T19:03:35.805Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/0e/22e35a74e6566feacd8a80f5899242920765f134c0edbb0b943ddb369c0e/opentelemetry_instrumentation_botocore-0.54b1-py3-none-any.whl", hash = "sha256:74d3a36d5bab8447669b25f915a3db6c37ae14a5faa198500471d5b1bbd1902f", size = 35461, upload-time = "2025-05-16T19:02:30.621Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-cassandra" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/fb/9a405a3fed8389603bbcd63a74ea303d55992c2c7e9abdc8daeba1945fa9/opentelemetry_instrumentation_cassandra-0.54b1.tar.gz", hash = "sha256:f9a79c0139888eaedb58bb50da42709c7bc6ead9b9f5263164873e4275cefbce", size = 7581, upload-time = "2025-05-16T19:03:36.591Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/ca/e726bfd5dc40eef7961aa5a7a5e7238eb407c84bd709cb531abd09c62302/opentelemetry_instrumentation_cassandra-0.54b1-py3-none-any.whl", hash = "sha256:81b8d963a02ea43ea4a9d00c88cd0b01dda69daf914d6e4984b2e98b1e8fdeb7", size = 8899, upload-time = "2025-05-16T19:02:31.738Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-celery" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/71/4ac353874e0f7ca93591e1a74b7a290dec2027733bbb31bd76da3a74f97f/opentelemetry_instrumentation_celery-0.54b1.tar.gz", hash = "sha256:f2bd019afe9286214083ae2db95ed24adf9a0aa2e943177462d64ceb8380d78e", size = 14778, upload-time = "2025-05-16T19:03:37.376Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/be/90e2b7d26915639cfcdf6e200b309c9d64027ff752c56145bc149cd67d68/opentelemetry_instrumentation_celery-0.54b1-py3-none-any.whl", hash = "sha256:892ec6bf829a0d60cf3bffd1a8bb6fd8055f1194167b4e132e33321de8e05c24", size = 13809, upload-time = "2025-05-16T19:02:33.046Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-confluent-kafka" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/a8/472ddb40f8caab693de4a5c2084b1513b67f879060e5e46cfb2f96bc0872/opentelemetry_instrumentation_confluent_kafka-0.54b1.tar.gz", hash = "sha256:1e378b5c88170c7fcd23b07054a61d2af7a7ec5af1aba120446514ef27b7ad82", size = 11615, upload-time = "2025-05-16T19:03:39.409Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/9e/107e45d5eb41961a187c28eb4d0da02d133d371dfdd149b1f7ef96e78926/opentelemetry_instrumentation_confluent_kafka-0.54b1-py3-none-any.whl", hash = "sha256:9dc896233a973705e1ac25950ababe23322338f4cd3fff0ccd509759aeb2e802", size = 12624, upload-time = "2025-05-16T19:02:35.018Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-dbapi" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7b/b7/b74e2c7c858cde8909516cbe77cb0e841167d38795c90df524d84440e1f1/opentelemetry_instrumentation_dbapi-0.54b1.tar.gz", hash = "sha256:69421c36994114040d197f7e846c01869d663084c6c2025e85b2d6cfce2f8299", size = 14145, upload-time = "2025-05-16T19:03:40.074Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/6a/98d409ae5ca60ae4e41295a42256d81bb96bd5a7a386ca0343e27494d53d/opentelemetry_instrumentation_dbapi-0.54b1-py3-none-any.whl", hash = "sha256:21bc20cd878a78bf44bab686e9679cef1eed77e53c754c0a09f0ca49f5fd0283", size = 12450, upload-time = "2025-05-16T19:02:36.041Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-django" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/93/8d194bda118fc4c369b9a3091c39eec384137b46f33421272359883c53d9/opentelemetry_instrumentation_django-0.54b1.tar.gz", hash = "sha256:38414f989f60e9dba82928e13f6a20a26baf5cc700f1d891f27e0703ca577802", size = 24866, upload-time = "2025-05-16T19:03:41.183Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/75/1b0ae1b8b7d6a85d5d54e8092c84b18669bd5da6f5ceb3410047674db3c0/opentelemetry_instrumentation_django-0.54b1-py3-none-any.whl", hash = "sha256:462fbd577991021f56152df21ca1fdcd7c4abdc10dd44254a44d515b8e3d61ca", size = 19541, upload-time = "2025-05-16T19:02:37.4Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-elasticsearch" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/8b/e7d57ab4aab2d63e2094001e0301d848ec83b86ee428e538101922cd27ed/opentelemetry_instrumentation_elasticsearch-0.54b1.tar.gz", hash = "sha256:d5b6996919679c91e5791457de24d9ff6472887a4e1426b8f2345c52f6ba6f10", size = 14379, upload-time = "2025-05-16T19:03:41.939Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/bd/4919e716190454895c895c37745bbf22d59231d864862a9bc4ac68f4c8d8/opentelemetry_instrumentation_elasticsearch-0.54b1-py3-none-any.whl", hash = "sha256:9f5c968954d72f15e133d06760294f13886d98c4da626374168094035f6dec50", size = 12607, upload-time = "2025-05-16T19:02:38.944Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-falcon" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1a/7d/73df17199014ea57ae71bb128a5155ea4d81d86d0b61d4c852cec485ccb1/opentelemetry_instrumentation_falcon-0.54b1.tar.gz", hash = "sha256:06e72aac39fd4ac65555a8cb056428d7c4366bb1fafa65e60474d6e3d6c3eada", size = 17176, upload-time = "2025-05-16T19:03:42.651Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/40/65a3cecd312ac380477ff44306c737b6a3d0cb7ec1ec28e09aacdc8904ac/opentelemetry_instrumentation_falcon-0.54b1-py3-none-any.whl", hash = "sha256:6eaf3bf714a6e3398a5ddc132c3e77de851331ee00989302f88a4d4ce829e679", size = 14206, upload-time = "2025-05-16T19:02:40.082Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-fastapi" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-asgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/3b/9a262cdc1a4defef0e52afebdde3e8add658cc6f922e39e9dcee0da98349/opentelemetry_instrumentation_fastapi-0.54b1.tar.gz", hash = "sha256:1fcad19cef0db7092339b571a59e6f3045c9b58b7fd4670183f7addc459d78df", size = 19325, upload-time = "2025-05-16T19:03:45.359Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/9c/6b2b0f9d6c5dea7528ae0bf4e461dd765b0ae35f13919cd452970bb0d0b3/opentelemetry_instrumentation_fastapi-0.54b1-py3-none-any.whl", hash = "sha256:fb247781cfa75fd09d3d8713c65e4a02bd1e869b00e2c322cc516d4b5429860c", size = 12125, upload-time = "2025-05-16T19:02:41.172Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-flask" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/b8/d46dcb20889713a355de418a0d31d552089bf4454e1baf48c7b6b3fb6035/opentelemetry_instrumentation_flask-0.54b1.tar.gz", hash = "sha256:683f9963f06d065fc07ceaffa106df1f6f20075318530328f69fde39dfb1192f", size = 19221, upload-time = "2025-05-16T19:03:46.063Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/85/aaaed416e9ee7d5c4ab98b3dba3d66675f44cfdcbf5d683e144a10fafad0/opentelemetry_instrumentation_flask-0.54b1-py3-none-any.whl", hash = "sha256:1f9d44b8ca9bc7d52e2aeb539bc64a88d6fc04f2f67c1ffb278148c99cc8ec6a", size = 14626, upload-time = "2025-05-16T19:02:42.202Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-grpc" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/7a/a2e879f5b39d77091181c944064bf99e11646a58242f1e8efa829646bcb1/opentelemetry_instrumentation_grpc-0.54b1.tar.gz", hash = "sha256:4198aab2a380b2807a50112892f9b8a50772169a3722fa99634ef70c6c017ea2", size = 30926, upload-time = "2025-05-16T19:03:46.813Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/51/22ca8af0b9f78029657957f33604813c07dde18fb035dd37a60e2a4070d8/opentelemetry_instrumentation_grpc-0.54b1-py3-none-any.whl", hash = "sha256:c01114c5c147c216f9144da065d4a84bffb2a43b3cb05763b40ec744bbf5206e", size = 27112, upload-time = "2025-05-16T19:02:43.853Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-httpx" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/64/65b2e599c5043a5dbd14c251d48dec4947e2ec8713f601df197ea9b51246/opentelemetry_instrumentation_httpx-0.54b1.tar.gz", hash = "sha256:37e1cd0190f98508d960ec1667c9f148f8c8ad9a6cab127b57c9ad92c37493c3", size = 17734, upload-time = "2025-05-16T19:03:47.762Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/63/f92e93b613b51344a979dc6674641f2c0d24b031f6a08557304398962e41/opentelemetry_instrumentation_httpx-0.54b1-py3-none-any.whl", hash = "sha256:99b8e43ebf1d945ca298d84d32298ba26d1c3431738cea9f69a26c442661745f", size = 14129, upload-time = "2025-05-16T19:02:45.418Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-jinja2" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/9d/48836360719cfc0aaa892440b42d2fc3cf83bb84d4f92cda0ad9af7dd598/opentelemetry_instrumentation_jinja2-0.54b1.tar.gz", hash = "sha256:21e435e2029e876e9c91277fb88e9cf235211f96973c64e494b8be7551c7b3e1", size = 8468, upload-time = "2025-05-16T19:03:48.499Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/d4/213e701c74541f860bfc89211ab54b7c9d3c89576dc461bed14d6f1d0e2f/opentelemetry_instrumentation_jinja2-0.54b1-py3-none-any.whl", hash = "sha256:bcefb00e177c3481a0f735ffe96589ee40ba6b603092c19fca7b03fcb5c72a19", size = 9428, upload-time = "2025-05-16T19:02:46.544Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-kafka-python" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/1c/232ffeb76dd519d82c6b0f1b28dc33f6583f3a90b35dd3360179d46e0c72/opentelemetry_instrumentation_kafka_python-0.54b1.tar.gz", hash = "sha256:8b3f18be44939a270ca55b8017c5f822b94bdc1372b59a49464b990c715d0ba4", size = 10535, upload-time = "2025-05-16T19:03:49.198Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/88/9998fac3940d818100f0b3b1b67992481df233516d4d0a14fce43d6dcbc8/opentelemetry_instrumentation_kafka_python-0.54b1-py3-none-any.whl", hash = "sha256:ab53ed8af3281a337feb5c1fa01059d5af99ec7aa84f2b360627a20fed385ab7", size = 11502, upload-time = "2025-05-16T19:02:48.012Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-logging" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d9/5b/88ed39f22e8c6eb4f6192ab9a62adaa115579fcbcadb3f0241ee645eea56/opentelemetry_instrumentation_logging-0.54b1.tar.gz", hash = "sha256:893a3cbfda893b64ff71b81991894e2fd6a9267ba85bb6c251f51c0419fbe8fa", size = 9976, upload-time = "2025-05-16T19:03:49.976Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/0c/b441fb30d860f25040eaed61e89d68f4d9ee31873159ed18cbc1b92eba56/opentelemetry_instrumentation_logging-0.54b1-py3-none-any.whl", hash = "sha256:01a4cec54348f13941707d857b850b0febf9d49f45d0fcf0673866e079d7357b", size = 12579, upload-time = "2025-05-16T19:02:49.039Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-mysql" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1e/6e/8b203e0f0afb994a2b8734d37d4ffe8a70cd45202bf021c3a531d7b1cb9d/opentelemetry_instrumentation_mysql-0.54b1.tar.gz", hash = "sha256:de3a9367886523f30bd04b51edcf8d0777de7eac4a2467f52478231f51405b49", size = 9390, upload-time = "2025-05-16T19:03:50.66Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/18/aeae1a3cc4dd17f4338d105592a8e6cba572ef9d94089649d4b8a0d7b4dc/opentelemetry_instrumentation_mysql-0.54b1-py3-none-any.whl", hash = "sha256:07cd8c3003b439e0626e2b77f2b7f28f73c75879e28d9260f8d9a9600fb85fc2", size = 10100, upload-time = "2025-05-16T19:02:49.952Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-mysqlclient" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/c6/27ac94688611cb51d20d83855b1dbd8610009f8ccf73e0fdca40648b4db4/opentelemetry_instrumentation_mysqlclient-0.54b1.tar.gz", hash = "sha256:c14abdc5e19015ab7d6aa23ce96122c4f966fac629489eaa614e28da84e94d88", size = 9330, upload-time = "2025-05-16T19:03:51.382Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/4d/9d8a5e571c370331c771467a4c51bb2da5ced1c2601bd2990c2a2bdc0caa/opentelemetry_instrumentation_mysqlclient-0.54b1-py3-none-any.whl", hash = "sha256:462972e140586e00a5c0f0025585b2decfd0c4d7189cd42e2f786ca8e9fdab27", size = 10125, upload-time = "2025-05-16T19:02:51.422Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pika" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5d/8b/e7510900b383a2aaaec728034d8353d9112ce6fb75df1b53094185deae10/opentelemetry_instrumentation_pika-0.54b1.tar.gz", hash = "sha256:b8e20202233fee5aca35bd58db431bdcfeeddd85f83067800ab494c234479f51", size = 12993, upload-time = "2025-05-16T19:03:52.055Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/68/c1dd5a8fcf3e98644ff3d1dfc3db9a7ac65a9ae964011c139343756b1e24/opentelemetry_instrumentation_pika-0.54b1-py3-none-any.whl", hash = "sha256:3098ba31cdf3b390deb18c9eb824fccff9b8a2d51878fdcc7b69f1e6218963dc", size = 13661, upload-time = "2025-05-16T19:02:52.407Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-psycopg2" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/09/dd6e55a852c87ee6402d745486d7d2e32577e728781bc1c89812d2645f48/opentelemetry_instrumentation_psycopg2-0.54b1.tar.gz", hash = "sha256:6e899baf7b6687320491b25d5ceadde5c614a95fb379da8e2a513d430f28102f", size = 10663, upload-time = "2025-05-16T19:03:53.817Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/d0/4915e34533c26f319ba9b5346c0d1aa48d099bb29719674dbace3e4d643b/opentelemetry_instrumentation_psycopg2-0.54b1-py3-none-any.whl", hash = "sha256:2f493b180c2028bcab2ecaff8bd25560dd92a538bba8b9510411f182dd2a075e", size = 10709, upload-time = "2025-05-16T19:02:54.388Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pymemcache" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/58/66b4eb77a1279816b108d41b852f5ae02c69c8442522fb37539c119ff056/opentelemetry_instrumentation_pymemcache-0.54b1.tar.gz", hash = "sha256:03a272e3a416a633f83ee5b494a346d37fbe8249271bbf5e02686c354ae810a9", size = 10606, upload-time = "2025-05-16T19:03:54.485Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/91/678a2215292ce4cdfb28e282bef97e63bb497b42e2d677a24db7b979474d/opentelemetry_instrumentation_pymemcache-0.54b1-py3-none-any.whl", hash = "sha256:d752ccc03214cb079733d8d811ba9e624a7b6c76454ce96e30edccfed1f75f91", size = 9685, upload-time = "2025-05-16T19:02:55.389Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pymongo" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/4c/e214f98f6d0885cd1a4e09740fc68d59dfb5e108c310c0003415eb593a47/opentelemetry_instrumentation_pymongo-0.54b1.tar.gz", hash = "sha256:75cbcfe499009d535e508b869825113fc0888d4d60c544d4337ef65eb4d299f0", size = 9614, upload-time = "2025-05-16T19:03:55.135Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/f4/b4504705ce678ac6118e4c5226b566d940aa4f7baf8e6c585abad36d1197/opentelemetry_instrumentation_pymongo-0.54b1-py3-none-any.whl", hash = "sha256:2331f4f0cbd5a5053edebb956b4dd288d60eb8971d9b6d5927f0753d0651161e", size = 11314, upload-time = "2025-05-16T19:02:56.958Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pymysql" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/f5/f6f3f593c6f95994470eea001960c4891ead94d6583698862d2c1c2eb046/opentelemetry_instrumentation_pymysql-0.54b1.tar.gz", hash = "sha256:c22501ee104c34b70e37e5cdc59d74ffb833d473ac3ecfe899b707bf194e914b", size = 9208, upload-time = "2025-05-16T19:03:57.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/2f/e7a0e6555757cb14c54a4e923f0ba0a0ed9833cfae0fe8334e698d6a2767/opentelemetry_instrumentation_pymysql-0.54b1-py3-none-any.whl", hash = "sha256:54cb13c6ab559cf14e6de94f778e286d8bc89a2262cff59ee3566a41c6ab5dd1", size = 9984, upload-time = "2025-05-16T19:02:58.926Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pyramid" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/be/488a87bf48049c260da15ecc5ebec0e99287aaabf0a9e94d759066b84872/opentelemetry_instrumentation_pyramid-0.54b1.tar.gz", hash = "sha256:c68d46de5cbf1e804b2b730f7f60bf87f0bc9735e3d21b8359d35705ff8457b3", size = 15046, upload-time = "2025-05-16T19:03:58.137Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/eb/456f9a79c0e3ac26036a0d262235b9cde3a085b88c8ec17e1f062b2d2327/opentelemetry_instrumentation_pyramid-0.54b1-py3-none-any.whl", hash = "sha256:11b7f210ff45b754db30f7522bb2e27be902ddea38a59cc16c08e16dd8061f42", size = 13999, upload-time = "2025-05-16T19:02:59.938Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-redis" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c2/01/fad85231c3518bf6349a7ef483ef06a27100da8d1b7531dec9d8d09b94d8/opentelemetry_instrumentation_redis-0.54b1.tar.gz", hash = "sha256:89024c4752147d528e8c51fff0034193e628da339848cda78afe0cf4eb0c7ccb", size = 13908, upload-time = "2025-05-16T19:03:58.876Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/c1/78f18965f16e34a8fecc5b10c52aca1243e75a512a0a0320556a69583f36/opentelemetry_instrumentation_redis-0.54b1-py3-none-any.whl", hash = "sha256:e98992bd38e93081158f9947a1a8eea51d96e8bfe5054894a5b8d1d82117c0c8", size = 14924, upload-time = "2025-05-16T19:03:01.07Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-remoulade" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e4/f5/d360444cd559f67a6d6f2467ca3f036db1894d3ba8c4a82a2c443eae674f/opentelemetry_instrumentation_remoulade-0.54b1.tar.gz", hash = "sha256:0c2f5571985375c55532402238dafb09d0e6b4b8c2a3c18925ef461bb3896c96", size = 8131, upload-time = "2025-05-16T19:03:59.804Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/35/0a17505193fd93e16d26d18a0605a9dedb5bdde9c4aed56f391160ed657b/opentelemetry_instrumentation_remoulade-0.54b1-py3-none-any.whl", hash = "sha256:5d50d298a1d456e1008166d0a20cb7ccada93b502b99cf74f344fb6d1df947c9", size = 10130, upload-time = "2025-05-16T19:03:02.152Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-requests" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/45/116da84930d3dc2f5cdd876283ca96e9b96547bccee7eaa0bd01ce6bf046/opentelemetry_instrumentation_requests-0.54b1.tar.gz", hash = "sha256:3eca5d697c5564af04c6a1dd23b6a3ffbaf11e64887c6051655cee03998f4654", size = 15148, upload-time = "2025-05-16T19:04:00.488Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/b1/6e33d2c3d3cc9e3ae20a9a77625ec81a509a0e5d7fa87e09e7f879468990/opentelemetry_instrumentation_requests-0.54b1-py3-none-any.whl", hash = "sha256:a0c4cd5d946224f336d6bd73cdabdecc6f80d5c39208f84eb96eb15f16cd41a0", size = 12968, upload-time = "2025-05-16T19:03:03.131Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-sqlalchemy" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/33/78a25ae4233d42058bb0b363ba4fea7d7210e53c24e5e31f16d5cf6cf957/opentelemetry_instrumentation_sqlalchemy-0.54b1.tar.gz", hash = "sha256:97839acf1c9b96ded857fca57a09b86a56cf8d9eb6d706b7ceaee9352a460e03", size = 14620, upload-time = "2025-05-16T19:04:01.215Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/2b/1c954885815614ef5c1e8c7bbf57a5275e64cd6fb5946b65e17162a34037/opentelemetry_instrumentation_sqlalchemy-0.54b1-py3-none-any.whl", hash = "sha256:d2ca5edb4c7ecef120d51aad6793b7da1cc80207ccfd31c437ee18f098e7c4c4", size = 14169, upload-time = "2025-05-16T19:03:04.119Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-sqlite3" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/07/cae18dbc2ba1997a382e63f1ee7527dff9557675c2802709ca8a011341c4/opentelemetry_instrumentation_sqlite3-0.54b1.tar.gz", hash = "sha256:e32ec80a2f50df035bf16de142527157b98a60a3863ddcb6aa20beae8a64a24d", size = 7929, upload-time = "2025-05-16T19:04:02.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/8a/7a6b6b1cabc65e237ebbfd10429997579eaa4281c169429c28eb5a60e177/opentelemetry_instrumentation_sqlite3-0.54b1-py3-none-any.whl", hash = "sha256:756c8f51a3b738f4cd52556b2146a6e2e6a33516b494aa4dbb7478702af4a475", size = 9342, upload-time = "2025-05-16T19:03:05.641Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-starlette" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-asgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f9/43/c8095007bcc800a5465ebe50b097ab0da8b1d973f9afdcea04d98d2cb81d/opentelemetry_instrumentation_starlette-0.54b1.tar.gz", hash = "sha256:04f5902185166ad0a96bbc5cc184983bdf535ac92b1edc7a6093e9d14efa00d1", size = 14492, upload-time = "2025-05-16T19:04:03.012Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1d/9215d1696a428bbc0c46b8fc7c0189693ba5cdd9032f1dbeff04e9526828/opentelemetry_instrumentation_starlette-0.54b1-py3-none-any.whl", hash = "sha256:533e730308b5e6e99ab2a219c891f8e08ef5e67db76a148cc2f6c4fd5b6bcc0e", size = 11740, upload-time = "2025-05-16T19:03:07.079Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-system-metrics" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "psutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/10/cc/0db64253beac5a58dca621114f1be8c95af3ec8ac31785fb28b6ed82021e/opentelemetry_instrumentation_system_metrics-0.54b1.tar.gz", hash = "sha256:2846ba1019e1672fb605eff3d3af198fa1b8f1540ece70da82a2d20d9b95779b", size = 15007, upload-time = "2025-05-16T19:04:03.758Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/fd/e9bd23fd734bbdc028e7ebe3d25855381b696ceca214f80ad7fe74e9079c/opentelemetry_instrumentation_system_metrics-0.54b1-py3-none-any.whl", hash = "sha256:1b6f23cc8cf18b525bdb285c3664b521ce81b1e82c4f3db6a82210b8c37af1e4", size = 13093, upload-time = "2025-05-16T19:03:08.516Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-threading" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/bd/561245292e7cc78ac7a0a75537873aea87440cb9493d41371421b3308c2b/opentelemetry_instrumentation_threading-0.54b1.tar.gz", hash = "sha256:3a081085b59675baf7bd93126a681903e6304a5f283df5eaecdd44bcb66df578", size = 8774, upload-time = "2025-05-16T19:04:04.482Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/10/d87ec07d69546adaad525ba5d40d27324a45cba29097d9854a53d9af5047/opentelemetry_instrumentation_threading-0.54b1-py3-none-any.whl", hash = "sha256:bc229e6cd3f2b29fafe0a8dd3141f452e16fcb4906bca4fbf52609f99fb1eb42", size = 9314, upload-time = "2025-05-16T19:03:09.527Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-tornado" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/61/9da044c2ae3cea9a4f0e4cf28bbc1a5aaf7052c2b00ad9f305a107da9110/opentelemetry_instrumentation_tornado-0.54b1.tar.gz", hash = "sha256:73a5ba0f915688907dd4640653d3970167715c42a5ef4a948bbcf93ad9682b8d", size = 17089, upload-time = "2025-05-16T19:04:05.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/70/858aabf04ef24f409995c032c06c9a96e7c8bb9a257c9981b7fb380b7458/opentelemetry_instrumentation_tornado-0.54b1-py3-none-any.whl", hash = "sha256:3f4773cb3adfd6fdd592f182a72be85ca6cf01500a9973ac17947ce81d9872ee", size = 15327, upload-time = "2025-05-16T19:03:10.527Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-tortoiseorm" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/ec/c1c2916e9448ea2c5fde2700bf6577d42db5a2ed0fda856e388d34e42872/opentelemetry_instrumentation_tortoiseorm-0.54b1.tar.gz", hash = "sha256:f9ffe00bcdfa895dfa1a512f4fde186ebd816a4636afd26a7716f258b4c7e3f9", size = 8263, upload-time = "2025-05-16T19:04:06.372Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/e0/81eb1ec3cbe436030c32ada365f6fcf9e034c882d8c3060dfe35ffdfabc0/opentelemetry_instrumentation_tortoiseorm-0.54b1-py3-none-any.whl", hash = "sha256:0335efcd4f5e240efecc36f909939dbc6fb8c9b0733dc3f0615a39c3f6544c7e", size = 10158, upload-time = "2025-05-16T19:03:11.572Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-urllib" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/52/47ecbce59d47e4543286ab88753efe1903f40a80c05397407375b4e600c2/opentelemetry_instrumentation_urllib-0.54b1.tar.gz", hash = "sha256:99943400b6814ebf072735e0fb42dc5c74705f30b64ebed3778f0e7c6e16d63e", size = 13788, upload-time = "2025-05-16T19:04:07.028Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/2a/d8c9876d80d89f728c89439a880eaccedab3ffe1cc83b2c49abf17b81038/opentelemetry_instrumentation_urllib-0.54b1-py3-none-any.whl", hash = "sha256:94744470733f61f3dd282be7868e93f5bc277f07a0aeda7c836c913cbcf4f416", size = 12625, upload-time = "2025-05-16T19:03:12.701Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-urllib3" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/6f/76a46806cd21002cac1bfd087f5e4674b195ab31ab44c773ca534b6bb546/opentelemetry_instrumentation_urllib3-0.54b1.tar.gz", hash = "sha256:0d30ba3b230e4100cfadaad29174bf7bceac70e812e4f5204e681e4b55a74cd9", size = 15697, upload-time = "2025-05-16T19:04:07.709Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/7a/d75bec41edb6deaf1d2859bab66a84c8ba03e822e7eafdb245da205e53f6/opentelemetry_instrumentation_urllib3-0.54b1-py3-none-any.whl", hash = "sha256:e87958c297ddd36d30e1c9069f34a9690e845e4ccc2662dd80e99ed976d4c03e", size = 13123, upload-time = "2025-05-16T19:03:14.053Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-wsgi" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/0f/442eba02bd277fae2f5eb3ac5f8dd5f8cc52ddbe080506748871b91a63ab/opentelemetry_instrumentation_wsgi-0.54b1.tar.gz", hash = "sha256:261ad737e0058812aaae6bb7d6e0fa7344de62464c5df30c82bea180e735b903", size = 18244, upload-time = "2025-05-16T19:04:08.448Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/2f/075156d123e589d6728cc4c1a43d0335fa16e8f4a9f723a4af9267d91169/opentelemetry_instrumentation_wsgi-0.54b1-py3-none-any.whl", hash = "sha256:6d99dca32ce232251cd321bf86e8c9d0a60c5f088bcbe5ad55d12a2006fe056e", size = 14378, upload-time = "2025-05-16T19:03:15.074Z" }, +] + +[[package]] +name = "opentelemetry-processor-baggage" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-sdk" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9b/47/6ebc196ca33a79e6e8839d33ebf1b9a7d88646f48b12c5687e5a90300879/opentelemetry_processor_baggage-0.54b1.tar.gz", hash = "sha256:d3ec2a99fb8b88ca1153cf9b1b8eae76bd2bb518fb900f758a8d24e439276055", size = 7579, upload-time = "2025-05-16T19:04:09.148Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/9f/db3a2e7162dc73f012b440c5600acaab301170cffe8d8ccce5e069bc4176/opentelemetry_processor_baggage-0.54b1-py3-none-any.whl", hash = "sha256:1502475016c90b68642c9377803fd77b7f295d0b33e0d3449ba113b405de2b49", size = 8877, upload-time = "2025-05-16T19:03:16.127Z" }, +] + +[[package]] +name = "opentelemetry-propagator-aws-xray" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/32/13/310a7f3c789eb9bb51f8ee9b88fb4b9f4f1e7191c8c96c7ea6f15eaa99b5/opentelemetry-propagator-aws-xray-1.0.1.tar.gz", hash = "sha256:6e8be667bbcf17c3d81d70b2a7cdec0b11257ff64d3829ffe75b810ba1b49f86", size = 8932, upload-time = "2021-10-18T22:07:40.108Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/24/2b1694b9452ac7ab3567dcb80902f7c5c8a39962751d5a4c54a357caa49e/opentelemetry_propagator_aws_xray-1.0.1-py3-none-any.whl", hash = "sha256:49267a1d72b3f04880ac75e24f9ef38fe323e2f3156c4531e0e00c71c0829c0f", size = 10812, upload-time = "2021-10-18T22:07:38.08Z" }, +] + +[[package]] +name = "opentelemetry-propagator-b3" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/b4/4fe00e8c63175e35c310ac4e5091b3c22a468a6098e8a5eacd8b991d6989/opentelemetry_propagator_b3-1.33.1.tar.gz", hash = "sha256:46bbe76d95ac7e1f50b263230aa1ce86445120f10c7008d66cb08266468561a3", size = 9618, upload-time = "2025-05-16T18:52:50.973Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/67/4a/16676216b5b8db95a6bdeb529bf17603e14c70ac15fcadca8de2bd135c65/opentelemetry_propagator_b3-1.33.1-py3-none-any.whl", hash = "sha256:5c65708fbecb317ab4f1880e81f7bb0bf48caa2e1d52fe31f89d1cb86172a69c", size = 8936, upload-time = "2025-05-16T18:52:34.125Z" }, +] + +[[package]] +name = "opentelemetry-propagator-jaeger" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/28/2be617ef9bf804f65864d17eef13af582992d529c61d58a8a17d711b918a/opentelemetry_propagator_jaeger-1.33.1.tar.gz", hash = "sha256:b4cd3f123a720db872401e2179f7384c70922a6b9bab2873f003419be82bb5e3", size = 8676, upload-time = "2025-05-16T18:52:51.559Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/24/a20343cfa49b38192ca6e314294b50a76d427c7dcbfd1a3ddb19706fed71/opentelemetry_propagator_jaeger-1.33.1-py3-none-any.whl", hash = "sha256:d5cfd139b245b32b45edda478b7be1fc52ecc93a199aa6ed7fd074086d81d083", size = 8778, upload-time = "2025-05-16T18:52:34.976Z" }, +] + +[[package]] +name = "opentelemetry-propagator-ot-trace" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/a3/b1bc6a7dc4aa7467b7d4537452a4fb089cb82246138fed6a3272e9ec2de9/opentelemetry_propagator_ot_trace-0.54b1.tar.gz", hash = "sha256:ce6bbebe9a3e57d8abada605b3ef296d363c764bb9a075677ea6f7aed7ddf8e6", size = 5026, upload-time = "2025-05-16T19:04:10.126Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/62/cab99d81b9de2f74e80cf5deac45c31ec110d65a6d9b043152cffe2e3edd/opentelemetry_propagator_ot_trace-0.54b1-py3-none-any.whl", hash = "sha256:3c7885bdee37b28562e17cd8cb72747102fdccd9d4e557f5b4afb109092db829", size = 4769, upload-time = "2025-05-16T19:03:17.047Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f6/dc/791f3d60a1ad8235930de23eea735ae1084be1c6f96fdadf38710662a7e5/opentelemetry_proto-1.33.1.tar.gz", hash = "sha256:9627b0a5c90753bf3920c398908307063e4458b287bb890e5c1d6fa11ad50b68", size = 34363, upload-time = "2025-05-16T18:52:52.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/29/48609f4c875c2b6c80930073c82dd1cafd36b6782244c01394007b528960/opentelemetry_proto-1.33.1-py3-none-any.whl", hash = "sha256:243d285d9f29663fc7ea91a7171fcc1ccbbfff43b48df0774fd64a37d98eda70", size = 55854, upload-time = "2025-05-16T18:52:36.269Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/67/12/909b98a7d9b110cce4b28d49b2e311797cffdce180371f35eba13a72dd00/opentelemetry_sdk-1.33.1.tar.gz", hash = "sha256:85b9fcf7c3d23506fbc9692fd210b8b025a1920535feec50bd54ce203d57a531", size = 161885, upload-time = "2025-05-16T18:52:52.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/8e/ae2d0742041e0bd7fe0d2dcc5e7cce51dcf7d3961a26072d5b43cc8fa2a7/opentelemetry_sdk-1.33.1-py3-none-any.whl", hash = "sha256:19ea73d9a01be29cacaa5d6c8ce0adc0b7f7b4d58cc52f923e4413609f670112", size = 118950, upload-time = "2025-05-16T18:52:37.297Z" }, +] + +[[package]] +name = "opentelemetry-sdk-extension-aws" +version = "2.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/86/52a95a0128b5aeb9db76e3ee6f9aeb6f2417ad24da28747318cbdf11c43d/opentelemetry_sdk_extension_aws-2.0.2.tar.gz", hash = "sha256:9faa9bdf480d1c5c53151dabee75735c94dbde09e4762c68ff5c7bd4aa3408f3", size = 16014, upload-time = "2024-08-05T17:45:06.44Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/6c/c85409f89ebe33d0998391f6e68ae0f2353a8e526450aad8b177ed5a26d3/opentelemetry_sdk_extension_aws-2.0.2-py3-none-any.whl", hash = "sha256:4c6e4b9fec01a4a9cfeac5272ce5aae6bc80e080a6bae1e52098746f53a7b32d", size = 18652, upload-time = "2024-08-05T17:45:05.27Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/2c/d7990fc1ffc82889d466e7cd680788ace44a26789809924813b164344393/opentelemetry_semantic_conventions-0.54b1.tar.gz", hash = "sha256:d1cecedae15d19bdaafca1e56b29a66aa286f50b5d08f036a145c7f3e9ef9cee", size = 118642, upload-time = "2025-05-16T18:52:53.962Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/80/08b1698c52ff76d96ba440bf15edc2f4bc0a279868778928e947c1004bdd/opentelemetry_semantic_conventions-0.54b1-py3-none-any.whl", hash = "sha256:29dab644a7e435b58d3a3918b58c333c92686236b30f7891d5e51f02933ca60d", size = 194938, upload-time = "2025-05-16T18:52:38.796Z" }, +] + +[[package]] +name = "opentelemetry-util-http" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a8/9f/1d8a1d1f34b9f62f2b940b388bf07b8167a8067e70870055bd05db354e5c/opentelemetry_util_http-0.54b1.tar.gz", hash = "sha256:f0b66868c19fbaf9c9d4e11f4a7599fa15d5ea50b884967a26ccd9d72c7c9d15", size = 8044, upload-time = "2025-05-16T19:04:10.79Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/ef/c5aa08abca6894792beed4c0405e85205b35b8e73d653571c9ff13a8e34e/opentelemetry_util_http-0.54b1-py3-none-any.whl", hash = "sha256:b1c91883f980344a1c3c486cffd47ae5c9c1dd7323f9cbe9fdb7cadb401c87c9", size = 7301, upload-time = "2025-05-16T19:03:18.18Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "pillow" +version = "11.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" }, + { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" }, + { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" }, + { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" }, + { url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" }, + { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" }, + { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" }, + { url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" }, + { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" }, + { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" }, + { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" }, + { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" }, + { url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" }, + { url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" }, + { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" }, + { url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" }, + { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" }, + { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" }, + { url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" }, + { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" }, + { url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" }, + { url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" }, + { url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" }, + { url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" }, + { url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" }, + { url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" }, + { url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" }, + { url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" }, + { url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" }, + { url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" }, + { url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" }, + { url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" }, + { url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" }, + { url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" }, + { url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" }, + { url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" }, + { url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" }, + { url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" }, +] + +[[package]] +name = "pip" +version = "25.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/6e/74a3f0179a4a73a53d66ce57fdb4de0080a8baa1de0063de206d6167acc2/pip-25.3.tar.gz", hash = "sha256:8d0538dbbd7babbd207f261ed969c65de439f6bc9e5dbd3b3b9a77f25d95f343", size = 1803014, upload-time = "2025-10-25T00:55:41.394Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/3c/d717024885424591d5376220b5e836c2d5293ce2011523c9de23ff7bf068/pip-25.3-py3-none-any.whl", hash = "sha256:9655943313a94722b7774661c21049070f6bbb0a1516bf02f7c8d5d9201514cd", size = 1778622, upload-time = "2025-10-25T00:55:39.247Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/61/33/9611380c2bdb1225fdef633e2a9610622310fed35ab11dac9620972ee088/platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312", size = 21632, upload-time = "2025-10-08T17:44:48.791Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/cb/ac7874b3e5d58441674fb70742e6c374b28b0c7cb988d37d991cde47166c/platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3", size = 18651, upload-time = "2025-10-08T17:44:47.223Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.52" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, +] + +[[package]] +name = "propcache" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/da/e9fc233cf63743258bff22b3dfa7ea5baef7b5bc324af47a0ad89b8ffc6f/propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d", size = 46442, upload-time = "2025-10-08T19:49:02.291Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/df/6d9c1b6ac12b003837dde8a10231a7344512186e87b36e855bef32241942/propcache-0.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43eedf29202c08550aac1d14e0ee619b0430aaef78f85864c1a892294fbc28cf", size = 77750, upload-time = "2025-10-08T19:47:07.648Z" }, + { url = "https://files.pythonhosted.org/packages/8b/e8/677a0025e8a2acf07d3418a2e7ba529c9c33caf09d3c1f25513023c1db56/propcache-0.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d62cdfcfd89ccb8de04e0eda998535c406bf5e060ffd56be6c586cbcc05b3311", size = 44780, upload-time = "2025-10-08T19:47:08.851Z" }, + { url = "https://files.pythonhosted.org/packages/89/a4/92380f7ca60f99ebae761936bc48a72a639e8a47b29050615eef757cb2a7/propcache-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cae65ad55793da34db5f54e4029b89d3b9b9490d8abe1b4c7ab5d4b8ec7ebf74", size = 46308, upload-time = "2025-10-08T19:47:09.982Z" }, + { url = "https://files.pythonhosted.org/packages/2d/48/c5ac64dee5262044348d1d78a5f85dd1a57464a60d30daee946699963eb3/propcache-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:333ddb9031d2704a301ee3e506dc46b1fe5f294ec198ed6435ad5b6a085facfe", size = 208182, upload-time = "2025-10-08T19:47:11.319Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0c/cd762dd011a9287389a6a3eb43aa30207bde253610cca06824aeabfe9653/propcache-0.4.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd0858c20f078a32cf55f7e81473d96dcf3b93fd2ccdb3d40fdf54b8573df3af", size = 211215, upload-time = "2025-10-08T19:47:13.146Z" }, + { url = "https://files.pythonhosted.org/packages/30/3e/49861e90233ba36890ae0ca4c660e95df565b2cd15d4a68556ab5865974e/propcache-0.4.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:678ae89ebc632c5c204c794f8dab2837c5f159aeb59e6ed0539500400577298c", size = 218112, upload-time = "2025-10-08T19:47:14.913Z" }, + { url = "https://files.pythonhosted.org/packages/f1/8b/544bc867e24e1bd48f3118cecd3b05c694e160a168478fa28770f22fd094/propcache-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d472aeb4fbf9865e0c6d622d7f4d54a4e101a89715d8904282bb5f9a2f476c3f", size = 204442, upload-time = "2025-10-08T19:47:16.277Z" }, + { url = "https://files.pythonhosted.org/packages/50/a6/4282772fd016a76d3e5c0df58380a5ea64900afd836cec2c2f662d1b9bb3/propcache-0.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4d3df5fa7e36b3225954fba85589da77a0fe6a53e3976de39caf04a0db4c36f1", size = 199398, upload-time = "2025-10-08T19:47:17.962Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ec/d8a7cd406ee1ddb705db2139f8a10a8a427100347bd698e7014351c7af09/propcache-0.4.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ee17f18d2498f2673e432faaa71698032b0127ebf23ae5974eeaf806c279df24", size = 196920, upload-time = "2025-10-08T19:47:19.355Z" }, + { url = "https://files.pythonhosted.org/packages/f6/6c/f38ab64af3764f431e359f8baf9e0a21013e24329e8b85d2da32e8ed07ca/propcache-0.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:580e97762b950f993ae618e167e7be9256b8353c2dcd8b99ec100eb50f5286aa", size = 203748, upload-time = "2025-10-08T19:47:21.338Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e3/fa846bd70f6534d647886621388f0a265254d30e3ce47e5c8e6e27dbf153/propcache-0.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:501d20b891688eb8e7aa903021f0b72d5a55db40ffaab27edefd1027caaafa61", size = 205877, upload-time = "2025-10-08T19:47:23.059Z" }, + { url = "https://files.pythonhosted.org/packages/e2/39/8163fc6f3133fea7b5f2827e8eba2029a0277ab2c5beee6c1db7b10fc23d/propcache-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a0bd56e5b100aef69bd8562b74b46254e7c8812918d3baa700c8a8009b0af66", size = 199437, upload-time = "2025-10-08T19:47:24.445Z" }, + { url = "https://files.pythonhosted.org/packages/93/89/caa9089970ca49c7c01662bd0eeedfe85494e863e8043565aeb6472ce8fe/propcache-0.4.1-cp313-cp313-win32.whl", hash = "sha256:bcc9aaa5d80322bc2fb24bb7accb4a30f81e90ab8d6ba187aec0744bc302ad81", size = 37586, upload-time = "2025-10-08T19:47:25.736Z" }, + { url = "https://files.pythonhosted.org/packages/f5/ab/f76ec3c3627c883215b5c8080debb4394ef5a7a29be811f786415fc1e6fd/propcache-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:381914df18634f5494334d201e98245c0596067504b9372d8cf93f4bb23e025e", size = 40790, upload-time = "2025-10-08T19:47:26.847Z" }, + { url = "https://files.pythonhosted.org/packages/59/1b/e71ae98235f8e2ba5004d8cb19765a74877abf189bc53fc0c80d799e56c3/propcache-0.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:8873eb4460fd55333ea49b7d189749ecf6e55bf85080f11b1c4530ed3034cba1", size = 37158, upload-time = "2025-10-08T19:47:27.961Z" }, + { url = "https://files.pythonhosted.org/packages/83/ce/a31bbdfc24ee0dcbba458c8175ed26089cf109a55bbe7b7640ed2470cfe9/propcache-0.4.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:92d1935ee1f8d7442da9c0c4fa7ac20d07e94064184811b685f5c4fada64553b", size = 81451, upload-time = "2025-10-08T19:47:29.445Z" }, + { url = "https://files.pythonhosted.org/packages/25/9c/442a45a470a68456e710d96cacd3573ef26a1d0a60067e6a7d5e655621ed/propcache-0.4.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:473c61b39e1460d386479b9b2f337da492042447c9b685f28be4f74d3529e566", size = 46374, upload-time = "2025-10-08T19:47:30.579Z" }, + { url = "https://files.pythonhosted.org/packages/f4/bf/b1d5e21dbc3b2e889ea4327044fb16312a736d97640fb8b6aa3f9c7b3b65/propcache-0.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c0ef0aaafc66fbd87842a3fe3902fd889825646bc21149eafe47be6072725835", size = 48396, upload-time = "2025-10-08T19:47:31.79Z" }, + { url = "https://files.pythonhosted.org/packages/f4/04/5b4c54a103d480e978d3c8a76073502b18db0c4bc17ab91b3cb5092ad949/propcache-0.4.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95393b4d66bfae908c3ca8d169d5f79cd65636ae15b5e7a4f6e67af675adb0e", size = 275950, upload-time = "2025-10-08T19:47:33.481Z" }, + { url = "https://files.pythonhosted.org/packages/b4/c1/86f846827fb969c4b78b0af79bba1d1ea2156492e1b83dea8b8a6ae27395/propcache-0.4.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c07fda85708bc48578467e85099645167a955ba093be0a2dcba962195676e859", size = 273856, upload-time = "2025-10-08T19:47:34.906Z" }, + { url = "https://files.pythonhosted.org/packages/36/1d/fc272a63c8d3bbad6878c336c7a7dea15e8f2d23a544bda43205dfa83ada/propcache-0.4.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:af223b406d6d000830c6f65f1e6431783fc3f713ba3e6cc8c024d5ee96170a4b", size = 280420, upload-time = "2025-10-08T19:47:36.338Z" }, + { url = "https://files.pythonhosted.org/packages/07/0c/01f2219d39f7e53d52e5173bcb09c976609ba30209912a0680adfb8c593a/propcache-0.4.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a78372c932c90ee474559c5ddfffd718238e8673c340dc21fe45c5b8b54559a0", size = 263254, upload-time = "2025-10-08T19:47:37.692Z" }, + { url = "https://files.pythonhosted.org/packages/2d/18/cd28081658ce597898f0c4d174d4d0f3c5b6d4dc27ffafeef835c95eb359/propcache-0.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:564d9f0d4d9509e1a870c920a89b2fec951b44bf5ba7d537a9e7c1ccec2c18af", size = 261205, upload-time = "2025-10-08T19:47:39.659Z" }, + { url = "https://files.pythonhosted.org/packages/7a/71/1f9e22eb8b8316701c2a19fa1f388c8a3185082607da8e406a803c9b954e/propcache-0.4.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:17612831fda0138059cc5546f4d12a2aacfb9e47068c06af35c400ba58ba7393", size = 247873, upload-time = "2025-10-08T19:47:41.084Z" }, + { url = "https://files.pythonhosted.org/packages/4a/65/3d4b61f36af2b4eddba9def857959f1016a51066b4f1ce348e0cf7881f58/propcache-0.4.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:41a89040cb10bd345b3c1a873b2bf36413d48da1def52f268a055f7398514874", size = 262739, upload-time = "2025-10-08T19:47:42.51Z" }, + { url = "https://files.pythonhosted.org/packages/2a/42/26746ab087faa77c1c68079b228810436ccd9a5ce9ac85e2b7307195fd06/propcache-0.4.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e35b88984e7fa64aacecea39236cee32dd9bd8c55f57ba8a75cf2399553f9bd7", size = 263514, upload-time = "2025-10-08T19:47:43.927Z" }, + { url = "https://files.pythonhosted.org/packages/94/13/630690fe201f5502d2403dd3cfd451ed8858fe3c738ee88d095ad2ff407b/propcache-0.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f8b465489f927b0df505cbe26ffbeed4d6d8a2bbc61ce90eb074ff129ef0ab1", size = 257781, upload-time = "2025-10-08T19:47:45.448Z" }, + { url = "https://files.pythonhosted.org/packages/92/f7/1d4ec5841505f423469efbfc381d64b7b467438cd5a4bbcbb063f3b73d27/propcache-0.4.1-cp313-cp313t-win32.whl", hash = "sha256:2ad890caa1d928c7c2965b48f3a3815c853180831d0e5503d35cf00c472f4717", size = 41396, upload-time = "2025-10-08T19:47:47.202Z" }, + { url = "https://files.pythonhosted.org/packages/48/f0/615c30622316496d2cbbc29f5985f7777d3ada70f23370608c1d3e081c1f/propcache-0.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f7ee0e597f495cf415bcbd3da3caa3bd7e816b74d0d52b8145954c5e6fd3ff37", size = 44897, upload-time = "2025-10-08T19:47:48.336Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/6002e46eccbe0e33dcd4069ef32f7f1c9e243736e07adca37ae8c4830ec3/propcache-0.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:929d7cbe1f01bb7baffb33dc14eb5691c95831450a26354cd210a8155170c93a", size = 39789, upload-time = "2025-10-08T19:47:49.876Z" }, + { url = "https://files.pythonhosted.org/packages/8e/5c/bca52d654a896f831b8256683457ceddd490ec18d9ec50e97dfd8fc726a8/propcache-0.4.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3f7124c9d820ba5548d431afb4632301acf965db49e666aa21c305cbe8c6de12", size = 78152, upload-time = "2025-10-08T19:47:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/65/9b/03b04e7d82a5f54fb16113d839f5ea1ede58a61e90edf515f6577c66fa8f/propcache-0.4.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c0d4b719b7da33599dfe3b22d3db1ef789210a0597bc650b7cee9c77c2be8c5c", size = 44869, upload-time = "2025-10-08T19:47:52.594Z" }, + { url = "https://files.pythonhosted.org/packages/b2/fa/89a8ef0468d5833a23fff277b143d0573897cf75bd56670a6d28126c7d68/propcache-0.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9f302f4783709a78240ebc311b793f123328716a60911d667e0c036bc5dcbded", size = 46596, upload-time = "2025-10-08T19:47:54.073Z" }, + { url = "https://files.pythonhosted.org/packages/86/bd/47816020d337f4a746edc42fe8d53669965138f39ee117414c7d7a340cfe/propcache-0.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c80ee5802e3fb9ea37938e7eecc307fb984837091d5fd262bb37238b1ae97641", size = 206981, upload-time = "2025-10-08T19:47:55.715Z" }, + { url = "https://files.pythonhosted.org/packages/df/f6/c5fa1357cc9748510ee55f37173eb31bfde6d94e98ccd9e6f033f2fc06e1/propcache-0.4.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ed5a841e8bb29a55fb8159ed526b26adc5bdd7e8bd7bf793ce647cb08656cdf4", size = 211490, upload-time = "2025-10-08T19:47:57.499Z" }, + { url = "https://files.pythonhosted.org/packages/80/1e/e5889652a7c4a3846683401a48f0f2e5083ce0ec1a8a5221d8058fbd1adf/propcache-0.4.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:55c72fd6ea2da4c318e74ffdf93c4fe4e926051133657459131a95c846d16d44", size = 215371, upload-time = "2025-10-08T19:47:59.317Z" }, + { url = "https://files.pythonhosted.org/packages/b2/f2/889ad4b2408f72fe1a4f6a19491177b30ea7bf1a0fd5f17050ca08cfc882/propcache-0.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8326e144341460402713f91df60ade3c999d601e7eb5ff8f6f7862d54de0610d", size = 201424, upload-time = "2025-10-08T19:48:00.67Z" }, + { url = "https://files.pythonhosted.org/packages/27/73/033d63069b57b0812c8bd19f311faebeceb6ba31b8f32b73432d12a0b826/propcache-0.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:060b16ae65bc098da7f6d25bf359f1f31f688384858204fe5d652979e0015e5b", size = 197566, upload-time = "2025-10-08T19:48:02.604Z" }, + { url = "https://files.pythonhosted.org/packages/dc/89/ce24f3dc182630b4e07aa6d15f0ff4b14ed4b9955fae95a0b54c58d66c05/propcache-0.4.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:89eb3fa9524f7bec9de6e83cf3faed9d79bffa560672c118a96a171a6f55831e", size = 193130, upload-time = "2025-10-08T19:48:04.499Z" }, + { url = "https://files.pythonhosted.org/packages/a9/24/ef0d5fd1a811fb5c609278d0209c9f10c35f20581fcc16f818da959fc5b4/propcache-0.4.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:dee69d7015dc235f526fe80a9c90d65eb0039103fe565776250881731f06349f", size = 202625, upload-time = "2025-10-08T19:48:06.213Z" }, + { url = "https://files.pythonhosted.org/packages/f5/02/98ec20ff5546f68d673df2f7a69e8c0d076b5abd05ca882dc7ee3a83653d/propcache-0.4.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5558992a00dfd54ccbc64a32726a3357ec93825a418a401f5cc67df0ac5d9e49", size = 204209, upload-time = "2025-10-08T19:48:08.432Z" }, + { url = "https://files.pythonhosted.org/packages/a0/87/492694f76759b15f0467a2a93ab68d32859672b646aa8a04ce4864e7932d/propcache-0.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c9b822a577f560fbd9554812526831712c1436d2c046cedee4c3796d3543b144", size = 197797, upload-time = "2025-10-08T19:48:09.968Z" }, + { url = "https://files.pythonhosted.org/packages/ee/36/66367de3575db1d2d3f3d177432bd14ee577a39d3f5d1b3d5df8afe3b6e2/propcache-0.4.1-cp314-cp314-win32.whl", hash = "sha256:ab4c29b49d560fe48b696cdcb127dd36e0bc2472548f3bf56cc5cb3da2b2984f", size = 38140, upload-time = "2025-10-08T19:48:11.232Z" }, + { url = "https://files.pythonhosted.org/packages/0c/2a/a758b47de253636e1b8aef181c0b4f4f204bf0dd964914fb2af90a95b49b/propcache-0.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:5a103c3eb905fcea0ab98be99c3a9a5ab2de60228aa5aceedc614c0281cf6153", size = 41257, upload-time = "2025-10-08T19:48:12.707Z" }, + { url = "https://files.pythonhosted.org/packages/34/5e/63bd5896c3fec12edcbd6f12508d4890d23c265df28c74b175e1ef9f4f3b/propcache-0.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:74c1fb26515153e482e00177a1ad654721bf9207da8a494a0c05e797ad27b992", size = 38097, upload-time = "2025-10-08T19:48:13.923Z" }, + { url = "https://files.pythonhosted.org/packages/99/85/9ff785d787ccf9bbb3f3106f79884a130951436f58392000231b4c737c80/propcache-0.4.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:824e908bce90fb2743bd6b59db36eb4f45cd350a39637c9f73b1c1ea66f5b75f", size = 81455, upload-time = "2025-10-08T19:48:15.16Z" }, + { url = "https://files.pythonhosted.org/packages/90/85/2431c10c8e7ddb1445c1f7c4b54d886e8ad20e3c6307e7218f05922cad67/propcache-0.4.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2b5e7db5328427c57c8e8831abda175421b709672f6cfc3d630c3b7e2146393", size = 46372, upload-time = "2025-10-08T19:48:16.424Z" }, + { url = "https://files.pythonhosted.org/packages/01/20/b0972d902472da9bcb683fa595099911f4d2e86e5683bcc45de60dd05dc3/propcache-0.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6f6ff873ed40292cd4969ef5310179afd5db59fdf055897e282485043fc80ad0", size = 48411, upload-time = "2025-10-08T19:48:17.577Z" }, + { url = "https://files.pythonhosted.org/packages/e2/e3/7dc89f4f21e8f99bad3d5ddb3a3389afcf9da4ac69e3deb2dcdc96e74169/propcache-0.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49a2dc67c154db2c1463013594c458881a069fcf98940e61a0569016a583020a", size = 275712, upload-time = "2025-10-08T19:48:18.901Z" }, + { url = "https://files.pythonhosted.org/packages/20/67/89800c8352489b21a8047c773067644e3897f02ecbbd610f4d46b7f08612/propcache-0.4.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:005f08e6a0529984491e37d8dbc3dd86f84bd78a8ceb5fa9a021f4c48d4984be", size = 273557, upload-time = "2025-10-08T19:48:20.762Z" }, + { url = "https://files.pythonhosted.org/packages/e2/a1/b52b055c766a54ce6d9c16d9aca0cad8059acd9637cdf8aa0222f4a026ef/propcache-0.4.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c3310452e0d31390da9035c348633b43d7e7feb2e37be252be6da45abd1abcc", size = 280015, upload-time = "2025-10-08T19:48:22.592Z" }, + { url = "https://files.pythonhosted.org/packages/48/c8/33cee30bd890672c63743049f3c9e4be087e6780906bfc3ec58528be59c1/propcache-0.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c3c70630930447f9ef1caac7728c8ad1c56bc5015338b20fed0d08ea2480b3a", size = 262880, upload-time = "2025-10-08T19:48:23.947Z" }, + { url = "https://files.pythonhosted.org/packages/0c/b1/8f08a143b204b418285c88b83d00edbd61afbc2c6415ffafc8905da7038b/propcache-0.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e57061305815dfc910a3634dcf584f08168a8836e6999983569f51a8544cd89", size = 260938, upload-time = "2025-10-08T19:48:25.656Z" }, + { url = "https://files.pythonhosted.org/packages/cf/12/96e4664c82ca2f31e1c8dff86afb867348979eb78d3cb8546a680287a1e9/propcache-0.4.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:521a463429ef54143092c11a77e04056dd00636f72e8c45b70aaa3140d639726", size = 247641, upload-time = "2025-10-08T19:48:27.207Z" }, + { url = "https://files.pythonhosted.org/packages/18/ed/e7a9cfca28133386ba52278136d42209d3125db08d0a6395f0cba0c0285c/propcache-0.4.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:120c964da3fdc75e3731aa392527136d4ad35868cc556fd09bb6d09172d9a367", size = 262510, upload-time = "2025-10-08T19:48:28.65Z" }, + { url = "https://files.pythonhosted.org/packages/f5/76/16d8bf65e8845dd62b4e2b57444ab81f07f40caa5652b8969b87ddcf2ef6/propcache-0.4.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:d8f353eb14ee3441ee844ade4277d560cdd68288838673273b978e3d6d2c8f36", size = 263161, upload-time = "2025-10-08T19:48:30.133Z" }, + { url = "https://files.pythonhosted.org/packages/e7/70/c99e9edb5d91d5ad8a49fa3c1e8285ba64f1476782fed10ab251ff413ba1/propcache-0.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ab2943be7c652f09638800905ee1bab2c544e537edb57d527997a24c13dc1455", size = 257393, upload-time = "2025-10-08T19:48:31.567Z" }, + { url = "https://files.pythonhosted.org/packages/08/02/87b25304249a35c0915d236575bc3574a323f60b47939a2262b77632a3ee/propcache-0.4.1-cp314-cp314t-win32.whl", hash = "sha256:05674a162469f31358c30bcaa8883cb7829fa3110bf9c0991fe27d7896c42d85", size = 42546, upload-time = "2025-10-08T19:48:32.872Z" }, + { url = "https://files.pythonhosted.org/packages/cb/ef/3c6ecf8b317aa982f309835e8f96987466123c6e596646d4e6a1dfcd080f/propcache-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:990f6b3e2a27d683cb7602ed6c86f15ee6b43b1194736f9baaeb93d0016633b1", size = 46259, upload-time = "2025-10-08T19:48:34.226Z" }, + { url = "https://files.pythonhosted.org/packages/c4/2d/346e946d4951f37eca1e4f55be0f0174c52cd70720f84029b02f296f4a38/propcache-0.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:ecef2343af4cc68e05131e45024ba34f6095821988a9d0a02aa7c73fcc448aa9", size = 40428, upload-time = "2025-10-08T19:48:35.441Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, +] + +[[package]] +name = "protobuf" +version = "5.29.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/29/d09e70352e4e88c9c7a198d5645d7277811448d76c23b00345670f7c8a38/protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84", size = 425226, upload-time = "2025-05-28T23:51:59.82Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/11/6e40e9fc5bba02988a214c07cf324595789ca7820160bfd1f8be96e48539/protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079", size = 422963, upload-time = "2025-05-28T23:51:41.204Z" }, + { url = "https://files.pythonhosted.org/packages/81/7f/73cefb093e1a2a7c3ffd839e6f9fcafb7a427d300c7f8aef9c64405d8ac6/protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc", size = 434818, upload-time = "2025-05-28T23:51:44.297Z" }, + { url = "https://files.pythonhosted.org/packages/dd/73/10e1661c21f139f2c6ad9b23040ff36fee624310dc28fba20d33fdae124c/protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671", size = 418091, upload-time = "2025-05-28T23:51:45.907Z" }, + { url = "https://files.pythonhosted.org/packages/6c/04/98f6f8cf5b07ab1294c13f34b4e69b3722bb609c5b701d6c169828f9f8aa/protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015", size = 319824, upload-time = "2025-05-28T23:51:47.545Z" }, + { url = "https://files.pythonhosted.org/packages/85/e4/07c80521879c2d15f321465ac24c70efe2381378c00bf5e56a0f4fbac8cd/protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61", size = 319942, upload-time = "2025-05-28T23:51:49.11Z" }, + { url = "https://files.pythonhosted.org/packages/7e/cc/7e77861000a0691aeea8f4566e5d3aa716f2b1dece4a24439437e41d3d25/protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5", size = 172823, upload-time = "2025-05-28T23:51:58.157Z" }, +] + +[[package]] +name = "psutil" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/31/4723d756b59344b643542936e37a31d1d3204bcdc42a7daa8ee9eb06fb50/psutil-7.1.0.tar.gz", hash = "sha256:655708b3c069387c8b77b072fc429a57d0e214221d01c0a772df7dfedcb3bcd2", size = 497660, upload-time = "2025-09-17T20:14:52.902Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/62/ce4051019ee20ce0ed74432dd73a5bb087a6704284a470bb8adff69a0932/psutil-7.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:76168cef4397494250e9f4e73eb3752b146de1dd950040b29186d0cce1d5ca13", size = 245242, upload-time = "2025-09-17T20:14:56.126Z" }, + { url = "https://files.pythonhosted.org/packages/38/61/f76959fba841bf5b61123fbf4b650886dc4094c6858008b5bf73d9057216/psutil-7.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:5d007560c8c372efdff9e4579c2846d71de737e4605f611437255e81efcca2c5", size = 246682, upload-time = "2025-09-17T20:14:58.25Z" }, + { url = "https://files.pythonhosted.org/packages/88/7a/37c99d2e77ec30d63398ffa6a660450b8a62517cabe44b3e9bae97696e8d/psutil-7.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22e4454970b32472ce7deaa45d045b34d3648ce478e26a04c7e858a0a6e75ff3", size = 287994, upload-time = "2025-09-17T20:14:59.901Z" }, + { url = "https://files.pythonhosted.org/packages/9d/de/04c8c61232f7244aa0a4b9a9fbd63a89d5aeaf94b2fc9d1d16e2faa5cbb0/psutil-7.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c70e113920d51e89f212dd7be06219a9b88014e63a4cec69b684c327bc474e3", size = 291163, upload-time = "2025-09-17T20:15:01.481Z" }, + { url = "https://files.pythonhosted.org/packages/f4/58/c4f976234bf6d4737bc8c02a81192f045c307b72cf39c9e5c5a2d78927f6/psutil-7.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d4a113425c037300de3ac8b331637293da9be9713855c4fc9d2d97436d7259d", size = 293625, upload-time = "2025-09-17T20:15:04.492Z" }, + { url = "https://files.pythonhosted.org/packages/79/87/157c8e7959ec39ced1b11cc93c730c4fb7f9d408569a6c59dbd92ceb35db/psutil-7.1.0-cp37-abi3-win32.whl", hash = "sha256:09ad740870c8d219ed8daae0ad3b726d3bf9a028a198e7f3080f6a1888b99bca", size = 244812, upload-time = "2025-09-17T20:15:07.462Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e9/b44c4f697276a7a95b8e94d0e320a7bf7f3318521b23de69035540b39838/psutil-7.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:57f5e987c36d3146c0dd2528cd42151cf96cd359b9d67cfff836995cc5df9a3d", size = 247965, upload-time = "2025-09-17T20:15:09.673Z" }, + { url = "https://files.pythonhosted.org/packages/26/65/1070a6e3c036f39142c2820c4b52e9243246fcfc3f96239ac84472ba361e/psutil-7.1.0-cp37-abi3-win_arm64.whl", hash = "sha256:6937cb68133e7c97b6cc9649a570c9a18ba0efebed46d8c5dae4c07fa1b67a07", size = 244971, upload-time = "2025-09-17T20:15:12.262Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8d/35/d319ed522433215526689bad428a94058b6dd12190ce7ddd78618ac14b28/pydantic-2.12.2.tar.gz", hash = "sha256:7b8fa15b831a4bbde9d5b84028641ac3080a4ca2cbd4a621a661687e741624fd", size = 816358, upload-time = "2025-10-14T15:02:21.842Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/98/468cb649f208a6f1279448e6e5247b37ae79cf5e4041186f1e2ef3d16345/pydantic-2.12.2-py3-none-any.whl", hash = "sha256:25ff718ee909acd82f1ff9b1a4acfd781bb23ab3739adaa7144f19a6a4e231ae", size = 460628, upload-time = "2025-10-14T15:02:19.623Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/18/d0944e8eaaa3efd0a91b0f1fc537d3be55ad35091b6a87638211ba691964/pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5", size = 457557, upload-time = "2025-10-14T10:23:47.909Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/d0/c20adabd181a029a970738dfe23710b52a31f1258f591874fcdec7359845/pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746", size = 2105688, upload-time = "2025-10-14T10:20:54.448Z" }, + { url = "https://files.pythonhosted.org/packages/00/b6/0ce5c03cec5ae94cca220dfecddc453c077d71363b98a4bbdb3c0b22c783/pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced", size = 1910807, upload-time = "2025-10-14T10:20:56.115Z" }, + { url = "https://files.pythonhosted.org/packages/68/3e/800d3d02c8beb0b5c069c870cbb83799d085debf43499c897bb4b4aaff0d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a", size = 1956669, upload-time = "2025-10-14T10:20:57.874Z" }, + { url = "https://files.pythonhosted.org/packages/60/a4/24271cc71a17f64589be49ab8bd0751f6a0a03046c690df60989f2f95c2c/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02", size = 2051629, upload-time = "2025-10-14T10:21:00.006Z" }, + { url = "https://files.pythonhosted.org/packages/68/de/45af3ca2f175d91b96bfb62e1f2d2f1f9f3b14a734afe0bfeff079f78181/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1", size = 2224049, upload-time = "2025-10-14T10:21:01.801Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/ae4e1ff84672bf869d0a77af24fd78387850e9497753c432875066b5d622/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2", size = 2342409, upload-time = "2025-10-14T10:21:03.556Z" }, + { url = "https://files.pythonhosted.org/packages/18/62/273dd70b0026a085c7b74b000394e1ef95719ea579c76ea2f0cc8893736d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84", size = 2069635, upload-time = "2025-10-14T10:21:05.385Z" }, + { url = "https://files.pythonhosted.org/packages/30/03/cf485fff699b4cdaea469bc481719d3e49f023241b4abb656f8d422189fc/pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d", size = 2194284, upload-time = "2025-10-14T10:21:07.122Z" }, + { url = "https://files.pythonhosted.org/packages/f9/7e/c8e713db32405dfd97211f2fc0a15d6bf8adb7640f3d18544c1f39526619/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d", size = 2137566, upload-time = "2025-10-14T10:21:08.981Z" }, + { url = "https://files.pythonhosted.org/packages/04/f7/db71fd4cdccc8b75990f79ccafbbd66757e19f6d5ee724a6252414483fb4/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2", size = 2316809, upload-time = "2025-10-14T10:21:10.805Z" }, + { url = "https://files.pythonhosted.org/packages/76/63/a54973ddb945f1bca56742b48b144d85c9fc22f819ddeb9f861c249d5464/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab", size = 2311119, upload-time = "2025-10-14T10:21:12.583Z" }, + { url = "https://files.pythonhosted.org/packages/f8/03/5d12891e93c19218af74843a27e32b94922195ded2386f7b55382f904d2f/pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c", size = 1981398, upload-time = "2025-10-14T10:21:14.584Z" }, + { url = "https://files.pythonhosted.org/packages/be/d8/fd0de71f39db91135b7a26996160de71c073d8635edfce8b3c3681be0d6d/pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4", size = 2030735, upload-time = "2025-10-14T10:21:16.432Z" }, + { url = "https://files.pythonhosted.org/packages/72/86/c99921c1cf6650023c08bfab6fe2d7057a5142628ef7ccfa9921f2dda1d5/pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564", size = 1973209, upload-time = "2025-10-14T10:21:18.213Z" }, + { url = "https://files.pythonhosted.org/packages/36/0d/b5706cacb70a8414396efdda3d72ae0542e050b591119e458e2490baf035/pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4", size = 1877324, upload-time = "2025-10-14T10:21:20.363Z" }, + { url = "https://files.pythonhosted.org/packages/de/2d/cba1fa02cfdea72dfb3a9babb067c83b9dff0bbcb198368e000a6b756ea7/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2", size = 1884515, upload-time = "2025-10-14T10:21:22.339Z" }, + { url = "https://files.pythonhosted.org/packages/07/ea/3df927c4384ed9b503c9cc2d076cf983b4f2adb0c754578dfb1245c51e46/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf", size = 2042819, upload-time = "2025-10-14T10:21:26.683Z" }, + { url = "https://files.pythonhosted.org/packages/6a/ee/df8e871f07074250270a3b1b82aad4cd0026b588acd5d7d3eb2fcb1471a3/pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2", size = 1995866, upload-time = "2025-10-14T10:21:28.951Z" }, + { url = "https://files.pythonhosted.org/packages/fc/de/b20f4ab954d6d399499c33ec4fafc46d9551e11dc1858fb7f5dca0748ceb/pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89", size = 1970034, upload-time = "2025-10-14T10:21:30.869Z" }, + { url = "https://files.pythonhosted.org/packages/54/28/d3325da57d413b9819365546eb9a6e8b7cbd9373d9380efd5f74326143e6/pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1", size = 2102022, upload-time = "2025-10-14T10:21:32.809Z" }, + { url = "https://files.pythonhosted.org/packages/9e/24/b58a1bc0d834bf1acc4361e61233ee217169a42efbdc15a60296e13ce438/pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac", size = 1905495, upload-time = "2025-10-14T10:21:34.812Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a4/71f759cc41b7043e8ecdaab81b985a9b6cad7cec077e0b92cff8b71ecf6b/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554", size = 1956131, upload-time = "2025-10-14T10:21:36.924Z" }, + { url = "https://files.pythonhosted.org/packages/b0/64/1e79ac7aa51f1eec7c4cda8cbe456d5d09f05fdd68b32776d72168d54275/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e", size = 2052236, upload-time = "2025-10-14T10:21:38.927Z" }, + { url = "https://files.pythonhosted.org/packages/e9/e3/a3ffc363bd4287b80f1d43dc1c28ba64831f8dfc237d6fec8f2661138d48/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616", size = 2223573, upload-time = "2025-10-14T10:21:41.574Z" }, + { url = "https://files.pythonhosted.org/packages/28/27/78814089b4d2e684a9088ede3790763c64693c3d1408ddc0a248bc789126/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af", size = 2342467, upload-time = "2025-10-14T10:21:44.018Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/4de0e2a1159cb85ad737e03306717637842c88c7fd6d97973172fb183149/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12", size = 2063754, upload-time = "2025-10-14T10:21:46.466Z" }, + { url = "https://files.pythonhosted.org/packages/0f/50/8cb90ce4b9efcf7ae78130afeb99fd1c86125ccdf9906ef64b9d42f37c25/pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d", size = 2196754, upload-time = "2025-10-14T10:21:48.486Z" }, + { url = "https://files.pythonhosted.org/packages/34/3b/ccdc77af9cd5082723574a1cc1bcae7a6acacc829d7c0a06201f7886a109/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad", size = 2137115, upload-time = "2025-10-14T10:21:50.63Z" }, + { url = "https://files.pythonhosted.org/packages/ca/ba/e7c7a02651a8f7c52dc2cff2b64a30c313e3b57c7d93703cecea76c09b71/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a", size = 2317400, upload-time = "2025-10-14T10:21:52.959Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ba/6c533a4ee8aec6b812c643c49bb3bd88d3f01e3cebe451bb85512d37f00f/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025", size = 2312070, upload-time = "2025-10-14T10:21:55.419Z" }, + { url = "https://files.pythonhosted.org/packages/22/ae/f10524fcc0ab8d7f96cf9a74c880243576fd3e72bd8ce4f81e43d22bcab7/pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e", size = 1982277, upload-time = "2025-10-14T10:21:57.474Z" }, + { url = "https://files.pythonhosted.org/packages/b4/dc/e5aa27aea1ad4638f0c3fb41132f7eb583bd7420ee63204e2d4333a3bbf9/pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894", size = 2024608, upload-time = "2025-10-14T10:21:59.557Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/51d89cc2612bd147198e120a13f150afbf0bcb4615cddb049ab10b81b79e/pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d", size = 1967614, upload-time = "2025-10-14T10:22:01.847Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c2/472f2e31b95eff099961fa050c376ab7156a81da194f9edb9f710f68787b/pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da", size = 1876904, upload-time = "2025-10-14T10:22:04.062Z" }, + { url = "https://files.pythonhosted.org/packages/4a/07/ea8eeb91173807ecdae4f4a5f4b150a520085b35454350fc219ba79e66a3/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e", size = 1882538, upload-time = "2025-10-14T10:22:06.39Z" }, + { url = "https://files.pythonhosted.org/packages/1e/29/b53a9ca6cd366bfc928823679c6a76c7a4c69f8201c0ba7903ad18ebae2f/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa", size = 2041183, upload-time = "2025-10-14T10:22:08.812Z" }, + { url = "https://files.pythonhosted.org/packages/c7/3d/f8c1a371ceebcaf94d6dd2d77c6cf4b1c078e13a5837aee83f760b4f7cfd/pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d", size = 1993542, upload-time = "2025-10-14T10:22:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ac/9fc61b4f9d079482a290afe8d206b8f490e9fd32d4fc03ed4fc698214e01/pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0", size = 1973897, upload-time = "2025-10-14T10:22:13.444Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/c5/dbbc27b814c71676593d1c3f718e6cd7d4f00652cefa24b75f7aa3efb25e/pydantic_settings-2.11.0.tar.gz", hash = "sha256:d0e87a1c7d33593beb7194adb8470fc426e95ba02af83a0f23474a04c9a08180", size = 188394, upload-time = "2025-09-24T14:19:11.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/d6/887a1ff844e64aa823fb4905978d882a633cfe295c32eacad582b78a7d8b/pydantic_settings-2.11.0-py3-none-any.whl", hash = "sha256:fe2cea3413b9530d10f3a5875adffb17ada5c1e1bab0b2885546d7310415207c", size = 48608, upload-time = "2025-09-24T14:19:10.015Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-cov" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage" }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/f7/c933acc76f5208b3b00089573cf6a2bc26dc80a8aece8f52bb7d6b1855ca/pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1", size = 54328, upload-time = "2025-09-09T10:57:02.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, +] + +[[package]] +name = "pytest-mock" +version = "3.15.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/14/eb014d26be205d38ad5ad20d9a80f7d201472e08167f0bb4361e251084a9/pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f", size = 34036, upload-time = "2025-09-16T16:37:27.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/cc/06253936f4a7fa2e0f48dfe6d851d9c56df896a9ab09ac019d70b760619c/pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d", size = 10095, upload-time = "2025-09-16T16:37:25.734Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, +] + +[[package]] +name = "pytokens" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d4/c2/dbadcdddb412a267585459142bfd7cc241e6276db69339353ae6e241ab2b/pytokens-0.2.0.tar.gz", hash = "sha256:532d6421364e5869ea57a9523bf385f02586d4662acbcc0342afd69511b4dd43", size = 15368, upload-time = "2025-10-15T08:02:42.738Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/5a/c269ea6b348b6f2c32686635df89f32dbe05df1088dd4579302a6f8f99af/pytokens-0.2.0-py3-none-any.whl", hash = "sha256:74d4b318c67f4295c13782ddd9abcb7e297ec5630ad060eb90abf7ebbefe59f8", size = 12038, upload-time = "2025-10-15T08:02:41.694Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "responses" +version = "0.25.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/95/89c054ad70bfef6da605338b009b2e283485835351a9935c7bfbfaca7ffc/responses-0.25.8.tar.gz", hash = "sha256:9374d047a575c8f781b94454db5cab590b6029505f488d12899ddb10a4af1cf4", size = 79320, upload-time = "2025-08-08T19:01:46.709Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/4c/cc276ce57e572c102d9542d383b2cfd551276581dc60004cb94fe8774c11/responses-0.25.8-py3-none-any.whl", hash = "sha256:0c710af92def29c8352ceadff0c3fe340ace27cf5af1bbe46fb71275bcd2831c", size = 34769, upload-time = "2025-08-08T19:01:45.018Z" }, +] + +[[package]] +name = "rich" +version = "14.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/7a/b0178788f8dc6cafce37a212c99565fa1fe7872c70c6c9c1e1a372d9d88f/rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd", size = 243393, upload-time = "2025-10-09T14:16:51.245Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.27.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/dd/2c0cbe774744272b0ae725f44032c77bdcab6e8bcf544bffa3b6e70c8dba/rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8", size = 27479, upload-time = "2025-08-27T12:16:36.024Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/77/610aeee8d41e39080c7e14afa5387138e3c9fa9756ab893d09d99e7d8e98/rpds_py-0.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e4b9fcfbc021633863a37e92571d6f91851fa656f0180246e84cbd8b3f6b329b", size = 361741, upload-time = "2025-08-27T12:13:31.039Z" }, + { url = "https://files.pythonhosted.org/packages/3a/fc/c43765f201c6a1c60be2043cbdb664013def52460a4c7adace89d6682bf4/rpds_py-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1441811a96eadca93c517d08df75de45e5ffe68aa3089924f963c782c4b898cf", size = 345574, upload-time = "2025-08-27T12:13:32.902Z" }, + { url = "https://files.pythonhosted.org/packages/20/42/ee2b2ca114294cd9847d0ef9c26d2b0851b2e7e00bf14cc4c0b581df0fc3/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55266dafa22e672f5a4f65019015f90336ed31c6383bd53f5e7826d21a0e0b83", size = 385051, upload-time = "2025-08-27T12:13:34.228Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e8/1e430fe311e4799e02e2d1af7c765f024e95e17d651612425b226705f910/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78827d7ac08627ea2c8e02c9e5b41180ea5ea1f747e9db0915e3adf36b62dcf", size = 398395, upload-time = "2025-08-27T12:13:36.132Z" }, + { url = "https://files.pythonhosted.org/packages/82/95/9dc227d441ff2670651c27a739acb2535ccaf8b351a88d78c088965e5996/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae92443798a40a92dc5f0b01d8a7c93adde0c4dc965310a29ae7c64d72b9fad2", size = 524334, upload-time = "2025-08-27T12:13:37.562Z" }, + { url = "https://files.pythonhosted.org/packages/87/01/a670c232f401d9ad461d9a332aa4080cd3cb1d1df18213dbd0d2a6a7ab51/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c46c9dd2403b66a2a3b9720ec4b74d4ab49d4fabf9f03dfdce2d42af913fe8d0", size = 407691, upload-time = "2025-08-27T12:13:38.94Z" }, + { url = "https://files.pythonhosted.org/packages/03/36/0a14aebbaa26fe7fab4780c76f2239e76cc95a0090bdb25e31d95c492fcd/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efe4eb1d01b7f5f1939f4ef30ecea6c6b3521eec451fb93191bf84b2a522418", size = 386868, upload-time = "2025-08-27T12:13:40.192Z" }, + { url = "https://files.pythonhosted.org/packages/3b/03/8c897fb8b5347ff6c1cc31239b9611c5bf79d78c984430887a353e1409a1/rpds_py-0.27.1-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:15d3b4d83582d10c601f481eca29c3f138d44c92187d197aff663a269197c02d", size = 405469, upload-time = "2025-08-27T12:13:41.496Z" }, + { url = "https://files.pythonhosted.org/packages/da/07/88c60edc2df74850d496d78a1fdcdc7b54360a7f610a4d50008309d41b94/rpds_py-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4ed2e16abbc982a169d30d1a420274a709949e2cbdef119fe2ec9d870b42f274", size = 422125, upload-time = "2025-08-27T12:13:42.802Z" }, + { url = "https://files.pythonhosted.org/packages/6b/86/5f4c707603e41b05f191a749984f390dabcbc467cf833769b47bf14ba04f/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a75f305c9b013289121ec0f1181931975df78738cdf650093e6b86d74aa7d8dd", size = 562341, upload-time = "2025-08-27T12:13:44.472Z" }, + { url = "https://files.pythonhosted.org/packages/b2/92/3c0cb2492094e3cd9baf9e49bbb7befeceb584ea0c1a8b5939dca4da12e5/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67ce7620704745881a3d4b0ada80ab4d99df390838839921f99e63c474f82cf2", size = 592511, upload-time = "2025-08-27T12:13:45.898Z" }, + { url = "https://files.pythonhosted.org/packages/10/bb/82e64fbb0047c46a168faa28d0d45a7851cd0582f850b966811d30f67ad8/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d992ac10eb86d9b6f369647b6a3f412fc0075cfd5d799530e84d335e440a002", size = 557736, upload-time = "2025-08-27T12:13:47.408Z" }, + { url = "https://files.pythonhosted.org/packages/00/95/3c863973d409210da7fb41958172c6b7dbe7fc34e04d3cc1f10bb85e979f/rpds_py-0.27.1-cp313-cp313-win32.whl", hash = "sha256:4f75e4bd8ab8db624e02c8e2fc4063021b58becdbe6df793a8111d9343aec1e3", size = 221462, upload-time = "2025-08-27T12:13:48.742Z" }, + { url = "https://files.pythonhosted.org/packages/ce/2c/5867b14a81dc217b56d95a9f2a40fdbc56a1ab0181b80132beeecbd4b2d6/rpds_py-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9025faafc62ed0b75a53e541895ca272815bec18abe2249ff6501c8f2e12b83", size = 232034, upload-time = "2025-08-27T12:13:50.11Z" }, + { url = "https://files.pythonhosted.org/packages/c7/78/3958f3f018c01923823f1e47f1cc338e398814b92d83cd278364446fac66/rpds_py-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:ed10dc32829e7d222b7d3b93136d25a406ba9788f6a7ebf6809092da1f4d279d", size = 222392, upload-time = "2025-08-27T12:13:52.587Z" }, + { url = "https://files.pythonhosted.org/packages/01/76/1cdf1f91aed5c3a7bf2eba1f1c4e4d6f57832d73003919a20118870ea659/rpds_py-0.27.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:92022bbbad0d4426e616815b16bc4127f83c9a74940e1ccf3cfe0b387aba0228", size = 358355, upload-time = "2025-08-27T12:13:54.012Z" }, + { url = "https://files.pythonhosted.org/packages/c3/6f/bf142541229374287604caf3bb2a4ae17f0a580798fd72d3b009b532db4e/rpds_py-0.27.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47162fdab9407ec3f160805ac3e154df042e577dd53341745fc7fb3f625e6d92", size = 342138, upload-time = "2025-08-27T12:13:55.791Z" }, + { url = "https://files.pythonhosted.org/packages/1a/77/355b1c041d6be40886c44ff5e798b4e2769e497b790f0f7fd1e78d17e9a8/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb89bec23fddc489e5d78b550a7b773557c9ab58b7946154a10a6f7a214a48b2", size = 380247, upload-time = "2025-08-27T12:13:57.683Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a4/d9cef5c3946ea271ce2243c51481971cd6e34f21925af2783dd17b26e815/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e48af21883ded2b3e9eb48cb7880ad8598b31ab752ff3be6457001d78f416723", size = 390699, upload-time = "2025-08-27T12:13:59.137Z" }, + { url = "https://files.pythonhosted.org/packages/3a/06/005106a7b8c6c1a7e91b73169e49870f4af5256119d34a361ae5240a0c1d/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f5b7bd8e219ed50299e58551a410b64daafb5017d54bbe822e003856f06a802", size = 521852, upload-time = "2025-08-27T12:14:00.583Z" }, + { url = "https://files.pythonhosted.org/packages/e5/3e/50fb1dac0948e17a02eb05c24510a8fe12d5ce8561c6b7b7d1339ab7ab9c/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08f1e20bccf73b08d12d804d6e1c22ca5530e71659e6673bce31a6bb71c1e73f", size = 402582, upload-time = "2025-08-27T12:14:02.034Z" }, + { url = "https://files.pythonhosted.org/packages/cb/b0/f4e224090dc5b0ec15f31a02d746ab24101dd430847c4d99123798661bfc/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dc5dceeaefcc96dc192e3a80bbe1d6c410c469e97bdd47494a7d930987f18b2", size = 384126, upload-time = "2025-08-27T12:14:03.437Z" }, + { url = "https://files.pythonhosted.org/packages/54/77/ac339d5f82b6afff1df8f0fe0d2145cc827992cb5f8eeb90fc9f31ef7a63/rpds_py-0.27.1-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d76f9cc8665acdc0c9177043746775aa7babbf479b5520b78ae4002d889f5c21", size = 399486, upload-time = "2025-08-27T12:14:05.443Z" }, + { url = "https://files.pythonhosted.org/packages/d6/29/3e1c255eee6ac358c056a57d6d6869baa00a62fa32eea5ee0632039c50a3/rpds_py-0.27.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134fae0e36022edad8290a6661edf40c023562964efea0cc0ec7f5d392d2aaef", size = 414832, upload-time = "2025-08-27T12:14:06.902Z" }, + { url = "https://files.pythonhosted.org/packages/3f/db/6d498b844342deb3fa1d030598db93937a9964fcf5cb4da4feb5f17be34b/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb11a4f1b2b63337cfd3b4d110af778a59aae51c81d195768e353d8b52f88081", size = 557249, upload-time = "2025-08-27T12:14:08.37Z" }, + { url = "https://files.pythonhosted.org/packages/60/f3/690dd38e2310b6f68858a331399b4d6dbb9132c3e8ef8b4333b96caf403d/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:13e608ac9f50a0ed4faec0e90ece76ae33b34c0e8656e3dceb9a7db994c692cd", size = 587356, upload-time = "2025-08-27T12:14:10.034Z" }, + { url = "https://files.pythonhosted.org/packages/86/e3/84507781cccd0145f35b1dc32c72675200c5ce8d5b30f813e49424ef68fc/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dd2135527aa40f061350c3f8f89da2644de26cd73e4de458e79606384f4f68e7", size = 555300, upload-time = "2025-08-27T12:14:11.783Z" }, + { url = "https://files.pythonhosted.org/packages/e5/ee/375469849e6b429b3516206b4580a79e9ef3eb12920ddbd4492b56eaacbe/rpds_py-0.27.1-cp313-cp313t-win32.whl", hash = "sha256:3020724ade63fe320a972e2ffd93b5623227e684315adce194941167fee02688", size = 216714, upload-time = "2025-08-27T12:14:13.629Z" }, + { url = "https://files.pythonhosted.org/packages/21/87/3fc94e47c9bd0742660e84706c311a860dcae4374cf4a03c477e23ce605a/rpds_py-0.27.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8ee50c3e41739886606388ba3ab3ee2aae9f35fb23f833091833255a31740797", size = 228943, upload-time = "2025-08-27T12:14:14.937Z" }, + { url = "https://files.pythonhosted.org/packages/70/36/b6e6066520a07cf029d385de869729a895917b411e777ab1cde878100a1d/rpds_py-0.27.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:acb9aafccaae278f449d9c713b64a9e68662e7799dbd5859e2c6b3c67b56d334", size = 362472, upload-time = "2025-08-27T12:14:16.333Z" }, + { url = "https://files.pythonhosted.org/packages/af/07/b4646032e0dcec0df9c73a3bd52f63bc6c5f9cda992f06bd0e73fe3fbebd/rpds_py-0.27.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b7fb801aa7f845ddf601c49630deeeccde7ce10065561d92729bfe81bd21fb33", size = 345676, upload-time = "2025-08-27T12:14:17.764Z" }, + { url = "https://files.pythonhosted.org/packages/b0/16/2f1003ee5d0af4bcb13c0cf894957984c32a6751ed7206db2aee7379a55e/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0dd05afb46597b9a2e11c351e5e4283c741237e7f617ffb3252780cca9336a", size = 385313, upload-time = "2025-08-27T12:14:19.829Z" }, + { url = "https://files.pythonhosted.org/packages/05/cd/7eb6dd7b232e7f2654d03fa07f1414d7dfc980e82ba71e40a7c46fd95484/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6dfb0e058adb12d8b1d1b25f686e94ffa65d9995a5157afe99743bf7369d62b", size = 399080, upload-time = "2025-08-27T12:14:21.531Z" }, + { url = "https://files.pythonhosted.org/packages/20/51/5829afd5000ec1cb60f304711f02572d619040aa3ec033d8226817d1e571/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed090ccd235f6fa8bb5861684567f0a83e04f52dfc2e5c05f2e4b1309fcf85e7", size = 523868, upload-time = "2025-08-27T12:14:23.485Z" }, + { url = "https://files.pythonhosted.org/packages/05/2c/30eebca20d5db95720ab4d2faec1b5e4c1025c473f703738c371241476a2/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf876e79763eecf3e7356f157540d6a093cef395b65514f17a356f62af6cc136", size = 408750, upload-time = "2025-08-27T12:14:24.924Z" }, + { url = "https://files.pythonhosted.org/packages/90/1a/cdb5083f043597c4d4276eae4e4c70c55ab5accec078da8611f24575a367/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12ed005216a51b1d6e2b02a7bd31885fe317e45897de81d86dcce7d74618ffff", size = 387688, upload-time = "2025-08-27T12:14:27.537Z" }, + { url = "https://files.pythonhosted.org/packages/7c/92/cf786a15320e173f945d205ab31585cc43969743bb1a48b6888f7a2b0a2d/rpds_py-0.27.1-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:ee4308f409a40e50593c7e3bb8cbe0b4d4c66d1674a316324f0c2f5383b486f9", size = 407225, upload-time = "2025-08-27T12:14:28.981Z" }, + { url = "https://files.pythonhosted.org/packages/33/5c/85ee16df5b65063ef26017bef33096557a4c83fbe56218ac7cd8c235f16d/rpds_py-0.27.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b08d152555acf1f455154d498ca855618c1378ec810646fcd7c76416ac6dc60", size = 423361, upload-time = "2025-08-27T12:14:30.469Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8e/1c2741307fcabd1a334ecf008e92c4f47bb6f848712cf15c923becfe82bb/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:dce51c828941973a5684d458214d3a36fcd28da3e1875d659388f4f9f12cc33e", size = 562493, upload-time = "2025-08-27T12:14:31.987Z" }, + { url = "https://files.pythonhosted.org/packages/04/03/5159321baae9b2222442a70c1f988cbbd66b9be0675dd3936461269be360/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c1476d6f29eb81aa4151c9a31219b03f1f798dc43d8af1250a870735516a1212", size = 592623, upload-time = "2025-08-27T12:14:33.543Z" }, + { url = "https://files.pythonhosted.org/packages/ff/39/c09fd1ad28b85bc1d4554a8710233c9f4cefd03d7717a1b8fbfd171d1167/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3ce0cac322b0d69b63c9cdb895ee1b65805ec9ffad37639f291dd79467bee675", size = 558800, upload-time = "2025-08-27T12:14:35.436Z" }, + { url = "https://files.pythonhosted.org/packages/c5/d6/99228e6bbcf4baa764b18258f519a9035131d91b538d4e0e294313462a98/rpds_py-0.27.1-cp314-cp314-win32.whl", hash = "sha256:dfbfac137d2a3d0725758cd141f878bf4329ba25e34979797c89474a89a8a3a3", size = 221943, upload-time = "2025-08-27T12:14:36.898Z" }, + { url = "https://files.pythonhosted.org/packages/be/07/c802bc6b8e95be83b79bdf23d1aa61d68324cb1006e245d6c58e959e314d/rpds_py-0.27.1-cp314-cp314-win_amd64.whl", hash = "sha256:a6e57b0abfe7cc513450fcf529eb486b6e4d3f8aee83e92eb5f1ef848218d456", size = 233739, upload-time = "2025-08-27T12:14:38.386Z" }, + { url = "https://files.pythonhosted.org/packages/c8/89/3e1b1c16d4c2d547c5717377a8df99aee8099ff050f87c45cb4d5fa70891/rpds_py-0.27.1-cp314-cp314-win_arm64.whl", hash = "sha256:faf8d146f3d476abfee026c4ae3bdd9ca14236ae4e4c310cbd1cf75ba33d24a3", size = 223120, upload-time = "2025-08-27T12:14:39.82Z" }, + { url = "https://files.pythonhosted.org/packages/62/7e/dc7931dc2fa4a6e46b2a4fa744a9fe5c548efd70e0ba74f40b39fa4a8c10/rpds_py-0.27.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:ba81d2b56b6d4911ce735aad0a1d4495e808b8ee4dc58715998741a26874e7c2", size = 358944, upload-time = "2025-08-27T12:14:41.199Z" }, + { url = "https://files.pythonhosted.org/packages/e6/22/4af76ac4e9f336bfb1a5f240d18a33c6b2fcaadb7472ac7680576512b49a/rpds_py-0.27.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:84f7d509870098de0e864cad0102711c1e24e9b1a50ee713b65928adb22269e4", size = 342283, upload-time = "2025-08-27T12:14:42.699Z" }, + { url = "https://files.pythonhosted.org/packages/1c/15/2a7c619b3c2272ea9feb9ade67a45c40b3eeb500d503ad4c28c395dc51b4/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e960fc78fecd1100539f14132425e1d5fe44ecb9239f8f27f079962021523e", size = 380320, upload-time = "2025-08-27T12:14:44.157Z" }, + { url = "https://files.pythonhosted.org/packages/a2/7d/4c6d243ba4a3057e994bb5bedd01b5c963c12fe38dde707a52acdb3849e7/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:62f85b665cedab1a503747617393573995dac4600ff51869d69ad2f39eb5e817", size = 391760, upload-time = "2025-08-27T12:14:45.845Z" }, + { url = "https://files.pythonhosted.org/packages/b4/71/b19401a909b83bcd67f90221330bc1ef11bc486fe4e04c24388d28a618ae/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fed467af29776f6556250c9ed85ea5a4dd121ab56a5f8b206e3e7a4c551e48ec", size = 522476, upload-time = "2025-08-27T12:14:47.364Z" }, + { url = "https://files.pythonhosted.org/packages/e4/44/1a3b9715c0455d2e2f0f6df5ee6d6f5afdc423d0773a8a682ed2b43c566c/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2729615f9d430af0ae6b36cf042cb55c0936408d543fb691e1a9e36648fd35a", size = 403418, upload-time = "2025-08-27T12:14:49.991Z" }, + { url = "https://files.pythonhosted.org/packages/1c/4b/fb6c4f14984eb56673bc868a66536f53417ddb13ed44b391998100a06a96/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b207d881a9aef7ba753d69c123a35d96ca7cb808056998f6b9e8747321f03b8", size = 384771, upload-time = "2025-08-27T12:14:52.159Z" }, + { url = "https://files.pythonhosted.org/packages/c0/56/d5265d2d28b7420d7b4d4d85cad8ef891760f5135102e60d5c970b976e41/rpds_py-0.27.1-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:639fd5efec029f99b79ae47e5d7e00ad8a773da899b6309f6786ecaf22948c48", size = 400022, upload-time = "2025-08-27T12:14:53.859Z" }, + { url = "https://files.pythonhosted.org/packages/8f/e9/9f5fc70164a569bdd6ed9046486c3568d6926e3a49bdefeeccfb18655875/rpds_py-0.27.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fecc80cb2a90e28af8a9b366edacf33d7a91cbfe4c2c4544ea1246e949cfebeb", size = 416787, upload-time = "2025-08-27T12:14:55.673Z" }, + { url = "https://files.pythonhosted.org/packages/d4/64/56dd03430ba491db943a81dcdef115a985aac5f44f565cd39a00c766d45c/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42a89282d711711d0a62d6f57d81aa43a1368686c45bc1c46b7f079d55692734", size = 557538, upload-time = "2025-08-27T12:14:57.245Z" }, + { url = "https://files.pythonhosted.org/packages/3f/36/92cc885a3129993b1d963a2a42ecf64e6a8e129d2c7cc980dbeba84e55fb/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:cf9931f14223de59551ab9d38ed18d92f14f055a5f78c1d8ad6493f735021bbb", size = 588512, upload-time = "2025-08-27T12:14:58.728Z" }, + { url = "https://files.pythonhosted.org/packages/dd/10/6b283707780a81919f71625351182b4f98932ac89a09023cb61865136244/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f39f58a27cc6e59f432b568ed8429c7e1641324fbe38131de852cd77b2d534b0", size = 555813, upload-time = "2025-08-27T12:15:00.334Z" }, + { url = "https://files.pythonhosted.org/packages/04/2e/30b5ea18c01379da6272a92825dd7e53dc9d15c88a19e97932d35d430ef7/rpds_py-0.27.1-cp314-cp314t-win32.whl", hash = "sha256:d5fa0ee122dc09e23607a28e6d7b150da16c662e66409bbe85230e4c85bb528a", size = 217385, upload-time = "2025-08-27T12:15:01.937Z" }, + { url = "https://files.pythonhosted.org/packages/32/7d/97119da51cb1dd3f2f3c0805f155a3aa4a95fa44fe7d78ae15e69edf4f34/rpds_py-0.27.1-cp314-cp314t-win_amd64.whl", hash = "sha256:6567d2bb951e21232c2f660c24cf3470bb96de56cdcb3f071a83feeaff8a2772", size = 230097, upload-time = "2025-08-27T12:15:03.961Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/74/8d69dcb7a9efe8baa2046891735e5dfe433ad558ae23d9e3c14c633d1d58/s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125", size = 151547, upload-time = "2025-09-09T19:23:31.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/f0/ae7ca09223a81a1d890b2557186ea015f6e0502e9b8cb8e1813f1d8cfa4e/s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456", size = 85712, upload-time = "2025-09-09T19:23:30.041Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "slack-bolt" +version = "1.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "slack-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/14/0f490731fbfc95b5711e8124b30bb6e2a4be5edad22256891adad66f8b79/slack_bolt-1.26.0.tar.gz", hash = "sha256:b0b806b9dcf009ee50172830c1d170e231cd873c5b819703bbcdc59a0fe5ff3e", size = 129915, upload-time = "2025-10-06T23:41:51.708Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/77/57aff95f88f2f1a959088ff29c45ceaf8dcad540e9966b647d6942a007f0/slack_bolt-1.26.0-py2.py3-none-any.whl", hash = "sha256:d8386ecb27aaa487c1a5e4b43a4125f532100fc3a26e49dd2a66f5837ff2e3be", size = 230084, upload-time = "2025-10-06T23:41:50.118Z" }, +] + +[[package]] +name = "slack-sdk" +version = "3.37.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/c2/0a174a155623d7dc3ed4d1360cdf755590acdc2c3fc9ce0d2340f468909f/slack_sdk-3.37.0.tar.gz", hash = "sha256:242d6cffbd9e843af807487ff04853189b812081aeaa22f90a8f159f20220ed9", size = 241612, upload-time = "2025-10-06T23:07:20.856Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/fd/a502ee24d8c7d12a8f749878ae0949b8eeb50aeac22dc5a613d417a256d0/slack_sdk-3.37.0-py2.py3-none-any.whl", hash = "sha256:e108a0836eafda74d8a95e76c12c2bcb010e645d504d8497451e4c7ebb229c87", size = 302751, upload-time = "2025-10-06T23:07:19.542Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" }, +] + +[[package]] +name = "sse-starlette" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/6f/22ed6e33f8a9e76ca0a412405f31abb844b779d52c5f96660766edcd737c/sse_starlette-3.0.2.tar.gz", hash = "sha256:ccd60b5765ebb3584d0de2d7a6e4f745672581de4f5005ab31c3a25d10b52b3a", size = 20985, upload-time = "2025-07-27T09:07:44.565Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/10/c78f463b4ef22eef8491f218f692be838282cd65480f6e423d7730dfd1fb/sse_starlette-3.0.2-py3-none-any.whl", hash = "sha256:16b7cbfddbcd4eaca11f7b586f3b8a080f1afe952c15813455b162edea619e5a", size = 11297, upload-time = "2025-07-27T09:07:43.268Z" }, +] + +[[package]] +name = "starlette" +version = "0.48.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/a5/d6f429d43394057b67a6b5bbe6eae2f77a6bf7459d961fdb224bf206eee6/starlette-0.48.0.tar.gz", hash = "sha256:7e8cee469a8ab2352911528110ce9088fdc6a37d9876926e73da7ce4aa4c7a46", size = 2652949, upload-time = "2025-09-13T08:41:05.699Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/72/2db2f49247d0a18b4f1bb9a5a39a0162869acf235f3a96418363947b3d46/starlette-0.48.0-py3-none-any.whl", hash = "sha256:0764ca97b097582558ecb498132ed0c7d942f233f365b86ba37770e026510659", size = 73736, upload-time = "2025-09-13T08:41:03.869Z" }, +] + +[[package]] +name = "strands-agents" +version = "1.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "docstring-parser" }, + { name = "mcp" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation-threading" }, + { name = "opentelemetry-sdk" }, + { name = "pydantic" }, + { name = "typing-extensions" }, + { name = "watchdog" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/56/3d3cb9bf62d45f97befe82fbb73ad65b46e9a6efd21151c38c466cd87c11/strands_agents-1.12.0.tar.gz", hash = "sha256:8f17e775971505ab7841a3139cde9879632a26cdd9cd55be74de83f0e7f804c0", size = 418141, upload-time = "2025-10-10T15:16:45.753Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/05/2f0fbce4a3acd7b9c042368bbe8038409a7b30d65138bd3b37a06d1a4cc4/strands_agents-1.12.0-py3-none-any.whl", hash = "sha256:af0f9c8a175666009863d0fb4438e71000ea3a2f0cbda3dc308c35dd4f9a1eb0", size = 216043, upload-time = "2025-10-10T15:16:44.043Z" }, +] + +[package.optional-dependencies] +otel = [ + { name = "opentelemetry-exporter-otlp-proto-http" }, +] + +[[package]] +name = "strands-agents-tools" +version = "0.2.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "aws-requests-auth" }, + { name = "botocore" }, + { name = "dill" }, + { name = "markdownify" }, + { name = "pillow" }, + { name = "prompt-toolkit" }, + { name = "pyjwt" }, + { name = "requests" }, + { name = "rich" }, + { name = "slack-bolt" }, + { name = "strands-agents" }, + { name = "sympy" }, + { name = "tenacity" }, + { name = "tzdata", marker = "sys_platform == 'win32'" }, + { name = "watchdog" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/37/a2/5cd71b9db26c98d6289883fe969e884964fbd2a4b78cb75073d6651f0041/strands_agents_tools-0.2.11.tar.gz", hash = "sha256:5ef192b68eddeccb96c47227ca841ccce3aedff5db0953a0af7b7212a09428df", size = 445792, upload-time = "2025-10-10T16:58:26.396Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/1c/a903b4947e3e0e56c2a1db7008c286c399b2e37c6362c569e8a07006e046/strands_agents_tools-0.2.11-py3-none-any.whl", hash = "sha256:ebff41ba782e1ce59530e11321780eae0ffdb5b61e7aee7408c46c1a8f29f18d", size = 297958, upload-time = "2025-10-10T16:58:24.213Z" }, +] + +[[package]] +name = "sympy" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, +] + +[[package]] +name = "tenacity" +version = "9.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/57/1616c8274c3442d802621abf5deb230771c7a0fec9414cb6763900eb3868/uvicorn-0.37.0.tar.gz", hash = "sha256:4115c8add6d3fd536c8ee77f0e14a7fd2ebba939fed9b02583a97f80648f9e13", size = 80367, upload-time = "2025-09-23T13:33:47.486Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/cd/584a2ceb5532af99dd09e50919e3615ba99aa127e9850eafe5f31ddfdb9a/uvicorn-0.37.0-py3-none-any.whl", hash = "sha256:913b2b88672343739927ce381ff9e2ad62541f9f8289664fa1d1d3803fa2ce6c", size = 67976, upload-time = "2025-09-23T13:33:45.842Z" }, +] + +[[package]] +name = "watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, + { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.2.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925, upload-time = "2024-11-08T15:52:18.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, +] + +[[package]] +name = "wheel" +version = "0.45.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/98/2d9906746cdc6a6ef809ae6338005b3f21bb568bea3165cfc6a243fdc25c/wheel-0.45.1.tar.gz", hash = "sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729", size = 107545, upload-time = "2024-11-23T00:18:23.513Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/2c/87f3254fd8ffd29e4c02732eee68a83a1d3c346ae39bc6822dcbcb697f2b/wheel-0.45.1-py3-none-any.whl", hash = "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248", size = 72494, upload-time = "2024-11-23T00:18:21.207Z" }, +] + +[[package]] +name = "wrapt" +version = "1.17.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/8f/aeb76c5b46e273670962298c23e7ddde79916cb74db802131d49a85e4b7d/wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0", size = 55547, upload-time = "2025-08-12T05:53:21.714Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/f6/759ece88472157acb55fc195e5b116e06730f1b651b5b314c66291729193/wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0", size = 54003, upload-time = "2025-08-12T05:51:48.627Z" }, + { url = "https://files.pythonhosted.org/packages/4f/a9/49940b9dc6d47027dc850c116d79b4155f15c08547d04db0f07121499347/wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77", size = 39025, upload-time = "2025-08-12T05:51:37.156Z" }, + { url = "https://files.pythonhosted.org/packages/45/35/6a08de0f2c96dcdd7fe464d7420ddb9a7655a6561150e5fc4da9356aeaab/wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7", size = 39108, upload-time = "2025-08-12T05:51:58.425Z" }, + { url = "https://files.pythonhosted.org/packages/0c/37/6faf15cfa41bf1f3dba80cd3f5ccc6622dfccb660ab26ed79f0178c7497f/wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277", size = 88072, upload-time = "2025-08-12T05:52:37.53Z" }, + { url = "https://files.pythonhosted.org/packages/78/f2/efe19ada4a38e4e15b6dff39c3e3f3f73f5decf901f66e6f72fe79623a06/wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d", size = 88214, upload-time = "2025-08-12T05:52:15.886Z" }, + { url = "https://files.pythonhosted.org/packages/40/90/ca86701e9de1622b16e09689fc24b76f69b06bb0150990f6f4e8b0eeb576/wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa", size = 87105, upload-time = "2025-08-12T05:52:17.914Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e0/d10bd257c9a3e15cbf5523025252cc14d77468e8ed644aafb2d6f54cb95d/wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050", size = 87766, upload-time = "2025-08-12T05:52:39.243Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cf/7d848740203c7b4b27eb55dbfede11aca974a51c3d894f6cc4b865f42f58/wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8", size = 36711, upload-time = "2025-08-12T05:53:10.074Z" }, + { url = "https://files.pythonhosted.org/packages/57/54/35a84d0a4d23ea675994104e667ceff49227ce473ba6a59ba2c84f250b74/wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb", size = 38885, upload-time = "2025-08-12T05:53:08.695Z" }, + { url = "https://files.pythonhosted.org/packages/01/77/66e54407c59d7b02a3c4e0af3783168fff8e5d61def52cda8728439d86bc/wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16", size = 36896, upload-time = "2025-08-12T05:52:55.34Z" }, + { url = "https://files.pythonhosted.org/packages/02/a2/cd864b2a14f20d14f4c496fab97802001560f9f41554eef6df201cd7f76c/wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39", size = 54132, upload-time = "2025-08-12T05:51:49.864Z" }, + { url = "https://files.pythonhosted.org/packages/d5/46/d011725b0c89e853dc44cceb738a307cde5d240d023d6d40a82d1b4e1182/wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235", size = 39091, upload-time = "2025-08-12T05:51:38.935Z" }, + { url = "https://files.pythonhosted.org/packages/2e/9e/3ad852d77c35aae7ddebdbc3b6d35ec8013af7d7dddad0ad911f3d891dae/wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c", size = 39172, upload-time = "2025-08-12T05:51:59.365Z" }, + { url = "https://files.pythonhosted.org/packages/c3/f7/c983d2762bcce2326c317c26a6a1e7016f7eb039c27cdf5c4e30f4160f31/wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b", size = 87163, upload-time = "2025-08-12T05:52:40.965Z" }, + { url = "https://files.pythonhosted.org/packages/e4/0f/f673f75d489c7f22d17fe0193e84b41540d962f75fce579cf6873167c29b/wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa", size = 87963, upload-time = "2025-08-12T05:52:20.326Z" }, + { url = "https://files.pythonhosted.org/packages/df/61/515ad6caca68995da2fac7a6af97faab8f78ebe3bf4f761e1b77efbc47b5/wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7", size = 86945, upload-time = "2025-08-12T05:52:21.581Z" }, + { url = "https://files.pythonhosted.org/packages/d3/bd/4e70162ce398462a467bc09e768bee112f1412e563620adc353de9055d33/wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4", size = 86857, upload-time = "2025-08-12T05:52:43.043Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b8/da8560695e9284810b8d3df8a19396a6e40e7518059584a1a394a2b35e0a/wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10", size = 37178, upload-time = "2025-08-12T05:53:12.605Z" }, + { url = "https://files.pythonhosted.org/packages/db/c8/b71eeb192c440d67a5a0449aaee2310a1a1e8eca41676046f99ed2487e9f/wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6", size = 39310, upload-time = "2025-08-12T05:53:11.106Z" }, + { url = "https://files.pythonhosted.org/packages/45/20/2cda20fd4865fa40f86f6c46ed37a2a8356a7a2fde0773269311f2af56c7/wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58", size = 37266, upload-time = "2025-08-12T05:52:56.531Z" }, + { url = "https://files.pythonhosted.org/packages/77/ed/dd5cf21aec36c80443c6f900449260b80e2a65cf963668eaef3b9accce36/wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a", size = 56544, upload-time = "2025-08-12T05:51:51.109Z" }, + { url = "https://files.pythonhosted.org/packages/8d/96/450c651cc753877ad100c7949ab4d2e2ecc4d97157e00fa8f45df682456a/wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067", size = 40283, upload-time = "2025-08-12T05:51:39.912Z" }, + { url = "https://files.pythonhosted.org/packages/d1/86/2fcad95994d9b572db57632acb6f900695a648c3e063f2cd344b3f5c5a37/wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454", size = 40366, upload-time = "2025-08-12T05:52:00.693Z" }, + { url = "https://files.pythonhosted.org/packages/64/0e/f4472f2fdde2d4617975144311f8800ef73677a159be7fe61fa50997d6c0/wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e", size = 108571, upload-time = "2025-08-12T05:52:44.521Z" }, + { url = "https://files.pythonhosted.org/packages/cc/01/9b85a99996b0a97c8a17484684f206cbb6ba73c1ce6890ac668bcf3838fb/wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f", size = 113094, upload-time = "2025-08-12T05:52:22.618Z" }, + { url = "https://files.pythonhosted.org/packages/25/02/78926c1efddcc7b3aa0bc3d6b33a822f7d898059f7cd9ace8c8318e559ef/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056", size = 110659, upload-time = "2025-08-12T05:52:24.057Z" }, + { url = "https://files.pythonhosted.org/packages/dc/ee/c414501ad518ac3e6fe184753632fe5e5ecacdcf0effc23f31c1e4f7bfcf/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804", size = 106946, upload-time = "2025-08-12T05:52:45.976Z" }, + { url = "https://files.pythonhosted.org/packages/be/44/a1bd64b723d13bb151d6cc91b986146a1952385e0392a78567e12149c7b4/wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977", size = 38717, upload-time = "2025-08-12T05:53:15.214Z" }, + { url = "https://files.pythonhosted.org/packages/79/d9/7cfd5a312760ac4dd8bf0184a6ee9e43c33e47f3dadc303032ce012b8fa3/wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116", size = 41334, upload-time = "2025-08-12T05:53:14.178Z" }, + { url = "https://files.pythonhosted.org/packages/46/78/10ad9781128ed2f99dbc474f43283b13fea8ba58723e98844367531c18e9/wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6", size = 38471, upload-time = "2025-08-12T05:52:57.784Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, +] + +[[package]] +name = "xmltodict" +version = "1.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/aa/917ceeed4dbb80d2f04dbd0c784b7ee7bba8ae5a54837ef0e5e062cd3cfb/xmltodict-1.0.2.tar.gz", hash = "sha256:54306780b7c2175a3967cad1db92f218207e5bc1aba697d887807c0fb68b7649", size = 25725, upload-time = "2025-09-17T21:59:26.459Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/20/69a0e6058bc5ea74892d089d64dfc3a62ba78917ec5e2cfa70f7c92ba3a5/xmltodict-1.0.2-py3-none-any.whl", hash = "sha256:62d0fddb0dcbc9f642745d8bbf4d81fd17d6dfaec5a15b5c1876300aad92af0d", size = 13893, upload-time = "2025-09-17T21:59:24.859Z" }, +] + +[[package]] +name = "yarl" +version = "1.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/f3/d67de7260456ee105dc1d162d43a019ecad6b91e2f51809d6cddaa56690e/yarl-1.22.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8dee9c25c74997f6a750cd317b8ca63545169c098faee42c84aa5e506c819b53", size = 139980, upload-time = "2025-10-06T14:10:14.601Z" }, + { url = "https://files.pythonhosted.org/packages/01/88/04d98af0b47e0ef42597b9b28863b9060bb515524da0a65d5f4db160b2d5/yarl-1.22.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01e73b85a5434f89fc4fe27dcda2aff08ddf35e4d47bbbea3bdcd25321af538a", size = 93424, upload-time = "2025-10-06T14:10:16.115Z" }, + { url = "https://files.pythonhosted.org/packages/18/91/3274b215fd8442a03975ce6bee5fe6aa57a8326b29b9d3d56234a1dca244/yarl-1.22.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:22965c2af250d20c873cdbee8ff958fb809940aeb2e74ba5f20aaf6b7ac8c70c", size = 93821, upload-time = "2025-10-06T14:10:17.993Z" }, + { url = "https://files.pythonhosted.org/packages/61/3a/caf4e25036db0f2da4ca22a353dfeb3c9d3c95d2761ebe9b14df8fc16eb0/yarl-1.22.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4f15793aa49793ec8d1c708ab7f9eded1aa72edc5174cae703651555ed1b601", size = 373243, upload-time = "2025-10-06T14:10:19.44Z" }, + { url = "https://files.pythonhosted.org/packages/6e/9e/51a77ac7516e8e7803b06e01f74e78649c24ee1021eca3d6a739cb6ea49c/yarl-1.22.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5542339dcf2747135c5c85f68680353d5cb9ffd741c0f2e8d832d054d41f35a", size = 342361, upload-time = "2025-10-06T14:10:21.124Z" }, + { url = "https://files.pythonhosted.org/packages/d4/f8/33b92454789dde8407f156c00303e9a891f1f51a0330b0fad7c909f87692/yarl-1.22.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5c401e05ad47a75869c3ab3e35137f8468b846770587e70d71e11de797d113df", size = 387036, upload-time = "2025-10-06T14:10:22.902Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9a/c5db84ea024f76838220280f732970aa4ee154015d7f5c1bfb60a267af6f/yarl-1.22.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:243dda95d901c733f5b59214d28b0120893d91777cb8aa043e6ef059d3cddfe2", size = 397671, upload-time = "2025-10-06T14:10:24.523Z" }, + { url = "https://files.pythonhosted.org/packages/11/c9/cd8538dc2e7727095e0c1d867bad1e40c98f37763e6d995c1939f5fdc7b1/yarl-1.22.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bec03d0d388060058f5d291a813f21c011041938a441c593374da6077fe21b1b", size = 377059, upload-time = "2025-10-06T14:10:26.406Z" }, + { url = "https://files.pythonhosted.org/packages/a1/b9/ab437b261702ced75122ed78a876a6dec0a1b0f5e17a4ac7a9a2482d8abe/yarl-1.22.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0748275abb8c1e1e09301ee3cf90c8a99678a4e92e4373705f2a2570d581273", size = 365356, upload-time = "2025-10-06T14:10:28.461Z" }, + { url = "https://files.pythonhosted.org/packages/b2/9d/8e1ae6d1d008a9567877b08f0ce4077a29974c04c062dabdb923ed98e6fe/yarl-1.22.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:47fdb18187e2a4e18fda2c25c05d8251a9e4a521edaed757fef033e7d8498d9a", size = 361331, upload-time = "2025-10-06T14:10:30.541Z" }, + { url = "https://files.pythonhosted.org/packages/ca/5a/09b7be3905962f145b73beb468cdd53db8aa171cf18c80400a54c5b82846/yarl-1.22.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c7044802eec4524fde550afc28edda0dd5784c4c45f0be151a2d3ba017daca7d", size = 382590, upload-time = "2025-10-06T14:10:33.352Z" }, + { url = "https://files.pythonhosted.org/packages/aa/7f/59ec509abf90eda5048b0bc3e2d7b5099dffdb3e6b127019895ab9d5ef44/yarl-1.22.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:139718f35149ff544caba20fce6e8a2f71f1e39b92c700d8438a0b1d2a631a02", size = 385316, upload-time = "2025-10-06T14:10:35.034Z" }, + { url = "https://files.pythonhosted.org/packages/e5/84/891158426bc8036bfdfd862fabd0e0fa25df4176ec793e447f4b85cf1be4/yarl-1.22.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e1b51bebd221006d3d2f95fbe124b22b247136647ae5dcc8c7acafba66e5ee67", size = 374431, upload-time = "2025-10-06T14:10:37.76Z" }, + { url = "https://files.pythonhosted.org/packages/bb/49/03da1580665baa8bef5e8ed34c6df2c2aca0a2f28bf397ed238cc1bbc6f2/yarl-1.22.0-cp313-cp313-win32.whl", hash = "sha256:d3e32536234a95f513bd374e93d717cf6b2231a791758de6c509e3653f234c95", size = 81555, upload-time = "2025-10-06T14:10:39.649Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ee/450914ae11b419eadd067c6183ae08381cfdfcb9798b90b2b713bbebddda/yarl-1.22.0-cp313-cp313-win_amd64.whl", hash = "sha256:47743b82b76d89a1d20b83e60d5c20314cbd5ba2befc9cda8f28300c4a08ed4d", size = 86965, upload-time = "2025-10-06T14:10:41.313Z" }, + { url = "https://files.pythonhosted.org/packages/98/4d/264a01eae03b6cf629ad69bae94e3b0e5344741e929073678e84bf7a3e3b/yarl-1.22.0-cp313-cp313-win_arm64.whl", hash = "sha256:5d0fcda9608875f7d052eff120c7a5da474a6796fe4d83e152e0e4d42f6d1a9b", size = 81205, upload-time = "2025-10-06T14:10:43.167Z" }, + { url = "https://files.pythonhosted.org/packages/88/fc/6908f062a2f77b5f9f6d69cecb1747260831ff206adcbc5b510aff88df91/yarl-1.22.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:719ae08b6972befcba4310e49edb1161a88cdd331e3a694b84466bd938a6ab10", size = 146209, upload-time = "2025-10-06T14:10:44.643Z" }, + { url = "https://files.pythonhosted.org/packages/65/47/76594ae8eab26210b4867be6f49129861ad33da1f1ebdf7051e98492bf62/yarl-1.22.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:47d8a5c446df1c4db9d21b49619ffdba90e77c89ec6e283f453856c74b50b9e3", size = 95966, upload-time = "2025-10-06T14:10:46.554Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ce/05e9828a49271ba6b5b038b15b3934e996980dd78abdfeb52a04cfb9467e/yarl-1.22.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cfebc0ac8333520d2d0423cbbe43ae43c8838862ddb898f5ca68565e395516e9", size = 97312, upload-time = "2025-10-06T14:10:48.007Z" }, + { url = "https://files.pythonhosted.org/packages/d1/c5/7dffad5e4f2265b29c9d7ec869c369e4223166e4f9206fc2243ee9eea727/yarl-1.22.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4398557cbf484207df000309235979c79c4356518fd5c99158c7d38203c4da4f", size = 361967, upload-time = "2025-10-06T14:10:49.997Z" }, + { url = "https://files.pythonhosted.org/packages/50/b2/375b933c93a54bff7fc041e1a6ad2c0f6f733ffb0c6e642ce56ee3b39970/yarl-1.22.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2ca6fd72a8cd803be290d42f2dec5cdcd5299eeb93c2d929bf060ad9efaf5de0", size = 323949, upload-time = "2025-10-06T14:10:52.004Z" }, + { url = "https://files.pythonhosted.org/packages/66/50/bfc2a29a1d78644c5a7220ce2f304f38248dc94124a326794e677634b6cf/yarl-1.22.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca1f59c4e1ab6e72f0a23c13fca5430f889634166be85dbf1013683e49e3278e", size = 361818, upload-time = "2025-10-06T14:10:54.078Z" }, + { url = "https://files.pythonhosted.org/packages/46/96/f3941a46af7d5d0f0498f86d71275696800ddcdd20426298e572b19b91ff/yarl-1.22.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c5010a52015e7c70f86eb967db0f37f3c8bd503a695a49f8d45700144667708", size = 372626, upload-time = "2025-10-06T14:10:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/c1/42/8b27c83bb875cd89448e42cd627e0fb971fa1675c9ec546393d18826cb50/yarl-1.22.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d7672ecf7557476642c88497c2f8d8542f8e36596e928e9bcba0e42e1e7d71f", size = 341129, upload-time = "2025-10-06T14:10:57.985Z" }, + { url = "https://files.pythonhosted.org/packages/49/36/99ca3122201b382a3cf7cc937b95235b0ac944f7e9f2d5331d50821ed352/yarl-1.22.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b7c88eeef021579d600e50363e0b6ee4f7f6f728cd3486b9d0f3ee7b946398d", size = 346776, upload-time = "2025-10-06T14:10:59.633Z" }, + { url = "https://files.pythonhosted.org/packages/85/b4/47328bf996acd01a4c16ef9dcd2f59c969f495073616586f78cd5f2efb99/yarl-1.22.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f4afb5c34f2c6fecdcc182dfcfc6af6cccf1aa923eed4d6a12e9d96904e1a0d8", size = 334879, upload-time = "2025-10-06T14:11:01.454Z" }, + { url = "https://files.pythonhosted.org/packages/c2/ad/b77d7b3f14a4283bffb8e92c6026496f6de49751c2f97d4352242bba3990/yarl-1.22.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:59c189e3e99a59cf8d83cbb31d4db02d66cda5a1a4374e8a012b51255341abf5", size = 350996, upload-time = "2025-10-06T14:11:03.452Z" }, + { url = "https://files.pythonhosted.org/packages/81/c8/06e1d69295792ba54d556f06686cbd6a7ce39c22307100e3fb4a2c0b0a1d/yarl-1.22.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:5a3bf7f62a289fa90f1990422dc8dff5a458469ea71d1624585ec3a4c8d6960f", size = 356047, upload-time = "2025-10-06T14:11:05.115Z" }, + { url = "https://files.pythonhosted.org/packages/4b/b8/4c0e9e9f597074b208d18cef227d83aac36184bfbc6eab204ea55783dbc5/yarl-1.22.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:de6b9a04c606978fdfe72666fa216ffcf2d1a9f6a381058d4378f8d7b1e5de62", size = 342947, upload-time = "2025-10-06T14:11:08.137Z" }, + { url = "https://files.pythonhosted.org/packages/e0/e5/11f140a58bf4c6ad7aca69a892bff0ee638c31bea4206748fc0df4ebcb3a/yarl-1.22.0-cp313-cp313t-win32.whl", hash = "sha256:1834bb90991cc2999f10f97f5f01317f99b143284766d197e43cd5b45eb18d03", size = 86943, upload-time = "2025-10-06T14:11:10.284Z" }, + { url = "https://files.pythonhosted.org/packages/31/74/8b74bae38ed7fe6793d0c15a0c8207bbb819cf287788459e5ed230996cdd/yarl-1.22.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ff86011bd159a9d2dfc89c34cfd8aff12875980e3bd6a39ff097887520e60249", size = 93715, upload-time = "2025-10-06T14:11:11.739Z" }, + { url = "https://files.pythonhosted.org/packages/69/66/991858aa4b5892d57aef7ee1ba6b4d01ec3b7eb3060795d34090a3ca3278/yarl-1.22.0-cp313-cp313t-win_arm64.whl", hash = "sha256:7861058d0582b847bc4e3a4a4c46828a410bca738673f35a29ba3ca5db0b473b", size = 83857, upload-time = "2025-10-06T14:11:13.586Z" }, + { url = "https://files.pythonhosted.org/packages/46/b3/e20ef504049f1a1c54a814b4b9bed96d1ac0e0610c3b4da178f87209db05/yarl-1.22.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:34b36c2c57124530884d89d50ed2c1478697ad7473efd59cfd479945c95650e4", size = 140520, upload-time = "2025-10-06T14:11:15.465Z" }, + { url = "https://files.pythonhosted.org/packages/e4/04/3532d990fdbab02e5ede063676b5c4260e7f3abea2151099c2aa745acc4c/yarl-1.22.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:0dd9a702591ca2e543631c2a017e4a547e38a5c0f29eece37d9097e04a7ac683", size = 93504, upload-time = "2025-10-06T14:11:17.106Z" }, + { url = "https://files.pythonhosted.org/packages/11/63/ff458113c5c2dac9a9719ac68ee7c947cb621432bcf28c9972b1c0e83938/yarl-1.22.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:594fcab1032e2d2cc3321bb2e51271e7cd2b516c7d9aee780ece81b07ff8244b", size = 94282, upload-time = "2025-10-06T14:11:19.064Z" }, + { url = "https://files.pythonhosted.org/packages/a7/bc/315a56aca762d44a6aaaf7ad253f04d996cb6b27bad34410f82d76ea8038/yarl-1.22.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3d7a87a78d46a2e3d5b72587ac14b4c16952dd0887dbb051451eceac774411e", size = 372080, upload-time = "2025-10-06T14:11:20.996Z" }, + { url = "https://files.pythonhosted.org/packages/3f/3f/08e9b826ec2e099ea6e7c69a61272f4f6da62cb5b1b63590bb80ca2e4a40/yarl-1.22.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:852863707010316c973162e703bddabec35e8757e67fcb8ad58829de1ebc8590", size = 338696, upload-time = "2025-10-06T14:11:22.847Z" }, + { url = "https://files.pythonhosted.org/packages/e3/9f/90360108e3b32bd76789088e99538febfea24a102380ae73827f62073543/yarl-1.22.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:131a085a53bfe839a477c0845acf21efc77457ba2bcf5899618136d64f3303a2", size = 387121, upload-time = "2025-10-06T14:11:24.889Z" }, + { url = "https://files.pythonhosted.org/packages/98/92/ab8d4657bd5b46a38094cfaea498f18bb70ce6b63508fd7e909bd1f93066/yarl-1.22.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:078a8aefd263f4d4f923a9677b942b445a2be970ca24548a8102689a3a8ab8da", size = 394080, upload-time = "2025-10-06T14:11:27.307Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e7/d8c5a7752fef68205296201f8ec2bf718f5c805a7a7e9880576c67600658/yarl-1.22.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bca03b91c323036913993ff5c738d0842fc9c60c4648e5c8d98331526df89784", size = 372661, upload-time = "2025-10-06T14:11:29.387Z" }, + { url = "https://files.pythonhosted.org/packages/b6/2e/f4d26183c8db0bb82d491b072f3127fb8c381a6206a3a56332714b79b751/yarl-1.22.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:68986a61557d37bb90d3051a45b91fa3d5c516d177dfc6dd6f2f436a07ff2b6b", size = 364645, upload-time = "2025-10-06T14:11:31.423Z" }, + { url = "https://files.pythonhosted.org/packages/80/7c/428e5812e6b87cd00ee8e898328a62c95825bf37c7fa87f0b6bb2ad31304/yarl-1.22.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:4792b262d585ff0dff6bcb787f8492e40698443ec982a3568c2096433660c694", size = 355361, upload-time = "2025-10-06T14:11:33.055Z" }, + { url = "https://files.pythonhosted.org/packages/ec/2a/249405fd26776f8b13c067378ef4d7dd49c9098d1b6457cdd152a99e96a9/yarl-1.22.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ebd4549b108d732dba1d4ace67614b9545b21ece30937a63a65dd34efa19732d", size = 381451, upload-time = "2025-10-06T14:11:35.136Z" }, + { url = "https://files.pythonhosted.org/packages/67/a8/fb6b1adbe98cf1e2dd9fad71003d3a63a1bc22459c6e15f5714eb9323b93/yarl-1.22.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f87ac53513d22240c7d59203f25cc3beac1e574c6cd681bbfd321987b69f95fd", size = 383814, upload-time = "2025-10-06T14:11:37.094Z" }, + { url = "https://files.pythonhosted.org/packages/d9/f9/3aa2c0e480fb73e872ae2814c43bc1e734740bb0d54e8cb2a95925f98131/yarl-1.22.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:22b029f2881599e2f1b06f8f1db2ee63bd309e2293ba2d566e008ba12778b8da", size = 370799, upload-time = "2025-10-06T14:11:38.83Z" }, + { url = "https://files.pythonhosted.org/packages/50/3c/af9dba3b8b5eeb302f36f16f92791f3ea62e3f47763406abf6d5a4a3333b/yarl-1.22.0-cp314-cp314-win32.whl", hash = "sha256:6a635ea45ba4ea8238463b4f7d0e721bad669f80878b7bfd1f89266e2ae63da2", size = 82990, upload-time = "2025-10-06T14:11:40.624Z" }, + { url = "https://files.pythonhosted.org/packages/ac/30/ac3a0c5bdc1d6efd1b41fa24d4897a4329b3b1e98de9449679dd327af4f0/yarl-1.22.0-cp314-cp314-win_amd64.whl", hash = "sha256:0d6e6885777af0f110b0e5d7e5dda8b704efed3894da26220b7f3d887b839a79", size = 88292, upload-time = "2025-10-06T14:11:42.578Z" }, + { url = "https://files.pythonhosted.org/packages/df/0a/227ab4ff5b998a1b7410abc7b46c9b7a26b0ca9e86c34ba4b8d8bc7c63d5/yarl-1.22.0-cp314-cp314-win_arm64.whl", hash = "sha256:8218f4e98d3c10d683584cb40f0424f4b9fd6e95610232dd75e13743b070ee33", size = 82888, upload-time = "2025-10-06T14:11:44.863Z" }, + { url = "https://files.pythonhosted.org/packages/06/5e/a15eb13db90abd87dfbefb9760c0f3f257ac42a5cac7e75dbc23bed97a9f/yarl-1.22.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:45c2842ff0e0d1b35a6bf1cd6c690939dacb617a70827f715232b2e0494d55d1", size = 146223, upload-time = "2025-10-06T14:11:46.796Z" }, + { url = "https://files.pythonhosted.org/packages/18/82/9665c61910d4d84f41a5bf6837597c89e665fa88aa4941080704645932a9/yarl-1.22.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:d947071e6ebcf2e2bee8fce76e10faca8f7a14808ca36a910263acaacef08eca", size = 95981, upload-time = "2025-10-06T14:11:48.845Z" }, + { url = "https://files.pythonhosted.org/packages/5d/9a/2f65743589809af4d0a6d3aa749343c4b5f4c380cc24a8e94a3c6625a808/yarl-1.22.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:334b8721303e61b00019474cc103bdac3d7b1f65e91f0bfedeec2d56dfe74b53", size = 97303, upload-time = "2025-10-06T14:11:50.897Z" }, + { url = "https://files.pythonhosted.org/packages/b0/ab/5b13d3e157505c43c3b43b5a776cbf7b24a02bc4cccc40314771197e3508/yarl-1.22.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e7ce67c34138a058fd092f67d07a72b8e31ff0c9236e751957465a24b28910c", size = 361820, upload-time = "2025-10-06T14:11:52.549Z" }, + { url = "https://files.pythonhosted.org/packages/fb/76/242a5ef4677615cf95330cfc1b4610e78184400699bdda0acb897ef5e49a/yarl-1.22.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d77e1b2c6d04711478cb1c4ab90db07f1609ccf06a287d5607fcd90dc9863acf", size = 323203, upload-time = "2025-10-06T14:11:54.225Z" }, + { url = "https://files.pythonhosted.org/packages/8c/96/475509110d3f0153b43d06164cf4195c64d16999e0c7e2d8a099adcd6907/yarl-1.22.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4647674b6150d2cae088fc07de2738a84b8bcedebef29802cf0b0a82ab6face", size = 363173, upload-time = "2025-10-06T14:11:56.069Z" }, + { url = "https://files.pythonhosted.org/packages/c9/66/59db471aecfbd559a1fd48aedd954435558cd98c7d0da8b03cc6c140a32c/yarl-1.22.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efb07073be061c8f79d03d04139a80ba33cbd390ca8f0297aae9cce6411e4c6b", size = 373562, upload-time = "2025-10-06T14:11:58.783Z" }, + { url = "https://files.pythonhosted.org/packages/03/1f/c5d94abc91557384719da10ff166b916107c1b45e4d0423a88457071dd88/yarl-1.22.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e51ac5435758ba97ad69617e13233da53908beccc6cfcd6c34bbed8dcbede486", size = 339828, upload-time = "2025-10-06T14:12:00.686Z" }, + { url = "https://files.pythonhosted.org/packages/5f/97/aa6a143d3afba17b6465733681c70cf175af89f76ec8d9286e08437a7454/yarl-1.22.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:33e32a0dd0c8205efa8e83d04fc9f19313772b78522d1bdc7d9aed706bfd6138", size = 347551, upload-time = "2025-10-06T14:12:02.628Z" }, + { url = "https://files.pythonhosted.org/packages/43/3c/45a2b6d80195959239a7b2a8810506d4eea5487dce61c2a3393e7fc3c52e/yarl-1.22.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:bf4a21e58b9cde0e401e683ebd00f6ed30a06d14e93f7c8fd059f8b6e8f87b6a", size = 334512, upload-time = "2025-10-06T14:12:04.871Z" }, + { url = "https://files.pythonhosted.org/packages/86/a0/c2ab48d74599c7c84cb104ebd799c5813de252bea0f360ffc29d270c2caa/yarl-1.22.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:e4b582bab49ac33c8deb97e058cd67c2c50dac0dd134874106d9c774fd272529", size = 352400, upload-time = "2025-10-06T14:12:06.624Z" }, + { url = "https://files.pythonhosted.org/packages/32/75/f8919b2eafc929567d3d8411f72bdb1a2109c01caaab4ebfa5f8ffadc15b/yarl-1.22.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:0b5bcc1a9c4839e7e30b7b30dd47fe5e7e44fb7054ec29b5bb8d526aa1041093", size = 357140, upload-time = "2025-10-06T14:12:08.362Z" }, + { url = "https://files.pythonhosted.org/packages/cf/72/6a85bba382f22cf78add705d8c3731748397d986e197e53ecc7835e76de7/yarl-1.22.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c0232bce2170103ec23c454e54a57008a9a72b5d1c3105dc2496750da8cfa47c", size = 341473, upload-time = "2025-10-06T14:12:10.994Z" }, + { url = "https://files.pythonhosted.org/packages/35/18/55e6011f7c044dc80b98893060773cefcfdbf60dfefb8cb2f58b9bacbd83/yarl-1.22.0-cp314-cp314t-win32.whl", hash = "sha256:8009b3173bcd637be650922ac455946197d858b3630b6d8787aa9e5c4564533e", size = 89056, upload-time = "2025-10-06T14:12:13.317Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/0f0dccb6e59a9e7f122c5afd43568b1d31b8ab7dda5f1b01fb5c7025c9a9/yarl-1.22.0-cp314-cp314t-win_amd64.whl", hash = "sha256:9fb17ea16e972c63d25d4a97f016d235c78dd2344820eb35bc034bc32012ee27", size = 96292, upload-time = "2025-10-06T14:12:15.398Z" }, + { url = "https://files.pythonhosted.org/packages/48/b7/503c98092fb3b344a179579f55814b613c1fbb1c23b3ec14a7b008a66a6e/yarl-1.22.0-cp314-cp314t-win_arm64.whl", hash = "sha256:9f6d73c1436b934e3f01df1e1b21ff765cd1d28c77dfb9ace207f746d4610ee1", size = 85171, upload-time = "2025-10-06T14:12:16.935Z" }, + { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] diff --git a/deployment/ecr/gaab-strands-common/README.md b/deployment/ecr/gaab-strands-common/README.md new file mode 100644 index 00000000..683a8c29 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/README.md @@ -0,0 +1,47 @@ +# GAAB Strands Common Library + +Shared library for GAAB Strands agents providing common functionality for: +- Runtime streaming +- DynamoDB configuration management +- Data models +- Tool wrapping and event emission +- Base agent patterns + +## Installation + +```bash +uv sync +``` + +## Usage + +```python +from gaab_strands_common.runtime_streaming import RuntimeStreaming +from gaab_strands_common.ddb_helper import DynamoDBHelper +from gaab_strands_common.models import AgentConfig +from gaab_strands_common.tool_wrapper import wrap_tool_with_events +from gaab_strands_common.base_agent import BaseAgent +``` + +## Development + +Install dependencies: +```bash +uv sync +``` + +Run tests: +```bash +uv run pytest +``` + +Run tests with coverage: +```bash +uv run pytest --cov=src/gaab_strands_common --cov-report=term-missing +``` + +Format code: +```bash +uv run black src/ test/ +uv run isort src/ test/ +``` diff --git a/deployment/ecr/gaab-strands-common/pyproject.toml b/deployment/ecr/gaab-strands-common/pyproject.toml new file mode 100644 index 00000000..242a12ff --- /dev/null +++ b/deployment/ecr/gaab-strands-common/pyproject.toml @@ -0,0 +1,32 @@ +[project] +name = "gaab-strands-common" +version = "0.1.0" +description = "Shared library for GAAB Strands agents" +readme = "README.md" +requires-python = ">=3.13" +license = { text = "Apache-2.0" } +authors = [{ name = "AWS" }] + +dependencies = [ + "pydantic>=2.0.0", + "boto3>=1.34.0", + "bedrock-agentcore>=0.1.0", + "strands-agents>=1.10.0", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/gaab_strands_common"] + +[tool.uv] +dev-dependencies = [ + "pytest>=8.0.0", + "pytest-cov>=4.1.0", + "pytest-asyncio>=0.23.0", + "black>=24.0.0", + "isort>=5.13.0", + "moto>=5.1.0", +] diff --git a/deployment/ecr/gaab-strands-common/pytest.ini b/deployment/ecr/gaab-strands-common/pytest.ini new file mode 100644 index 00000000..dd5843ba --- /dev/null +++ b/deployment/ecr/gaab-strands-common/pytest.ini @@ -0,0 +1,12 @@ +[pytest] +testpaths = test +python_files = test_*.py +python_classes = Test* +python_functions = test_* +addopts = + -v + --strict-markers + --tb=short +markers = + asyncio: marks tests as async (deselect with '-m "not asyncio"') +asyncio_mode = auto diff --git a/deployment/ecr/gaab-strands-common/scripts/run_unit_tests.sh b/deployment/ecr/gaab-strands-common/scripts/run_unit_tests.sh new file mode 100755 index 00000000..693e7e53 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/scripts/run_unit_tests.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Script to run unit tests for GAAB Strands Common + +set -e # Exit on any error + +# Change to the project root directory (one level up from scripts/) +cd "$(dirname "$0")/.." + +echo "🧪 Running tests for gaab-strands-common..." + +# Check if UV is available +if ! command -v uv &> /dev/null; then + echo "❌ ERROR: UV is not installed or not in PATH" + echo "" + echo "UV is required for running tests. Please install UV:" + echo " - pip install uv>=0.5.0" + echo " - brew install uv (macOS)" + echo " - https://docs.astral.sh/uv/getting-started/installation/" + echo "" + exit 1 +fi + +echo "ℹ️ Using UV version: $(uv --version)" + +# Sync dependencies and run tests +echo "ℹ️ Syncing dependencies..." +uv sync + +echo "ℹ️ Running tests with coverage..." +uv run pytest test/ -v --cov --cov-report=term-missing --cov-report=xml + +echo "" +echo "✅ Test run complete!" diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/__init__.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/__init__.py new file mode 100644 index 00000000..532eee79 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/__init__.py @@ -0,0 +1,104 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +GAAB Strands Common Library +Shared functionality for GAAB Strands agents +""" + +# Import submodules to ensure they get registered +import gaab_strands_common.custom_tools +import gaab_strands_common.multimodal +import gaab_strands_common.utils +from gaab_strands_common.base_agent import BaseAgent +from gaab_strands_common.constants import ( + ENV_AWS_REGION, + ENV_MEMORY_ID, + ENV_MEMORY_STRATEGY_ID, + ENV_USE_CASE_CONFIG_KEY, + ENV_USE_CASE_TABLE_NAME, +) + +# Import custom tools setup components +from gaab_strands_common.custom_tools.setup import ( + BaseCustomTool, + CustomToolsRegistry, + ToolCategory, + ToolMetadata, + ToolRequirements, + auto_attach_when, + custom_tool, + requires, +) +from gaab_strands_common.ddb_helper import DynamoDBHelper +from gaab_strands_common.mcp_tools_loader import MCPToolsLoader +from gaab_strands_common.models import ( + AgentBuilderParams, + AgentReference, + AgentsAsToolsParams, + BedrockLlmParams, + FileReference, + LlmParams, + MCPServerConfig, + MemoryConfig, + UseCaseConfig, + WorkflowConfig, + WorkflowParams, +) +from gaab_strands_common.multimodal.file_handler import FileHandler +from gaab_strands_common.multimodal.multimodal_processor import MultimodalRequestProcessor +from gaab_strands_common.runtime_streaming import RuntimeStreaming +from gaab_strands_common.strands_tools_registry import StrandsToolsRegistry +from gaab_strands_common.tool_wrapper import ToolEventEmitter, wrap_tool_with_events +from gaab_strands_common.tools_manager import ToolsManager +from gaab_strands_common.utils.constants import ( + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + RETRY_CONFIG, + USE_CASE_CONFIG_RECORD_KEY_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, +) +from gaab_strands_common.utils.helpers import retry_with_backoff + +__all__ = [ + "RuntimeStreaming", + "DynamoDBHelper", + "UseCaseConfig", + "LlmParams", + "BedrockLlmParams", + "AgentBuilderParams", + "MemoryConfig", + "MCPServerConfig", + "WorkflowConfig", + "WorkflowParams", + "AgentReference", + "AgentsAsToolsParams", + "FileReference", + "wrap_tool_with_events", + "ToolEventEmitter", + "BaseAgent", + "ENV_USE_CASE_TABLE_NAME", + "ENV_USE_CASE_CONFIG_KEY", + "ENV_AWS_REGION", + "ENV_MEMORY_ID", + "ENV_MEMORY_STRATEGY_ID", + "MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR", + "MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR", + "USE_CASE_CONFIG_RECORD_KEY_ENV_VAR", + "USE_CASE_CONFIG_TABLE_NAME_ENV_VAR", + "ToolsManager", + "StrandsToolsRegistry", + "MCPToolsLoader", + "retry_with_backoff", + "RETRY_CONFIG", + "FileHandler", + "MultimodalRequestProcessor", + "BaseCustomTool", + "ToolCategory", + "ToolMetadata", + "CustomToolsRegistry", + "ToolRequirements", + "auto_attach_when", + "custom_tool", + "requires", +] diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/base_agent.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/base_agent.py new file mode 100644 index 00000000..1e7d960d --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/base_agent.py @@ -0,0 +1,112 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Base Agent - Common patterns for agent initialization and configuration +""" + +import logging +import os +from typing import Optional + +from strands.models import BedrockModel + +from gaab_strands_common.models import LlmParams, UseCaseConfig +from gaab_strands_common.utils.helpers import build_guardrail_config, create_boto_config + +logger = logging.getLogger(__name__) + + +class BaseAgent: + """Base class providing common agent initialization patterns""" + + def __init__(self, region: str): + """ + Initialize base agent + + Args: + region: AWS region for Bedrock + """ + self.region = region + self.config: Optional[UseCaseConfig] = None + + def _create_model(self, llm_params: LlmParams) -> BedrockModel: + """ + Create Bedrock model from LLM parameters + + Args: + llm_params: LLM configuration parameters + + Returns: + BedrockModel: Configured Bedrock model instance + """ + bedrock_params = llm_params.bedrock_llm_params + + # Log environment and configuration for debugging + logger.debug(f"Environment AWS_REGION: {os.getenv('AWS_REGION')}") + logger.debug(f"Configured region: {self.region}") + logger.info(f"Inference type: {bedrock_params.bedrock_inference_type}") + logger.info(f"Model identifier: {bedrock_params.model_identifier}") + + # Check if this is a cross-region inference profile + is_cross_region_profile = bedrock_params.model_identifier.startswith("us.") + if is_cross_region_profile: + logger.info(f"Cross-region inference profile detected: {bedrock_params.model_identifier}") + + # Log detailed model configuration + logger.debug( + f"🔧 BedrockModel configuration:\n" + f" - model_id: {bedrock_params.model_identifier}\n" + f" - region_name: {self.region}\n" + f" - temperature: {llm_params.temperature}\n" + f" - streaming: {llm_params.streaming}" + ) + + # Create Botocore Config with retry settings and user agent + boto_config = create_boto_config(self.region) + + # Build guardrail configuration if available + guardrail_config = build_guardrail_config(bedrock_params) + + model_config = { + "model_id": bedrock_params.model_identifier, + "region_name": self.region, + "temperature": llm_params.temperature, + "streaming": llm_params.streaming, + "boto_client_config": boto_config, + **guardrail_config, + } + + bedrock_model = BedrockModel(**model_config) + logger.info("BedrockModel instance created successfully") + + return bedrock_model + + def _validate_use_case_type(self, config_dict: dict, expected_type: str): + """ + Validate use case type from configuration + + Args: + config_dict: Configuration dictionary + expected_type: Expected use case type + + Raises: + ValueError: If use case type doesn't match expected + """ + use_case_type = config_dict.get("UseCaseType") + if use_case_type != expected_type: + raise ValueError(f"Expected {expected_type}, got {use_case_type}") + + def get_config(self) -> UseCaseConfig: + """ + Get the agent configuration + + Returns: + UseCaseConfig: Use case configuration + + Raises: + ValueError: If configuration not loaded + """ + if not self.config: + raise ValueError("Configuration not loaded") + return self.config diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/constants.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/constants.py new file mode 100644 index 00000000..11cf1e42 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/constants.py @@ -0,0 +1,16 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Constants for GAAB Strands agents. + +This module defines shared constants used across all GAAB Strands agents, +including environment variable names and configuration keys. +""" + +# Environment variable names +ENV_USE_CASE_TABLE_NAME = "USE_CASE_TABLE_NAME" +ENV_USE_CASE_CONFIG_KEY = "USE_CASE_CONFIG_KEY" +ENV_AWS_REGION = "AWS_REGION" +ENV_MEMORY_ID = "MEMORY_ID" +ENV_MEMORY_STRATEGY_ID = "MEMORY_STRATEGY_ID" diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/__init__.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/__init__.py new file mode 100644 index 00000000..347c718c --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/__init__.py @@ -0,0 +1,81 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Custom Tools - Extensible custom tools system for GAAB Strands agents +""" + +# Auto-import all custom tools to ensure they get registered +import importlib +import logging +import pkgutil + +# Import setup components +from .setup import ( + BaseCustomTool, + CustomToolsRegistry, + ToolMetadata, + ToolRequirements, + auto_attach_when, + custom_tool, + requires, +) + +logger = logging.getLogger(__name__) + + +def _discover_and_import_tools(): + """ + Auto-discover and automatically import all custom tools in this package + to ensure they get registered with the CustomToolsRegistry. + Manually importing each one is not required. + """ + try: + # Get the current package path + package_path = __path__ + package_name = __name__ + + logger.info(f"Starting custom tools discovery in package: {package_name}") + + # Iterate through all modules in the custom_tools package + for module_info in pkgutil.iter_modules(package_path): + module_name = module_info.name + + logger.debug(f"Found module: {module_name}") + + # Skip the setup package + if module_name == "setup": + logger.debug("Skipping setup module") + continue + + try: + # Import the module to trigger tool registration + full_module_name = f"{package_name}.{module_name}" + logger.info(f"Importing custom tool module: {full_module_name}") + importlib.import_module(full_module_name) + logger.info(f"Successfully imported custom tool module: {module_name}") + except Exception as e: + logger.warning(f"Failed to import custom tool module {module_name}: {e}") + + # Log the final registry state + from .setup import CustomToolsRegistry + + registered_tools = CustomToolsRegistry.list_tool_ids() + logger.info(f"Custom tools discovery complete. Registered tools: {registered_tools}") + + except Exception as e: + logger.error(f"Error during custom tools discovery: {e}") + + +# Perform auto-discovery when this package is imported +_discover_and_import_tools() + +__all__ = [ + "CustomToolsRegistry", + "BaseCustomTool", + "custom_tool", + "requires", + "auto_attach_when", + "ToolMetadata", + "ToolRequirements", +] diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/s3_file_reader.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/s3_file_reader.py new file mode 100644 index 00000000..d901c738 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/s3_file_reader.py @@ -0,0 +1,226 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +S3 File Reader tool for multimodal content processing. + +This module provides functionality to read files from S3 buckets and return them +in appropriate formats for use with AI models. It supports both image and document +formats with automatic format detection and proper error handling. +""" + +import logging +import os +from typing import Any, Dict, cast + +import boto3 +from botocore.exceptions import ClientError +from strands import tool +from strands.types.tools import ToolResult, ToolUse + +from ..utils.constants import ( + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + SUPPORTED_DOCUMENT_FORMATS, + SUPPORTED_IMAGE_FORMATS, + USE_CASE_UUID, +) +from .setup import BaseCustomTool, auto_attach_when, custom_tool, requires +from .setup.metadata import ToolCategory + +logger = logging.getLogger(__name__) + + +@custom_tool( + tool_id="s3_file_reader", + name="S3 File Reader", + description="Read files from S3 bucket for multimodal content", + category=ToolCategory.MULTIMODAL, +) +@requires( + config_params=["LlmParams.MultimodalParams.MultimodalEnabled"], + env_vars=[MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, USE_CASE_UUID], +) +@auto_attach_when( + lambda config: config.get("LlmParams", {}).get("MultimodalParams", {}).get("MultimodalEnabled", False) +) +class S3FileReaderTool(BaseCustomTool): + """ + S3 File Reader tool that automatically attaches when multimodal is enabled + """ + + def __init__(self, config: Dict[str, Any], region: str): + super().__init__(config, region) + self.bucket_name = os.getenv(MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR) + self.metadata_table_name = os.getenv(MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR) + self.use_case_uuid = os.getenv(USE_CASE_UUID) + self.s3_client = boto3.client("s3", region_name=self.region) + + logger.debug(f"Initialized S3FileReaderTool for bucket: {self.bucket_name}") + + @tool + def s3_file_reader(self, tool_input: ToolUse) -> ToolResult: + """ + Read files from S3 and return content in model-readable format. + + Supports images (png, jpg, jpeg, gif, webp) and documents (pdf, csv, doc, docx, + xls, xlsx, html, txt, md). Provide only the S3 key (e.g., 'folder/file.jpg'), + not the full S3 URI. Do not include the bucket name or s3:// prefix. The tool + automatically detects file type and formats content appropriately for processing. + + Args: + tool_input: ToolUse object containing: + - s3_key (str, required): S3 object key/path (e.g., 'uploads/document.pdf') + + + Returns: + ToolResult with status "success" or "error": + - Success: Returns image or document block with format and binary content + - Error: Returns descriptive error message for invalid input, unsupported format, + file not found, or S3 access issues + """ + # Initialize variables with defaults + tool_use_id = "unknown" + s3_key = "unknown" + + try: + tool_use_id = tool_input["toolUseId"] + tool_use_input = tool_input["input"] + + if "s3_key" not in tool_use_input: + return self._create_error_result(tool_use_id, "S3 key is required") + + s3_key = tool_use_input["s3_key"] + + # Validate and normalize the S3 key + validation_result = self._validate_and_normalize_s3_key(s3_key) + if validation_result.startswith("Error:"): # Error message + return self._create_error_result(tool_use_id, validation_result) + + s3_key = validation_result # Normalized key + logger.debug(f"Reading S3 file: s3://{self.bucket_name}/{s3_key}") + + response = self.s3_client.get_object(Bucket=self.bucket_name, Key=s3_key) + content = response["Body"].read() + + file_extension = self.get_file_extension(s3_key) + + if file_extension == "unsupported": + supported_formats = " | ".join(sorted(SUPPORTED_IMAGE_FORMATS | SUPPORTED_DOCUMENT_FORMATS)) + error_msg = f"Unsupported file type for '{s3_key}'. Supported formats: {supported_formats}" + return self._create_error_result(tool_use_id, error_msg) + + file_type = self.determine_file_type(file_extension) + + logger.debug(f"Successfully read file: {s3_key} ({len(content)} bytes)") + + if file_type == "image": + return self._create_image_result(tool_use_id, content, file_extension) + + if file_type == "document": + return self._create_document_result(tool_use_id, content, file_extension, s3_key) + + except ClientError as e: + error_code = e.response.get("Error", {}).get("Code", "Unknown") + error_message = e.response.get("Error", {}).get("Message", str(e)) + + if error_code == "NoSuchKey" or error_code == "AccessDenied": + logger.warning(f"File not found in S3: s3://{self.bucket_name}/{s3_key}") + error_msg = f"File '{s3_key}' not found. The file may have been deleted or moved." + return self._create_error_result(tool_use_id, error_msg) + + logger.error(f"S3 ClientError reading file {s3_key}: {error_code} - {error_message}") + error_msg = f"Error reading file '{s3_key}': {error_code} - {error_message}" + return self._create_error_result(tool_use_id, error_msg) + except Exception as e: + logger.error(f"Unexpected error reading S3 file {s3_key}: {str(e)}") + error_msg = f"Unexpected error reading file '{s3_key}': {str(e)}" + return self._create_error_result(tool_use_id, error_msg) + + def get_file_extension(self, s3_key: str) -> str: + """Extract file extension from S3 key""" + if "." in s3_key: + extension = s3_key.split(".")[-1] + return extension + return "unsupported" + + def determine_file_type(self, file_extension: str) -> str: + """Determine if file is document or image based on extension""" + if file_extension in SUPPORTED_IMAGE_FORMATS: + return "image" + if file_extension in SUPPORTED_DOCUMENT_FORMATS: + return "document" + raise ValueError(f"Unsupported file type with extension {file_extension}.") + + def _validate_and_normalize_s3_key(self, s3_key: str) -> str: + """ + Validate S3 key input. + + This method only accepts the key portion (e.g., "folder/file.jpg"). + S3 URIs (s3://bucket/key) are rejected to encourage proper usage. + + Args: + s3_key: The input S3 key (key portion only) + + Returns: + Either the normalized key string, or an error message string + """ + # Validate inputs + if not s3_key or not s3_key.strip(): + return "Error: S3 key cannot be empty" + + original_s3_key = s3_key + s3_key = s3_key.strip() + + # Reject S3 URIs - only accept key portion + if s3_key.startswith("s3://"): + logger.error( + f"Received S3 URI instead of key. Please provide only the key portion (without s3://bucket/): {s3_key}" + ) + return f"Error: Invalid input '{original_s3_key}'. Please provide only the S3 key (e.g., 'folder/file.jpg'), not the full S3 URI" + + return s3_key + + def _create_error_result(self, tool_use_id: str, error_message: str) -> ToolResult: + """Create a ToolResult for error cases.""" + return { + "toolUseId": tool_use_id, + "status": "error", + "content": [{"text": error_message}], + } + + def _create_image_result(self, tool_use_id: str, content: bytes, image_extension: str) -> ToolResult: + """Create a ToolResult for image files.""" + if image_extension not in SUPPORTED_IMAGE_FORMATS: + raise ValueError( + f"Unsupported image extension: {image_extension}. Supported extensions: {SUPPORTED_IMAGE_FORMATS}" + ) + + # Normalize for Bedrock Converse API compatibility + if image_extension == "jpg": + image_extension = "jpeg" + + return { + "toolUseId": tool_use_id, + "status": "success", + "content": [{"image": {"format": image_extension, "source": {"bytes": content}}}], + } + + def _create_document_result( + self, tool_use_id: str, content: bytes, document_extension: str, s3_key: str + ) -> ToolResult: + """Create a ToolResult for document files""" + if document_extension not in SUPPORTED_DOCUMENT_FORMATS: + raise ValueError( + f"Unsupported document extension: {document_extension}. Supported extensions: {SUPPORTED_DOCUMENT_FORMATS}" + ) + + document_name = os.path.splitext(os.path.basename(s3_key))[0] # Use filename without extension + + document_block = {"name": document_name, "format": document_extension, "source": {"bytes": content}} + + return { + "toolUseId": tool_use_id, + "status": "success", + "content": [{"document": cast(Dict[str, Any], document_block)}], + } diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/__init__.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/__init__.py new file mode 100644 index 00000000..4414ed0c --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/__init__.py @@ -0,0 +1,22 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Custom Tools Setup - Core components for custom tools system +""" + +from .base_tool import BaseCustomTool +from .decorators import auto_attach_when, custom_tool, requires +from .metadata import ToolCategory, ToolMetadata, ToolRequirements +from .registry import CustomToolsRegistry + +__all__ = [ + "CustomToolsRegistry", + "BaseCustomTool", + "custom_tool", + "requires", + "auto_attach_when", + "ToolCategory", + "ToolMetadata", + "ToolRequirements", +] diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/base_tool.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/base_tool.py new file mode 100644 index 00000000..6e1a32c1 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/base_tool.py @@ -0,0 +1,127 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Base class for custom tools that are also strands tools +""" + +import logging +import os +from typing import Any, Dict + +logger = logging.getLogger(__name__) + + +class BaseCustomTool: + """ + Base class that all custom tools must inherit from. + + This class provides: + - Consistent initialization pattern + - Automatic requirement validation + - Access to system configuration + - Standard interface for tool creation + + Ensure that all child classes have a tool with @tool decorator to actually + allow tool invocation + """ + + def __init__(self, config: Dict[str, Any], region: str): + """ + Initialize the custom tool + + Args: + config: Full configuration dictionary from DynamoDB + region: AWS region + """ + self.config = config + self.region = region + self._validate_requirements() + + def _get_tool_identity(self): + """Get tool name and ID for error messages.""" + tool_name = "Unknown Tool" + tool_id = "unknown" + if hasattr(self.__class__, "metadata") and self.__class__.metadata is not None: + tool_name = getattr(self.__class__.metadata, "name", self.__class__.metadata.tool_id) + tool_id = self.__class__.metadata.tool_id + return tool_name, tool_id + + def _log_validation_error(self, tool_name, tool_id, error_msg, missing_items, item_type): + """Log detailed validation error with formatting.""" + logger.error("=" * 80) + logger.error(f"TOOL SETUP ERROR: {tool_name}") + logger.error(error_msg) + logger.error(f"Tool '{tool_id}' will NOT be available.") + logger.error(f"Please configure these {item_type} to enable this tool:") + for item in missing_items: + logger.error(f"- {item}") + logger.error("=" * 80) + + def _validate_env_vars(self, env_vars): + """Validate required environment variables exist.""" + missing_env_vars = [env_var for env_var in env_vars if not os.getenv(env_var)] + + if missing_env_vars: + tool_name, tool_id = self._get_tool_identity() + error_msg = f"Missing required environment variables for {tool_name}: {', '.join(missing_env_vars)}" + self._log_validation_error(tool_name, tool_id, error_msg, missing_env_vars, "environment variables") + raise ValueError(error_msg) + + def _validate_config_params(self, config_params): + """Validate required configuration parameters exist.""" + missing_config_params = [param for param in config_params if self._get_config_param(param) is None] + + if missing_config_params: + tool_name, tool_id = self._get_tool_identity() + error_msg = f"Missing required configuration parameters for {tool_name}: {', '.join(missing_config_params)}" + self._log_validation_error( + tool_name, tool_id, error_msg, missing_config_params, "parameters in your use case configuration" + ) + raise ValueError(error_msg) + + def _validate_requirements(self): + """ + Validate all requirements are met before tool can be used. + + This method checks: + - Environment variables exist and have values + - Configuration parameters exist in the config dictionary + + Raises: + ValueError: If any required dependency is missing + """ + if not hasattr(self.__class__, "_requirements") or self.__class__._requirements is None: + return + + req = self.__class__._requirements + + if req.env_vars: + self._validate_env_vars(req.env_vars) + + if req.config_params: + self._validate_config_params(req.config_params) + + logger.info(f"Successfully validated {self.__class__.metadata.tool_id} tool to be used.") + + def _get_config_param(self, param_path: str, default=None): + """ + Get config parameter using dot notation like 'LlmParams.MultimodalParams.MultimodalEnabled' + + Args: + param_path: Dot-separated path to the parameter + default: Default value to return if parameter doesn't exist + + Returns: + Parameter value or default if not found + """ + keys = param_path.split(".") + current = self.config + + for key in keys: + if isinstance(current, dict) and key in current: + current = current[key] + else: + return default + + return current diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/decorators.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/decorators.py new file mode 100644 index 00000000..55b752aa --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/decorators.py @@ -0,0 +1,62 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Decorators for custom tools +""" + +from typing import Any, Callable, Dict, List + +from .metadata import ToolCategory, ToolMetadata, ToolRequirements +from .registry import CustomToolsRegistry + + +def custom_tool(tool_id: str, name: str, description: str, category: ToolCategory = ToolCategory.GENERAL, **kwargs): + """ + Define tool metadata and register the tool. Sets the _metadata class variable + + Args: + id: Unique tool identifier + name: Human-readable tool name + description: Tool description + category: Tool category (ToolCategory enum or string, default: ToolCategory.GENERAL) + **kwargs: Additional metadata fields + """ + + def decorator(cls): + cls.metadata = ToolMetadata(tool_id=tool_id, name=name, description=description, category=category, **kwargs) + return CustomToolsRegistry.register(cls) + + return decorator + + +def requires(env_vars: List[str] = None, config_params: List[str] = None): + """ + Define tool requirements. Sets the _requirements class variable + + Args: + env_vars: List of required environment variables (defaults to empty list) + config_params: List of required config parameters (dot notation) which the tool might use (defaults to empty list) + """ + + def decorator(cls): + cls._requirements = ToolRequirements(env_vars=env_vars, config_params=config_params) + return cls + + return decorator + + +def auto_attach_when(condition: Callable[[Dict[str, Any]], bool]): + """ + Define auto-attachment condition. Sets the _auto_condition class variable + + Args: + condition: Function that takes config dict and returns bool. + If the condition evaluates to true, then the custom tool is auto-attached at the time of loading tools. + """ + + def decorator(cls): + cls._auto_condition = condition + return cls + + return decorator diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/metadata.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/metadata.py new file mode 100644 index 00000000..a6507d08 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/metadata.py @@ -0,0 +1,46 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Metadata classes for custom tools +""" + +from dataclasses import dataclass, field +from enum import Enum +from typing import List + + +class ToolCategory(Enum): + """Categories for custom tools based on their interface and functionality""" + + MULTIMODAL = "Multimodal" + """Content that bridges different media types - Image-to-text, document parsing, media conversion""" + + GENERAL = "General" + """General purpose tools that don't fit into specific categories""" + + +@dataclass +class ToolMetadata: + """Metadata for custom tools""" + + tool_id: str + name: str + description: str + category: ToolCategory = ToolCategory.GENERAL + version: str = "1.0.0" + + +@dataclass +class ToolRequirements: + """Requirements for custom tools""" + + env_vars: List[str] = field(default_factory=list) + config_params: List[str] = field(default_factory=list) + + def __post_init__(self): + """Initialize empty lists if None""" + if self.env_vars is None: + self.env_vars = [] + if self.config_params is None: + self.config_params = [] diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/registry.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/registry.py new file mode 100644 index 00000000..efb941b6 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/custom_tools/setup/registry.py @@ -0,0 +1,184 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tool Registry - Centralized registry for custom tools +""" + +import logging +from typing import Any, Dict, List + +logger = logging.getLogger(__name__) + + +class CustomToolsRegistry: + """ + Centralized registry that automatically collects all custom tools + when they are decorated with @custom_tool + """ + + def __init__(self): + """Initialize registry with available custom tools""" + self._available_tools: Dict[str, "BaseCustomTool"] = {} + self._tool_names: Dict[str, str] = {} # Maps tool name to tool_id for deduplication + self._tool_method_names: Dict[str, List[str]] = {} # Maps tool_id to list of @tool method names + + @classmethod + def register(cls, tool_class): + """ + Register a tool class in the registry with deduplication. + + Also discovers all @tool decorated methods in the class during registration. + + Args: + tool_class: The tool class to register + + Returns: + The tool class (for decorator chaining) + + Raises: + ValueError: If tool ID already exists + """ + # Get the singleton instance + instance = cls._get_instance() + + tool_id = tool_class.metadata.tool_id + logger.info(f"Attempting to register custom tool: {tool_id}") + + if tool_id in instance._available_tools: + raise ValueError(f"Tool ID '{tool_id}' already registered") + + # Check for tool name conflicts (deduplication) + tool_name = tool_class.metadata.name + if tool_name in instance._tool_names: + existing_tool_id = instance._tool_names[tool_name] + logger.warning( + f"Tool name conflict detected: '{tool_name}' is used by both '{existing_tool_id}' and '{tool_id}'. " + f"Keeping first registration: '{existing_tool_id}'" + ) + # Don't register the duplicate, but return the class for decorator chaining + return tool_class + + tool_method_names = cls._discover_tool_methods(tool_class) # Discover all @tool decorated methods + + instance._available_tools[tool_id] = tool_class + instance._tool_names[tool_name] = tool_id + instance._tool_method_names[tool_id] = tool_method_names + + logger.info( + f"Successfully registered custom tool: {tool_id} with {len(tool_method_names)} method(s): " + f"{', '.join(tool_method_names)} (total tools: {len(instance._available_tools)})" + ) + + # Log auto-attach condition if present + if hasattr(tool_class, "_auto_condition"): + logger.info(f"Tool {tool_id} has auto-attach condition") + else: + logger.info(f"Tool {tool_id} has no auto-attach condition") + + return tool_class + + @staticmethod + def _discover_tool_methods(tool_class) -> List[str]: + """ + Discover all @tool decorated methods in a tool class using MRO. + + Args: + tool_class: The tool class to inspect + + Returns: + List of method names that are decorated with @tool + """ + tool_method_names = [] + + for cls in tool_class.__mro__: + # Stop at BaseCustomTool + if cls.__name__ == "BaseCustomTool" or cls is object: + continue + + for name, attr in cls.__dict__.items(): + # Skip private methods and duplicates + if name.startswith("_") or name in tool_method_names: + continue + + # Check if it has tool_spec (decorated with @tool) + if callable(attr) and hasattr(attr, "tool_spec"): + tool_method_names.append(name) + + return tool_method_names + + @classmethod + def get_tool(cls, tool_id: str): + """Get a tool class by ID""" + instance = cls._get_instance() + return instance._available_tools.get(tool_id) + + @classmethod + def get_tool_method_names(cls, tool_id: str) -> List[str]: + """ + Get the list of @tool decorated method names for a tool. + Such as "s3_file_reader" can be passed as an ID and it will fetch the names of any + tools inside this custom tool + + Args: + tool_id: Tool identifier + + Returns: + List of method names, or empty list if tool not found + """ + instance = cls._get_instance() + return instance._tool_method_names.get(tool_id, []) + + @classmethod + def get_all_tools(cls) -> Dict[str, "BaseCustomTool"]: + """Get all registered tools""" + instance = cls._get_instance() + return instance._available_tools.copy() + + @classmethod + def list_tool_ids(cls) -> List[str]: + """Get list of all registered tool IDs""" + instance = cls._get_instance() + return list(instance._available_tools.keys()) + + @classmethod + def has_tool(cls, tool_id: str) -> bool: + """ + Check if a tool is available in the registry + + Args: + tool_id: Tool identifier + + Returns: + True if tool is available, False otherwise + """ + instance = cls._get_instance() + return tool_id in instance._available_tools + + @classmethod + def get_available_tool_ids(cls) -> List[str]: + """ + Get list of all available tool IDs + + Returns: + List of tool IDs + """ + return cls.list_tool_ids() + + @classmethod + def clear(cls): + """Clear all registered tools (useful for testing)""" + instance = cls._get_instance() + instance._available_tools.clear() + instance._tool_names.clear() + instance._tool_method_names.clear() + + # Singleton pattern for class-level access + _instance = None + + @classmethod + def _get_instance(cls): + """Get singleton instance""" + if cls._instance is None: + cls._instance = cls() + return cls._instance diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/ddb_helper.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/ddb_helper.py new file mode 100644 index 00000000..6b4a301b --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/ddb_helper.py @@ -0,0 +1,132 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +DynamoDB helper for use case management +""" +import logging +import os +from typing import Any, Dict, List, Optional + +import boto3 +from bedrock_agentcore.identity.auth import requires_access_token + +logger = logging.getLogger(__name__) + + +class DynamoDBHelper: + """Simple DynamoDB helper""" + + def __init__(self, table_name: str, region: str): + """Initialize DynamoDB helper""" + dynamodb = boto3.resource("dynamodb", region_name=region) + self.table = dynamodb.Table(table_name) + + @requires_access_token( + provider_name=os.environ.get("M2M_IDENTITY_NAME", ""), + scopes=[], + auth_flow="M2M", + ) + def get_config(self, key: str, access_token: Optional[str] = None) -> Dict[str, Any]: + """Get config from DDB item by key""" + try: + response = self.table.get_item(Key={"key": key}) + item = response.get("Item") + + if not item: + raise ValueError(f"Configuration not found: {key}") + + config = item.get("config") + if not config: + raise ValueError(f"No config field found for key: {key}") + + return config + except Exception as e: + logger.error(f"Error fetching config for key {key}: {e}") + raise + + @requires_access_token( + provider_name=os.environ.get("M2M_IDENTITY_NAME", ""), + scopes=[], + auth_flow="M2M", + ) + def get_mcp_configs(self, mcp_ids: List[str], access_token: Optional[str] = None) -> List[Dict[str, Any]]: + """ + Fetch multiple MCP server configurations by McpId list. + + Args: + mcp_ids: List of MCP server IDs to fetch + access_token: Access token for authentication (injected by decorator) + + Returns: + List of MCP server configuration dictionaries + + Raises: + ValueError: If a configuration has invalid UseCaseType + """ + if not mcp_ids: + logger.info("No MCP server IDs provided, returning empty list") + return [] + + configs = [] + errors = [] + + for mcp_id in mcp_ids: + try: + config = self._fetch_and_validate_mcp_config(mcp_id) + if config: + configs.append(config) + except ValueError as e: + logger.error(f"Validation error for MCP server {mcp_id}: {e}") + errors.append((mcp_id, str(e))) + raise + except Exception as e: + logger.warning(f"Failed to fetch MCP server {mcp_id}: {e}") + errors.append((mcp_id, str(e))) + + if errors: + logger.warning(f"Failed to load {len(errors)} MCP server(s) out of {len(mcp_ids)}") + + logger.info(f"Successfully loaded {len(configs)} MCP server configuration(s)") + return configs + + def _fetch_and_validate_mcp_config(self, mcp_id: str) -> Optional[Dict[str, Any]]: + """ + Fetch and validate a single MCP server configuration. + + Args: + mcp_id: MCP server ID to fetch + + Returns: + MCP server configuration dictionary or None if not found + + Raises: + ValueError: If UseCaseType is not "MCPServer" + """ + try: + response = self.table.get_item(Key={"key": mcp_id}) + item = response.get("Item") + + if not item: + logger.warning(f"MCP server configuration not found: {mcp_id}") + return None + + config = item.get("config") + if not config: + logger.warning(f"No config field found for MCP server: {mcp_id}") + return None + + use_case_type = config.get("UseCaseType") + if use_case_type != "MCPServer": + raise ValueError( + f"Invalid UseCaseType for MCP server {mcp_id}: expected 'MCPServer', got '{use_case_type}'" + ) + + logger.debug(f"Successfully fetched and validated MCP server: {mcp_id}") + return config + + except ValueError: + raise + except Exception as e: + logger.error(f"Error fetching MCP server {mcp_id}: {e}") + raise diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/mcp_tools_loader.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/mcp_tools_loader.py new file mode 100644 index 00000000..b5e3cfae --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/mcp_tools_loader.py @@ -0,0 +1,334 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +MCPToolsLoader - Orchestrates MCP tool discovery from multiple servers + +This module provides the main orchestrator for loading MCP tools from both +Gateway and Runtime MCP servers. It handles configuration fetching, server +categorization, parallel tool discovery, and comprehensive error handling. +""" + +import logging +import os +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import Any, Dict, List, Tuple + +from bedrock_agentcore.identity.auth import requires_access_token +from gaab_strands_common.ddb_helper import DynamoDBHelper +from gaab_strands_common.models import GatewayMCPParams, MCPServerConfig, RuntimeMCPParams +from mcp.client.streamable_http import streamablehttp_client +from strands.tools.mcp import MCPClient + +logger = logging.getLogger(__name__) + + +class MCPToolsLoader: + """ + Orchestrates MCP tool discovery and loading from Gateway and Runtime MCP servers. + + This class handles: + - Categorizing servers by type (Gateway and Runtime) + - Parallel tool discovery for performance + - Comprehensive error handling and logging + """ + + def __init__(self, region: str): + """ + Initialize MCPToolsLoader with AWS region. + + Args: + region: AWS region for MCP clients + """ + self.region = region + self._active_mcp_clients = [] # Keep MCP clients alive for tool execution + logger.info(f"Initialized MCPToolsLoader for region: {region}") + + def load_tools(self, mcp_servers: List[Dict[str, str]]) -> List[Any]: + """ + Load tools from all configured MCP servers. + + This is the main entry point for MCP tool loading. It orchestrates: + 1. Categorizing servers by type (Gateway vs Runtime) + 2. Discovering tools from each server in parallel + 3. Converting tools to Strands format + 4. Handling errors gracefully + + Args: + mcp_servers: List of MCP server dicts with keys: + - use_case_id: Server identifier + - url: MCP server endpoint URL + - type: Either 'gateway' or 'runtime' + + Returns: + List of Strands-compatible tool objects + + Raises: + No exceptions are raised - errors are logged and processing continues + """ + if not mcp_servers: + logger.info("No MCP servers provided, returning empty tools list") + return [] + + logger.info(f"Starting MCP tool loading for {len(mcp_servers)} server(s)") + + try: + gateway_servers, runtime_servers = self._categorize_servers(mcp_servers) + + logger.info(f"Categorized servers: {len(gateway_servers)} Gateway, {len(runtime_servers)} Runtime") + + all_tools = self._discover_tools_parallel(gateway_servers, runtime_servers) + + logger.info(f"Successfully loaded {len(all_tools)} total tools from MCP servers") + return all_tools + + except Exception as e: + logger.error(f"Unexpected error in load_tools: {e}") + return [] + + def _categorize_servers( + self, mcp_servers: List[Dict[str, str]] + ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]: + """ + Separate Gateway and Runtime servers based on type field. + + Args: + mcp_servers: List of MCP server dicts with use_case_id, url, type + + Returns: + Tuple of (gateway_servers, runtime_servers) where each is a list of dicts + containing server name and url + """ + gateway_servers = [] + runtime_servers = [] + + for server in mcp_servers: + try: + server_type = server.get("type") + use_case_id = server.get("use_case_id") + url = server.get("url") + + # Validate required fields + if not all([server_type, use_case_id, url]): + logger.warning(f"MCP server missing required fields: {server}, skipping") + continue + + if server_type == "gateway": + gateway_servers.append({"name": use_case_id, "url": url}) + logger.debug(f"Categorized '{use_case_id}' as Gateway server") + elif server_type == "runtime": + runtime_servers.append({"name": use_case_id, "url": url}) + logger.debug(f"Categorized '{use_case_id}' as Runtime server") + else: + logger.warning(f"Invalid server type '{server_type}' for '{use_case_id}', skipping") + + except Exception as e: + logger.error(f"Error categorizing server: {e}") + continue + + return gateway_servers, runtime_servers + + def _discover_tools_parallel( + self, gateway_servers: List[Dict[str, Any]], runtime_servers: List[Dict[str, Any]] + ) -> List[Any]: + """ + Discover tools from all servers in parallel for performance. + + Uses ThreadPoolExecutor to parallelize network calls to multiple MCP servers. + Each server's tool discovery is independent and failures don't affect others. + + Args: + gateway_servers: List of Gateway server configurations + runtime_servers: List of Runtime server configurations + + Returns: + Combined list of all Strands-compatible tools from all servers + """ + all_tools = [] + total_servers = len(gateway_servers) + len(runtime_servers) + + if total_servers == 0: + logger.info("No servers to discover tools from") + return [] + + logger.info(f"Starting parallel tool discovery for {total_servers} server(s)") + + max_workers = min(total_servers, 10) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [] + + for server in gateway_servers: + future = executor.submit(self._discover_gateway_tools, server["name"], server["url"]) + futures.append(("Gateway", server["name"], future)) + + for server in runtime_servers: + future = executor.submit(self._discover_runtime_tools, server["name"], server["url"]) + futures.append(("Runtime", server["name"], future)) + + for server_type, server_name, future in futures: + try: + tools = future.result(timeout=60) + if tools: + all_tools.extend(tools) + logger.info(f"Discovered {len(tools)} tool(s) from {server_type} server '{server_name}'") + else: + logger.info(f"No tools discovered from {server_type} server '{server_name}'") + except Exception as e: + logger.error(f"Error discovering tools from {server_type} server '{server_name}': {e}") + continue + + logger.info(f"Parallel tool discovery complete: {len(all_tools)} total tools") + return all_tools + + def _discover_gateway_tools(self, server_name: str, gateway_url: str) -> List[Any]: + """ + Discover tools from a Gateway MCP server using provided URL. + + Keeps the MCP client alive by storing it so tools remain functional. + + Args: + server_name: Name of the MCP server for logging + gateway_url: Gateway endpoint URL + + Returns: + List of Strands-compatible tools from this server + """ + logger.info(f"[MCP SERVER PROCESSING] Server: '{server_name}', Type: Gateway, Status: Starting discovery") + + try: + logger.debug(f"Gateway URL: {gateway_url}") + + captured_token = None + + @requires_access_token( + provider_name=os.environ.get("M2M_IDENTITY_NAME", ""), + scopes=[], + auth_flow="M2M", + ) + def get_token(access_token: str = None) -> str: + nonlocal captured_token + captured_token = access_token + return access_token + + get_token() + + if not captured_token: + logger.error(f"Failed to retrieve access token for '{server_name}'") + return [] + + gateway_client = MCPClient( + lambda: streamablehttp_client(gateway_url, headers={"Authorization": f"Bearer {captured_token}"}) + ) + + max_retries = 3 + retry_delay = 2 + + for attempt in range(max_retries): + try: + gateway_client.start() + break + except Exception as e: + if "429" in str(e) and attempt < max_retries - 1: + logger.warning(f"Rate limited, retrying in {retry_delay}s...") + time.sleep(retry_delay) + retry_delay *= 2 + else: + raise + + strands_tools = gateway_client.list_tools_sync() + + self._active_mcp_clients.append(gateway_client) + logger.debug(f"Stored active MCP client for '{server_name}'") + + if not strands_tools: + logger.info(f"[MCP SERVER PROCESSING] Server: '{server_name}', Type: Gateway, Tool Count: 0") + return [] + + logger.info( + f"[MCP SERVER PROCESSING] Server: '{server_name}', Type: Gateway, Tool Count: {len(strands_tools)}, Status: Success" + ) + return strands_tools + + except Exception as e: + logger.error(f"[MCP SERVER PROCESSING] Server: '{server_name}', Type: Gateway, Status: Failed") + logger.error(f"[TOOL DISCOVERY FAILURE] Server: '{server_name}', Error: {str(e)}") + logger.error(f"Error: {e}", exc_info=True) + return [] + + def _discover_runtime_tools(self, server_name: str, runtime_url: str) -> List[Any]: + """ + Discover tools from a Runtime MCP server using provided URL. + + Connects to AgentCore Runtime endpoint which proxies to ECS container. + Uses M2M token authentication. + + Args: + server_name: Name of the MCP server for logging + runtime_url: Runtime endpoint URL (already constructed) + + Returns: + List of Strands-compatible tools from this server + """ + logger.info(f"[MCP SERVER PROCESSING] Server: '{server_name}', Type: Runtime, Status: Starting discovery") + + try: + logger.debug(f"Runtime URL: {runtime_url}") + + captured_token = None + + @requires_access_token( + provider_name=os.environ.get("M2M_IDENTITY_NAME", ""), + scopes=[], + auth_flow="M2M", + ) + def get_token(access_token: str = None) -> str: + nonlocal captured_token + captured_token = access_token + return access_token + + get_token() + + if not captured_token: + logger.error(f"Failed to retrieve access token for Runtime server '{server_name}'") + return [] + + runtime_client = MCPClient( + lambda: streamablehttp_client(runtime_url, headers={"Authorization": f"Bearer {captured_token}"}) + ) + + max_retries = 3 + retry_delay = 2 + + for attempt in range(max_retries): + try: + runtime_client.start() + break + except Exception as e: + if "429" in str(e) and attempt < max_retries - 1: + logger.warning(f"Rate limited on Runtime server '{server_name}', retrying in {retry_delay}s...") + time.sleep(retry_delay) + retry_delay *= 2 + else: + raise + + strands_tools = runtime_client.list_tools_sync() + + self._active_mcp_clients.append(runtime_client) + logger.debug(f"Stored active Runtime MCP client for '{server_name}'") + + if not strands_tools: + logger.info(f"[MCP SERVER PROCESSING] Server: '{server_name}', Type: Runtime, Tool Count: 0") + return [] + + logger.info( + f"[MCP SERVER PROCESSING] Server: '{server_name}', Type: Runtime, Tool Count: {len(strands_tools)}, Status: Success" + ) + return strands_tools + + except Exception as e: + logger.error(f"[MCP SERVER PROCESSING] Server: '{server_name}', Type: Runtime, Status: Failed") + logger.error(f"[TOOL DISCOVERY FAILURE] Server: '{server_name}', Error: {str(e)}") + logger.error(f"Error details: {e}", exc_info=True) + return [] diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/models.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/models.py new file mode 100644 index 00000000..1e77f3e5 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/models.py @@ -0,0 +1,374 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Data models using Pydantic with auto-parsing +Includes workflow-specific extensions for agent orchestration +""" +import logging +from typing import Any, Dict, List, Optional + +from pydantic import BaseModel, Field, field_validator, model_validator + +logger = logging.getLogger(__name__) + + +class FileReference(BaseModel): + """Reference to a file in multimodal request""" + + file_reference: str = Field(alias="fileReference") + file_name: str = Field(alias="fileName") + + +class MemoryConfig(BaseModel): + """Memory configuration""" + + long_term_enabled: bool = Field(default=False, alias="LongTermEnabled") + + +# Tool and MCP Server Reference Models + + +class StrandsToolReference(BaseModel): + """Reference to a built-in Strands tool""" + + tool_id: str = Field(alias="ToolId") + + +class CustomToolReference(BaseModel): + """Reference to a custom tool""" + + tool_id: str = Field(alias="ToolId") + + +class MCPServerReference(BaseModel): + """Reference to an MCP server with complete connection details""" + + use_case_id: str = Field(alias="UseCaseId") + url: str = Field(alias="Url") + type: str = Field(alias="Type") + + @field_validator("type") + @classmethod + def validate_type(cls, v): + """Validate type is either 'gateway' or 'runtime'""" + if v not in ["gateway", "runtime"]: + raise ValueError(f"Type must be 'gateway' or 'runtime', got '{v}'") + return v + + +# MCP Server Configuration Models + + +class OutboundAuthParams(BaseModel): + """OAuth authentication parameters for Gateway MCP servers""" + + outbound_auth_provider_arn: str = Field(alias="OutboundAuthProviderArn") + outbound_auth_provider_type: str = Field(alias="OutboundAuthProviderType") + + +class TargetParams(BaseModel): + """Gateway target configuration""" + + target_name: str = Field(alias="TargetName") + target_type: str = Field(alias="TargetType") + target_description: Optional[str] = Field(default="", alias="TargetDescription") + schema_uri: Optional[str] = Field(default=None, alias="SchemaUri") + outbound_auth_params: Optional[OutboundAuthParams] = Field(default=None, alias="OutboundAuthParams") + + +class GatewayMCPParams(BaseModel): + """Gateway MCP server parameters""" + + gateway_url: str = Field(alias="GatewayUrl") + gateway_arn: str = Field(alias="GatewayArn") + gateway_id: str = Field(alias="GatewayId") + gateway_name: str = Field(alias="GatewayName") + target_params: List[TargetParams] = Field(default_factory=list, alias="TargetParams") + + +class RuntimeMCPParams(BaseModel): + """Runtime MCP server parameters""" + + ecr_uri: str = Field(alias="EcrUri") + runtime_arn: str = Field(alias="RuntimeArn") + runtime_url: str = Field(alias="RuntimeUrl") + + +class MCPParams(BaseModel): + """MCP server parameters (Gateway or Runtime)""" + + gateway_params: Optional[GatewayMCPParams] = Field(default=None, alias="GatewayParams") + runtime_params: Optional[RuntimeMCPParams] = Field(default=None, alias="RuntimeParams") + + +class MCPServerConfig(BaseModel): + """Complete MCP server configuration from DynamoDB""" + + use_case_name: str = Field(alias="UseCaseName") + use_case_type: str = Field(alias="UseCaseType") + mcp_params: MCPParams = Field(alias="MCPParams") + + +# Workflow-specific Models + + +class AgentParams(BaseModel): + """Parameters for a selected agent in workflow""" + + system_prompt: Optional[str] = Field(default=None, alias="SystemPrompt") + tools: Optional[List[StrandsToolReference]] = Field(default=None, alias="Tools") + mcp_servers: Optional[List[MCPServerReference]] = Field(default=None, alias="MCPServers") + memory_config: Optional[MemoryConfig] = Field(default=None, alias="MemoryConfig") + llm_params: Optional["LlmParams"] = Field(default=None, alias="LlmParams") + + @field_validator("tools", mode="before") + @classmethod + def parse_tools(cls, v): + """Parse tools field - supports dict, string, and StrandsToolReference formats""" + if v is None: + return None + if isinstance(v, list): + result = [] + for tool in v: + if isinstance(tool, dict): + result.append(StrandsToolReference(**tool)) + elif isinstance(tool, str): + # Support backward compatibility with string format + result.append(StrandsToolReference(ToolId=tool)) + elif isinstance(tool, StrandsToolReference): + result.append(tool) + return result + return [] + + @field_validator("mcp_servers", mode="before") + @classmethod + def parse_mcp_servers(cls, v): + """Parse mcp_servers field to handle dict format""" + if v is None: + return None + if isinstance(v, list): + result = [] + for server in v: + if isinstance(server, dict): + result.append(MCPServerReference(**server)) + elif isinstance(server, MCPServerReference): + result.append(server) + return result + return [] + + +class AgentReference(BaseModel): + """Reference to an agent with its parameters + + This represents a full agent configuration within a workflow, including: + - UseCaseId for unique identification + - UseCaseType, UseCaseName, UseCaseDescription for identification + - AgentBuilderParams for agent configuration (system prompt, tools, etc.) + - LlmParams for model configuration (optional, can override workflow default) + """ + + model_config = {"populate_by_name": True} + + use_case_id: str = Field(alias="UseCaseId") + use_case_type: str = Field(alias="UseCaseType") + use_case_name: str = Field(alias="UseCaseName") + use_case_description: Optional[str] = Field(default=None, alias="UseCaseDescription") + agent_builder_params: "AgentBuilderParams" = Field(alias="AgentBuilderParams") + llm_params: Optional["LlmParams"] = Field(default=None, alias="LlmParams") + + +class AgentsAsToolsParams(BaseModel): + """Parameters for agents-as-tools orchestration""" + + model_config = {"populate_by_name": True} + + agents: List[AgentReference] = Field(default_factory=list, alias="Agents") + + +class WorkflowParams(BaseModel): + """Workflow-specific parameters""" + + orchestration_pattern: Optional[str] = Field(default=None, alias="OrchestrationPattern") + system_prompt: Optional[str] = Field(default=None, alias="SystemPrompt") + memory_config: Optional[MemoryConfig] = Field(default=None, alias="MemoryConfig") + agents_as_tools_params: Optional[AgentsAsToolsParams] = Field(default=None, alias="AgentsAsToolsParams") + custom_tools: List[CustomToolReference] = Field(default_factory=list, alias="CustomTools") + + +class WorkflowConfig(BaseModel): + """Workflow configuration""" + + workflow_type: str = Field(alias="WorkflowType") + workflow_params: WorkflowParams = Field(alias="WorkflowParams") + + @classmethod + def from_ddb_config(cls, config: Dict[str, Any]) -> "WorkflowConfig": + """Create WorkflowConfig from DDB config dict""" + try: + return cls(**config) + except Exception as e: + logger.error(f"Error parsing workflow config: {e}") + raise ValueError(f"Error parsing workflow configuration: {e}") from e + + +class AgentBuilderParams(BaseModel): + """Agent builder parameters""" + + system_prompt: str = Field(alias="SystemPrompt") + tools: List[StrandsToolReference] = Field(default_factory=list, alias="Tools") + custom_tools: List[CustomToolReference] = Field(default_factory=list, alias="CustomTools") + mcp_servers: List[MCPServerReference] = Field(default_factory=list, alias="MCPServers") + memory_config: MemoryConfig = Field(default_factory=MemoryConfig, alias="MemoryConfig") + llm_params: Optional["LlmParams"] = Field(default=None, alias="LlmParams") + + @field_validator("tools", mode="before") + @classmethod + def parse_tools(cls, v): + """Parse tools field - supports dict, string, and StrandsToolReference formats""" + if isinstance(v, list): + result = [] + for tool in v: + if isinstance(tool, dict): + result.append(StrandsToolReference(**tool)) + elif isinstance(tool, str): + # Support backward compatibility with string format + result.append(StrandsToolReference(ToolId=tool)) + elif isinstance(tool, StrandsToolReference): + result.append(tool) + return result + return [] + + @field_validator("custom_tools", mode="before") + @classmethod + def parse_custom_tools(cls, v): + """Parse custom_tools field to handle dict format""" + if isinstance(v, list): + result = [] + for tool in v: + if isinstance(tool, dict): + result.append(CustomToolReference(**tool)) + elif isinstance(tool, CustomToolReference): + result.append(tool) + return result + return [] + + @field_validator("mcp_servers", mode="before") + @classmethod + def parse_mcp_servers(cls, v): + """Parse mcp_servers field to handle dict format""" + if isinstance(v, list): + result = [] + for server in v: + if isinstance(server, dict): + result.append(MCPServerReference(**server)) + elif isinstance(server, MCPServerReference): + result.append(server) + return result + return [] + + def get_tool_ids(self) -> List[str]: + """Extract tool IDs from tool references""" + return [tool.tool_id for tool in self.tools] + + def get_mcp_servers(self) -> List[Dict[str, str]]: + """ + Extract MCP server details as dictionaries for tool loading. + + Returns: + List of dicts with keys: use_case_id, url, type + """ + return [ + {"use_case_id": server.use_case_id, "url": server.url, "type": server.type} for server in self.mcp_servers + ] + + def get_custom_tool_ids(self) -> List[str]: + """Extract custom tool IDs""" + return [tool.tool_id for tool in self.custom_tools] + + def get_mcp_server_ids(self) -> List[str]: + """ + DEPRECATED: Use get_mcp_servers() instead. + Extract MCP server IDs from MCP server references. + """ + logger.warning("get_mcp_server_ids() is deprecated, use get_mcp_servers() instead") + return [server.use_case_id for server in self.mcp_servers] + + +class BedrockLlmParams(BaseModel): + """Bedrock LLM parameters with support for all inference types""" + + model_id: Optional[str] = Field(default=None, alias="ModelId") + model_arn: Optional[str] = Field(default=None, alias="ModelArn") + inference_profile_id: Optional[str] = Field(default=None, alias="InferenceProfileId") + bedrock_inference_type: Optional[str] = Field(default=None, alias="BedrockInferenceType") + guardrail_identifier: Optional[str] = Field(default=None, alias="GuardrailIdentifier") + guardrail_version: Optional[str] = Field(default=None, alias="GuardrailVersion") + + @model_validator(mode="after") + def validate_inference_type_requirements(self): + """Validate that the correct fields are present based on inference type""" + inference_type = self.bedrock_inference_type + + if inference_type in ["QUICK_START", "OTHER_FOUNDATION"] and not self.model_id: + raise ValueError(f"ModelId is required for inference type {inference_type}") + elif inference_type == "INFERENCE_PROFILE" and not self.inference_profile_id: + raise ValueError(f"InferenceProfileId is required for inference type {inference_type}") + elif inference_type == "PROVISIONED" and not self.model_arn: + raise ValueError(f"ModelArn is required for inference type {inference_type}") + + return self + + @property + def model_identifier(self) -> str: + """Get the appropriate model identifier based on inference type""" + if self.bedrock_inference_type == "INFERENCE_PROFILE": + return self.inference_profile_id + elif self.bedrock_inference_type == "PROVISIONED": + return self.model_arn + else: + return self.model_id + + +class MultimodalParams(BaseModel): + """Multimodal parameters""" + + multimodal_enabled: bool = Field(default=False, alias="MultimodalEnabled") + + +class LlmParams(BaseModel): + """LLM parameters""" + + model_provider: str = Field(alias="ModelProvider") + temperature: float = Field(default=0.7, alias="Temperature") + streaming: bool = Field(default=True, alias="Streaming") + verbose: bool = Field(default=False, alias="Verbose") + bedrock_llm_params: BedrockLlmParams = Field(alias="BedrockLlmParams") + model_params: Dict[str, Any] = Field(default_factory=dict, alias="ModelParams") + multimodal_params: Optional[MultimodalParams] = Field(default=None, alias="MultimodalParams") + + +class UseCaseConfig(BaseModel): + """Complete use case configuration following DDB structure + + Supports both AgentBuilder and WorkflowBuilder use case types: + - AgentBuilder: Has agent_builder_params + - WorkflowBuilder: Has workflow_config + """ + + use_case_name: str = Field(alias="UseCaseName") + use_case_type: str = Field(alias="UseCaseType") + agent_builder_params: Optional[AgentBuilderParams] = Field(default=None, alias="AgentBuilderParams") + workflow_params: Optional[WorkflowParams] = Field(default=None, alias="WorkflowParams") + llm_params: LlmParams = Field(alias="LlmParams") + + model_config = {"populate_by_name": True} + + @classmethod + def from_ddb_config(cls, config: Dict[str, Any]) -> "UseCaseConfig": + """Create UseCaseConfig directly from DDB config dict""" + try: + return cls(**config) + except Exception as e: + logger.error(f"Error parsing DDB config: {e}") + raise ValueError(f"Error parsing configuration: {e}") from e diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/__init__.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/__init__.py new file mode 100644 index 00000000..41687a70 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/__init__.py @@ -0,0 +1,14 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Multimodal - Multimodal processing components for GAAB Strands +""" + +from .file_handler import FileHandler +from .multimodal_processor import MultimodalRequestProcessor + +__all__ = [ + "FileHandler", + "MultimodalRequestProcessor", +] diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/file_handler.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/file_handler.py new file mode 100644 index 00000000..e8c3d521 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/file_handler.py @@ -0,0 +1,287 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import logging +import os +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import Any, Dict, List + +import boto3 +from gaab_strands_common.models import FileReference +from gaab_strands_common.utils.constants import ( + MAX_PARALLEL_FILE_PROCESSING_THREADS, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + REMAINING_SECONDS_FOR_FILE_ACCESS, + USE_CASE_UUID, + FileStatus, +) +from gaab_strands_common.utils.helpers import retry_with_backoff + +logger = logging.getLogger(__name__) + + +class FileHandler: + """Handles multimodal file validation and processing""" + + def __init__(self, region: str): + """ + Initialize file handler + + Args: + region: AWS region + """ + self.region = region + # Get environment variables - no validation needed since S3FileReaderTool already validated + self.metadata_table_name = os.getenv(MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR) + self.bucket_name = os.getenv(MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR) + + self.dynamodb = boto3.resource("dynamodb", region_name=region) + self.metadata_table = self.dynamodb.Table(self.metadata_table_name) + + def _convert_to_file_references(self, files: List[Dict[str, Any]]) -> List[FileReference]: + """Convert file dictionaries to FileReference objects""" + file_references = [] + for file_data in files: + if isinstance(file_data, dict) and "fileReference" in file_data and "fileName" in file_data: + file_references.append(FileReference(**file_data)) + else: + logger.warning(f"Skipping invalid file data: {file_data}") + return file_references + + def _process_validation_result(self, validation_result: Dict[str, Any], file_ref: FileReference) -> Dict[str, str]: + """Process a single file validation result and return content block""" + if validation_result["is_valid"]: + s3_key = validation_result["s3_key"] + logger.debug(f"Successfully validated file: {file_ref.file_name}") + return {"text": f"File available for reading: {file_ref.file_name} with S3 key '{s3_key}'"} + else: + reason = validation_result.get("reason", "Unknown reason") + logger.warning(f"File validation failed for: {file_ref.file_name} - {reason}") + return { + "text": f"File {file_ref.file_name} is not available. It was either deleted or it has expired." + } + + def _validate_files_in_parallel( + self, + file_references: List[FileReference], + usecase_id: str, + user_id: str, + conversation_id: str, + message_id: str, + ) -> List[Dict[str, str]]: + """Validate multiple files in parallel using ThreadPoolExecutor""" + file_content_blocks = [] + max_workers = min(len(file_references), MAX_PARALLEL_FILE_PROCESSING_THREADS) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_file = { + executor.submit( + self._validate_single_file, file_ref, usecase_id, user_id, conversation_id, message_id + ): file_ref + for file_ref in file_references + } + + for future in as_completed(future_to_file): + file_ref = future_to_file[future] + try: + validation_result = future.result() + content_block = self._process_validation_result(validation_result, file_ref) + file_content_blocks.append(content_block) + except Exception as e: + logger.error(f"Error processing file {file_ref.file_name}: {e}") + + return file_content_blocks + + def validate_all_files(self, payload: Dict[str, Any]) -> List[Dict[str, str]]: + """ + Validate all files in parallel and create content blocks + + Args: + payload: Request payload containing files and metadata + + Returns: + content_blocks + """ + files = payload.get("files", []) + if not files: + return [] + + logger.debug(f"Processing {len(files)} files in parallel") + + file_references = self._convert_to_file_references(files) + if not file_references: + return [] + + usecase_id = os.getenv(USE_CASE_UUID) + user_id = payload.get("userId") + conversation_id = payload.get("conversationId") + message_id = payload.get("messageId") + + file_content_blocks = self._validate_files_in_parallel( + file_references, usecase_id, user_id, conversation_id, message_id + ) + + logger.info(f"Created {len(file_content_blocks)} file content blocks from {len(files)} files") + return file_content_blocks + + def _check_ttl_validity(self, file_name: str, ttl: int) -> Dict[str, Any]: + """Check if file TTL is valid and has sufficient time remaining""" + current_utc_timestamp = int(time.time()) + + if current_utc_timestamp > ttl: + logger.warning(f"File {file_name} has expired (current UTC: {current_utc_timestamp}, TTL: {ttl})") + return {"valid": False, "reason": "File has expired"} + + if (current_utc_timestamp + REMAINING_SECONDS_FOR_FILE_ACCESS) > ttl: + time_remaining = ttl - current_utc_timestamp + logger.warning(f"File {file_name} rejected - less than 1 hour remaining ({time_remaining} seconds left)") + return {"valid": False, "reason": f"File expires in {time_remaining} seconds (less than 1 hour)"} + + return {"valid": True} + + def _handle_file_status(self, file_name: str, status: str, ttl: int) -> Dict[str, Any]: + """Handle different file status types and return appropriate result""" + current_utc_timestamp = int(time.time()) + + if status == FileStatus.UPLOADED: + if ttl: + time_remaining = ttl - current_utc_timestamp + hours_remaining = time_remaining / 3600 + logger.debug(f"File {file_name} is ready for use ({hours_remaining:.1f} hours remaining)") + else: + logger.debug(f"File {file_name} is ready for use (no TTL set)") + return {"status": FileStatus.UPLOADED, "valid": True} + + if status == FileStatus.DELETED: + logger.error(f"File {file_name} has been deleted") + return {"status": FileStatus.DELETED, "valid": False, "reason": "File has been deleted."} + + if status == FileStatus.INVALID: + logger.error(f"File {file_name} is marked as invalid") + return { + "status": FileStatus.INVALID, + "valid": False, + "reason": "File is not available for use due to constraint violations.", + } + + if status == FileStatus.PENDING: + logger.info(f"File {file_name} is still pending") + return { + "status": FileStatus.PENDING, + "valid": False, + "reason": "File is still being processed or hasn't been uploaded yet.", + } + + logger.warning(f"File {file_name} has unknown status: {status}") + return {"status": "unknown", "valid": False, "reason": f"Unknown status: {status}"} + + def _check_file_metadata(self, metadata_key: str, file_name: str) -> Dict[str, Any]: + """Check file metadata in DynamoDB and validate status""" + response = self.metadata_table.get_item(Key={"fileKey": metadata_key, "fileName": file_name}) + + item = response.get("Item") + if not item: + logger.warning(f"No metadata found for file: {file_name}") + return {"status": FileStatus.NOT_FOUND, "valid": False, "reason": "No metadata found"} + + status = item.get("status", "") + ttl = item.get("ttl") + + if ttl: + ttl_result = self._check_ttl_validity(file_name, ttl) + if not ttl_result["valid"]: + return {"status": "expired", "valid": False, "reason": ttl_result["reason"]} + + return self._handle_file_status(file_name, status, ttl) + + def _validate_single_file( + self, + file_reference: FileReference, + usecase_id: str, + user_id: str, + conversation_id: str, + message_id: str, + ) -> Dict[str, Any]: + """ + Validate a single file with retry logic for pending status and S3 existence verification + + Args: + file_reference: File reference to validate + usecase_id: Use case UUID + user_id: User ID + conversation_id: Conversation UUID + message_id: Message UUID + + Returns: + Dict with validation result: + { + "is_valid": bool, + "s3_key": str (if valid), + "reason": str (if invalid) + } + """ + try: + metadata_key = self._generate_metadata_key(usecase_id, user_id, conversation_id, message_id) + logger.debug(f"Validating file: {file_reference.file_name} with key: {metadata_key}") + + metadata_result = retry_with_backoff( + func=lambda: self._check_file_metadata(metadata_key, file_reference.file_name), + retry_condition=lambda result: result["status"] == FileStatus.PENDING, + ) + + if not metadata_result["valid"]: + return {"is_valid": False, "reason": metadata_result["reason"]} + + s3_key = self._generate_s3_key( + usecase_id, user_id, conversation_id, message_id, file_reference.file_reference + ) + logger.debug(f"File {file_reference.file_name} fully validated (metadata)") + return {"is_valid": True, "s3_key": s3_key} + + except Exception as e: + logger.error(f"Error validating file {file_reference.file_name}: {e}") + return {"is_valid": False, "reason": f"Validation error: {str(e)}"} + + def _generate_s3_key( + self, + usecase_id: str, + user_id: str, + conversation_id: str, + message_id: str, + file_reference: str, + ) -> str: + """ + Generate S3 key for file access + + Format: usecase_id/user_id/conversation_id/message_id/file_reference + + Args: + usecase_id: Use case UUID + user_id: User ID + conversation_id: Conversation UUID + message_id: Message UUID + file_reference: File UUID reference + + Returns: + S3 key string + """ + return f"{usecase_id}/{user_id}/{conversation_id}/{message_id}/{file_reference}" + + def _generate_metadata_key(self, usecase_id: str, user_id: str, conversation_id: str, message_id: str) -> str: + """ + Generate metadata key for DynamoDB lookup + + Format: usecase_id/user_id/conversation_id/message_id + + Args: + usecase_id: Use case UUID + user_id: User ID extracted from request + conversation_id: Conversation UUID + message_id: Message UUID + + Returns: + Metadata key string + """ + return f"{usecase_id}/{user_id}/{conversation_id}/{message_id}" diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/multimodal_processor.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/multimodal_processor.py new file mode 100644 index 00000000..27794568 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/multimodal/multimodal_processor.py @@ -0,0 +1,71 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Multimodal Request Processor - Handles requests with files and multimodal content +""" + +import logging +from typing import Any, Dict, List, Union + +from gaab_strands_common.models import UseCaseConfig +from gaab_strands_common.multimodal.file_handler import FileHandler +from gaab_strands_common.utils.helpers import extract_user_message + +logger = logging.getLogger(__name__) + + +class MultimodalRequestProcessor: + """Processes multimodal requests with file handling""" + + def __init__(self, region): + """Initialize multimodal request processor""" + self.region = region + + def process_multimodal_request(self, payload: Dict[str, Any]) -> Union[str, List[Dict[str, str]]]: + """ + Process multimodal request with files + + Args: + payload: Request payload containing files + + Returns: + User message or content blocks for agent processing + + Raises: + ValueError: When file processing fails (handled by main error handling) + """ + logger.debug("Processing multimodal request with files") + query = extract_user_message(payload) + content_blocks = [{"text": query}] + + try: + file_handler = FileHandler(region=self.region) + + # Validate all files in parallel and get content blocks + file_content_blocks = file_handler.validate_all_files(payload) + + if not file_content_blocks: + logger.warning("No files were processed") + raise ValueError("No files were provided for processing.") + + # Create enhanced content blocks with query and file references + content_blocks.extend(file_content_blocks) + logger.debug("Created multimodal content with %d file content blocks", len(file_content_blocks)) + return content_blocks + + except Exception as e: + logger.error(f"Error processing multimodal files: {e}") + # Re-raise to be handled by main error handling (respects streaming/non-streaming) + raise + + def is_multimodal_enabled(self, config: UseCaseConfig) -> bool: + """Check if multimodal is enabled in configuration""" + if config.llm_params.multimodal_params: + return config.llm_params.multimodal_params.multimodal_enabled + return False + + def has_files(self, payload: Dict[str, Any]) -> bool: + """Check if payload contains files""" + files = payload.get("files", []) + return isinstance(files, list) and len(files) > 0 diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/runtime_streaming.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/runtime_streaming.py new file mode 100644 index 00000000..3bea84a3 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/runtime_streaming.py @@ -0,0 +1,237 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Runtime Streaming - Handles streaming responses for AgentCore Runtime +""" + +import asyncio +import logging +import time +from typing import Any, AsyncGenerator, Dict, Optional + +from gaab_strands_common.tool_wrapper import ToolEventEmitter + +logger = logging.getLogger(__name__) + + +class RuntimeStreaming: + """Handles streaming responses for AgentCore Runtime integration""" + + @staticmethod + def extract_event_text(event) -> str: + """ + Extract text content from Strands streaming events. + + Args: + event: Strands streaming event object + + Returns: + str: Extracted raw text content, or empty string if no text found + """ + try: + text = "" + + if hasattr(event, "data") and event.data: + text = str(event.data) + elif isinstance(event, dict) and "data" in event and event["data"]: + text = str(event["data"]) + else: + return "" + + return text + + except Exception as e: + logger.warning(f"Error extracting text from event: {e}") + return "" + + @staticmethod + def _create_content_chunk(text: str, config) -> Dict[str, Any]: + """Create content chunk with agent metadata.""" + return { + "type": "content", + "text": text, + "agent_name": config.use_case_name, + "model_id": config.llm_params.bedrock_llm_params.model_id, + } + + @staticmethod + def _create_completion_chunk(config, usage_metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + """Create completion signal chunk.""" + completion_chunk = { + "type": "completion", + "agent_name": config.use_case_name, + "model_id": config.llm_params.bedrock_llm_params.model_id, + } + + # Add usage metadata if available + if usage_metadata: + completion_chunk["usage"] = usage_metadata + + return completion_chunk + + @staticmethod + def _create_error_chunk(error: Exception, config) -> Dict[str, Any]: + """Create error chunk.""" + return { + "type": "error", + "error": "Streaming response failed", + "message": str(error), + "agent_name": config.use_case_name, + "model_id": config.llm_params.bedrock_llm_params.model_id, + } + + @staticmethod + def _yield_tool_events(): + """Get and yield tool events from emitter.""" + tool_events = ToolEventEmitter.get_events() + for tool_event in tool_events: + tool_chunk = {"type": "tool_use", "toolUsage": tool_event.to_dict()} + logger.info(f"Emitting tool usage: {tool_event.tool_name} - {tool_event.status}") + yield tool_chunk + + @staticmethod + def _should_skip_event(event_text: str, last_event_text: Optional[str]) -> bool: + """Check if event should be skipped.""" + return not event_text or event_text == last_event_text + + @staticmethod + async def _handle_stream_fallback(strands_agent, user_message: str, config) -> AsyncGenerator[Dict[str, Any], None]: + """Handle fallback when stream_async is not supported.""" + logger.warning("Strands agent doesn't support stream_async, falling back to single response") + response = strands_agent(user_message) + yield RuntimeStreaming._create_content_chunk(str(response), config) + yield RuntimeStreaming._create_completion_chunk(config) + + @staticmethod + def _extract_usage_metadata(event) -> Optional[Dict[str, Any]]: + """Extract usage metadata from event if present.""" + try: + # Handle dictionary events (most common case) + if isinstance(event, dict): + # Check nested structure: {'event': {'metadata': {'usage': {...}}}} + if "event" in event and isinstance(event["event"], dict): + metadata = event["event"].get("metadata", {}) + if "usage" in metadata: + return metadata["usage"] + + # Check direct structure: {'metadata': {'usage': {...}}} + if "metadata" in event: + metadata = event["metadata"] + if "usage" in metadata: + return metadata["usage"] + + return None + except Exception as e: + logger.debug(f"Could not extract usage metadata: {e}") + return None + + @staticmethod + async def _process_agent_stream(agent_stream, config): + """Process the agent stream and yield chunks. Returns usage metadata as last item.""" + last_event_text = None + usage_metadata = None # Initialize to preserve across loop iterations + + async for event in agent_stream: + # Yield tool events + for tool_chunk in RuntimeStreaming._yield_tool_events(): + yield tool_chunk + + # Extract usage metadata if present and preserve it + event_usage = RuntimeStreaming._extract_usage_metadata(event) + if event_usage: + usage_metadata = event_usage # Keep the last found usage metadata + logger.info(f"Captured usage metadata: {usage_metadata}") + + # Process content + event_text = RuntimeStreaming.extract_event_text(event) + if RuntimeStreaming._should_skip_event(event_text, last_event_text): + continue + + last_event_text = event_text + yield RuntimeStreaming._create_content_chunk(event_text, config) + + # Yield remaining tool events + for tool_chunk in RuntimeStreaming._yield_tool_events(): + yield tool_chunk + + # Yield a special marker with usage metadata (will be None if never found) + yield {"_usage_metadata": usage_metadata} + + @staticmethod + async def stream_response_async(strands_agent, user_message: str, config) -> AsyncGenerator[Dict[str, Any], None]: + """ + Simplified async generator that streams raw response chunks. + No thinking tag filtering - frontend handles that. + Tool events are captured via tool wrappers, not stream events. + + Args: + strands_agent: The configured Strands agent instance + user_message: User input message + config: Agent configuration containing metadata + + Yields: + Dict: Response chunks in AgentCore Runtime expected format + """ + start_time = time.time() + ToolEventEmitter.clear() + + try: + logger.info(f"[RUNTIME_STREAMING] Starting stream for message: {user_message[:100]}...") + + agent_stream = strands_agent.stream_async(user_message) + + usage_metadata = None + async for chunk in RuntimeStreaming._process_agent_stream(agent_stream, config): + # Check if this is the usage metadata marker + if isinstance(chunk, dict) and "_usage_metadata" in chunk: + usage_metadata = chunk["_usage_metadata"] + else: + yield chunk + + # Stream completion + total_elapsed = time.time() - start_time + logger.info(f"[RUNTIME_STREAMING] Stream complete in {total_elapsed:.3f}s") + yield RuntimeStreaming._create_completion_chunk(config, usage_metadata) + + except AttributeError: + async for chunk in RuntimeStreaming._handle_stream_fallback(strands_agent, user_message, config): + yield chunk + + except Exception as e: + logger.error(f"Error during async streaming response: {e}") + yield RuntimeStreaming._create_error_chunk(e, config) + + @staticmethod + def stream_response(strands_agent, user_message: str, config): + """ + Synchronous wrapper that runs the async streaming function. + + Args: + strands_agent: The configured Strands agent instance + user_message: User input message + config: Agent configuration containing metadata + + Yields: + Dict: Response chunks in AgentCore Runtime expected format + """ + + # Run the async generator in a new event loop + async def _async_wrapper(): + async for chunk in RuntimeStreaming.stream_response_async(strands_agent, user_message, config): + yield chunk + + # Create and run the async generator + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + try: + async_gen = _async_wrapper() + while True: + try: + chunk = loop.run_until_complete(async_gen.__anext__()) + yield chunk + except StopAsyncIteration: + break + finally: + loop.close() diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/strands_tools_registry.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/strands_tools_registry.py new file mode 100644 index 00000000..107e0578 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/strands_tools_registry.py @@ -0,0 +1,245 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +StrandsToolsRegistry - Registry for built-in Strands tools +""" + +import logging +from typing import Dict, List, Type, Any +import importlib +import pkgutil + +from strands.tools.decorator import DecoratedFunctionTool + +logger = logging.getLogger(__name__) + + +class StrandsToolsRegistry: + """Registry for built-in Strands tools from strands-agents-tools package""" + + def __init__(self): + """Initialize registry with available Strands tools""" + self._available_tools: Dict[str, Type] = {} + self._discover_strands_tools() + logger.info(f"StrandsToolsRegistry initialized with {len(self._available_tools)} available tools") + + def _discover_strands_tools(self) -> None: + """ + Discover available tools from strands-agents-tools package + + This method imports the strands_tools package and discovers all submodules. + Each submodule represents a tool (e.g., calculator, http_request, file_read). + """ + try: + import strands_tools + + for module_info in pkgutil.iter_modules(strands_tools.__path__): + tool_module_name = module_info.name + + try: + tool_module = importlib.import_module(f"strands_tools.{tool_module_name}") + + tool_id = tool_module_name + + # Store the module itself as the tool + self._available_tools[tool_id] = tool_module + logger.debug(f"Discovered tool: {tool_id} from strands_tools.{tool_module_name}") + + except Exception as e: + logger.debug(f"Could not import strands_tools.{tool_module_name}: {e}") + continue + + logger.info(f"Discovered {len(self._available_tools)} tools from strands_tools") + + except ImportError as e: + logger.warning(f"Could not import strands-agents-tools package (strands_tools): {e}") + logger.warning("No built-in Strands tools will be available") + except Exception as e: + logger.error(f"Error discovering Strands tools: {e}") + logger.warning("No built-in Strands tools will be available") + + def get_tools(self, tool_ids: List[str]) -> List[Any]: + """ + Get instances of specified Strands tools + + Args: + tool_ids: List of tool identifiers (e.g., ['web_search', 'calculator']) + + Returns: + List of instantiated Strands tool objects + + Raises: + ValueError: If a tool_id is not found in the registry + """ + logger.info(f"Loading {len(tool_ids)} built-in Strands tool(s): {', '.join(tool_ids)}") + + tools = [] + missing_tools = [] + + for tool_id in tool_ids: + tool_instance = self._load_single_tool(tool_id, missing_tools) + if tool_instance is not None: + tools.append(tool_instance) + + self._log_missing_tools(missing_tools) + logger.info(f"Successfully loaded {len(tools)} built-in Strands tool(s)") + return tools + + def _load_single_tool(self, tool_id: str, missing_tools: List[str]) -> Any: + """ + Load a single tool by ID + + Args: + tool_id: Tool identifier + missing_tools: List to append to if tool fails to load + + Returns: + Tool instance or None if loading failed + """ + if tool_id not in self._available_tools: + self._log_tool_not_found(tool_id) + missing_tools.append(tool_id) + return None + + try: + tool_ref = self._available_tools[tool_id] + tool_instance = self._instantiate_tool(tool_ref) + logger.debug(f"Successfully loaded Strands tool: {tool_id}") + return tool_instance + except Exception as e: + self._log_tool_load_error(tool_id, e) + missing_tools.append(tool_id) + return None + + def _instantiate_tool(self, tool_ref: Any) -> Any: + """ + Instantiate a tool from its reference + + Args: + tool_ref: Tool reference (module or class) + + Returns: + Instantiated tool object + """ + if self._is_module(tool_ref): + return self._instantiate_from_module(tool_ref) + return tool_ref() + + def _is_module(self, tool_ref: Any) -> bool: + """Check if tool reference is a module""" + return hasattr(tool_ref, "__file__") and hasattr(tool_ref, "__name__") + + def _instantiate_from_module(self, tool_module: Any) -> Any: + """ + Instantiate a tool from a module by finding the Tool class + + Args: + tool_module: Module containing the tool + + Returns: + Tool instance + """ + tool_instance = self._find_tool_class_in_module(tool_module) + return tool_instance if tool_instance is not None else tool_module + + def _find_tool_class_in_module(self, tool_module: Any) -> Any: + """ + Find and instantiate a Tool class in a module + + Args: + tool_module: Module to search + + Returns: + Tool instance, function, or None if not found + """ + # Check for @tool decorator pattern (DecoratedFunctionTool) + for attr_name in dir(tool_module): + if attr_name.startswith("_"): + continue + + attr = getattr(tool_module, attr_name) + + # Check if it's a DecoratedFunctionTool (from @tool decorator) + if isinstance(attr, DecoratedFunctionTool): + return attr + + # If nothing found, return None (module will be used for TOOL_SPEC pattern) + return None + + def _log_tool_not_found(self, tool_id: str) -> None: + """Log warning when tool is not found in registry""" + logger.warning( + f"[TOOL DISCOVERY FAILURE] Tool: '{tool_id}', Source: Strands, Error: Tool not found in registry" + ) + available_tools_str = ", ".join(sorted(self._available_tools.keys())) if self._available_tools else "None" + logger.warning(f"Tool '{tool_id}' not found in Strands tools registry. Available tools: {available_tools_str}") + + def _log_tool_load_error(self, tool_id: str, error: Exception) -> None: + """Log error when tool fails to load""" + logger.error(f"[TOOL DISCOVERY FAILURE] Tool: '{tool_id}', Source: Strands, Error: {str(error)}") + logger.error(f"Error loading tool {tool_id}: {error}", exc_info=True) + + def _log_missing_tools(self, missing_tools: List[str]) -> None: + """Log summary of missing tools""" + if missing_tools: + logger.warning(f"Could not load {len(missing_tools)} Strands tool(s): {', '.join(missing_tools)}") + + def list_available_tools(self) -> List[Dict[str, str]]: + """ + List all available Strands tools with metadata + + Returns: + List of dictionaries containing tool metadata (id, name, description) + """ + tools_list = [] + + for tool_id, tool_class in self._available_tools.items(): + tool_info = { + "id": tool_id, + "name": tool_class.__name__, + "description": self._get_tool_description(tool_class), + } + tools_list.append(tool_info) + + return tools_list + + def _get_tool_description(self, tool_class: Type) -> str: + """ + Extract description from tool class + + Args: + tool_class: The tool class + + Returns: + Tool description string + """ + if tool_class.__doc__: + return tool_class.__doc__.strip().split("\n")[0] + + if hasattr(tool_class, "description"): + return str(tool_class.description) + + return "No description available" + + def has_tool(self, tool_id: str) -> bool: + """ + Check if a tool is available in the registry + + Args: + tool_id: Tool identifier + + Returns: + True if tool is available, False otherwise + """ + return tool_id in self._available_tools + + def get_available_tool_ids(self) -> List[str]: + """ + Get list of all available tool IDs + + Returns: + List of tool IDs + """ + return list(self._available_tools.keys()) diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/tool_wrapper.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/tool_wrapper.py new file mode 100644 index 00000000..b293088b --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/tool_wrapper.py @@ -0,0 +1,445 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tool Wrapper - Wraps tools to emit usage events for UI tracking +""" + +import inspect +import logging +import time +from datetime import datetime, timezone +from functools import wraps +from typing import Any, Callable, Dict + +logger = logging.getLogger(__name__) + +# Maximum length for tool argument/output strings in logs +MAX_TOOL_ARG_LENGTH = 500 +MAX_TOOL_OUTPUT_LENGTH = 500 + + +class ToolUsageEvent: + """Represents a tool usage event for streaming to UI""" + + def __init__(self, tool_name: str, status: str, start_time: str, **kwargs): + self.tool_name = tool_name + self.status = status + self.start_time = start_time + self.data = kwargs + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for streaming""" + result = { + "toolName": self.tool_name, + "status": self.status, + "startTime": self.start_time, + } + result.update(self.data) + return result + + +class ToolEventEmitter: + """ + Singleton class to collect tool usage events during agent execution. + Events are collected and can be retrieved by the streaming function. + """ + + _instance = None + _events = [] + + def __new__(cls): + if cls._instance is None: + cls._instance = super().__new__(cls) + cls._events = [] + return cls._instance + + @classmethod + def emit(cls, event: ToolUsageEvent): + """Emit a tool usage event""" + cls._events.append(event) + logger.info(f"[TOOL_EVENT] Emitted: {event.tool_name} - {event.status}") + + @classmethod + def get_events(cls): + """Get all events and clear the queue""" + events = cls._events.copy() + cls._events.clear() + return events + + @classmethod + def clear(cls): + """Clear all events""" + cls._events.clear() + + +def _get_tool_name(tool: Any) -> str: + """Extract tool name from tool object.""" + # For DecoratedFunctionTool (from @tool decorator) + if hasattr(tool, "tool_name"): + return str(tool.tool_name) + # For regular tools with name attribute + if hasattr(tool, "name"): + return str(tool.name) + # For plain functions + if hasattr(tool, "__name__"): + return str(tool.__name__) + # For wrapped functions + if hasattr(tool, "func") and hasattr(tool.func, "__name__"): + return str(tool.func.__name__) + return tool.__class__.__name__ + + +def _extract_tool_name_from_args(args: tuple, fallback_name: str) -> str: + """Try to extract tool name from the first argument if it contains a 'name' field.""" + if not args: + return fallback_name + + first_arg = args[0] + if hasattr(first_arg, "name"): + return str(first_arg.name) + if isinstance(first_arg, dict) and "name" in first_arg: + return str(first_arg["name"]) + return fallback_name + + +def _get_mcp_server_name(tool: Any) -> str | None: + """Extract MCP server name from tool metadata.""" + if not hasattr(tool, "metadata") or not isinstance(tool.metadata, dict): + return None + return tool.metadata.get("mcp_server") or tool.metadata.get("server_name") + + +def _filter_tool_args(args: tuple) -> list: + """Filter out agent objects from args.""" + filtered_args = [] + for arg in args: + arg_str = str(arg) + if "Agent" not in arg_str or len(arg_str) < 100: + filtered_args.append(arg_str[:MAX_TOOL_ARG_LENGTH]) + return filtered_args + + +def _filter_tool_kwargs(kwargs: dict) -> dict: + """Filter out agent and internal parameters from kwargs.""" + return { + k: str(v)[:MAX_TOOL_ARG_LENGTH] + for k, v in kwargs.items() + if k not in ["agent", "_agent", "self"] and "Agent" not in str(v) + } + + +def _build_tool_input(args: tuple, kwargs: dict) -> dict: + """Build tool input data from args and kwargs.""" + tool_input = {} + + if args: + filtered_args = _filter_tool_args(args) + if filtered_args: + tool_input["args"] = filtered_args + + if kwargs: + filtered_kwargs = _filter_tool_kwargs(kwargs) + if filtered_kwargs: + tool_input["kwargs"] = filtered_kwargs + + return tool_input + + +def _build_start_event_data(start_time_iso: str, args: tuple, kwargs: dict, mcp_server_name: str | None) -> dict: + """Build start event data.""" + event_data = {"startTime": start_time_iso} + + tool_input = _build_tool_input(args, kwargs) + if tool_input: + event_data["toolInput"] = tool_input + + if mcp_server_name: + event_data["mcpServerName"] = mcp_server_name + + return event_data + + +def _build_completion_event_data(start_time_iso: str, end_time_iso: str, mcp_server_name: str | None) -> dict: + """Build completion event data.""" + event_data = { + "startTime": start_time_iso, + "endTime": end_time_iso, + } + if mcp_server_name: + event_data["mcpServerName"] = mcp_server_name + return event_data + + +def _build_error_event_data( + start_time_iso: str, end_time_iso: str, error: Exception, mcp_server_name: str | None +) -> dict: + """Build error event data.""" + event_data = { + "startTime": start_time_iso, + "endTime": end_time_iso, + "error": str(error)[:MAX_TOOL_OUTPUT_LENGTH], + } + if mcp_server_name: + event_data["mcpServerName"] = mcp_server_name + return event_data + + +def _wrap_stream_method(tool: Any, tool_name: str, mcp_server_name: str | None): + """Wrap the stream method of a tool.""" + original_stream = tool.stream + + @wraps(original_stream) + async def wrapped_stream(*args, **kwargs): + emitter = ToolEventEmitter() + start_time_iso = datetime.now(timezone.utc).isoformat() + start_time_perf = time.perf_counter() + + actual_tool_name = _extract_tool_name_from_args(args, tool_name) + start_event_data = _build_start_event_data(start_time_iso, args, kwargs, mcp_server_name) + + start_event = ToolUsageEvent(actual_tool_name, "started", start_time_iso, **start_event_data) + emitter.emit(start_event) + + try: + result_chunks = [] + async for chunk in original_stream(*args, **kwargs): + result_chunks.append(chunk) + yield chunk + + duration = time.perf_counter() - start_time_perf + end_time_iso = datetime.now(timezone.utc).isoformat() + completion_event_data = _build_completion_event_data(start_time_iso, end_time_iso, mcp_server_name) + + completion_event = ToolUsageEvent(actual_tool_name, "completed", start_time_iso, **completion_event_data) + emitter.emit(completion_event) + logger.info(f"[TOOL_EXECUTION] {actual_tool_name} completed in {duration:.3f}s") + + except Exception as e: + duration = time.perf_counter() - start_time_perf + end_time_iso = datetime.now(timezone.utc).isoformat() + error_event_data = _build_error_event_data(start_time_iso, end_time_iso, e, mcp_server_name) + + error_event = ToolUsageEvent(actual_tool_name, "failed", start_time_iso, **error_event_data) + emitter.emit(error_event) + logger.error(f"[TOOL_EXECUTION] {actual_tool_name} failed after {duration:.3f}s: {e}") + raise + + tool.stream = wrapped_stream + logger.debug(f"Wrapped 'stream' method for tool: {tool_name}") + + +def _wrap_call_method(tool: Any, tool_name: str, mcp_server_name: str | None): + """Wrap the __call__ method of a tool.""" + original_call = tool.__call__ + + @wraps(original_call) + def wrapped_call(*args, **kwargs): + actual_tool_name = _extract_tool_name_from_args(args, tool_name) + return _execute_with_events( + actual_tool_name, + mcp_server_name, + lambda: original_call(*args, **kwargs), + args, + kwargs, + ) + + tool.__call__ = wrapped_call + logger.debug(f"Wrapped '__call__' method for tool: {tool_name}") + + +def _wrap_invoke_method(tool: Any, tool_name: str, mcp_server_name: str | None): + """Wrap the invoke method of a tool.""" + original_invoke = tool.invoke + + @wraps(original_invoke) + def wrapped_invoke(*args, **kwargs): + actual_tool_name = _extract_tool_name_from_args(args, tool_name) + return _execute_with_events( + actual_tool_name, + mcp_server_name, + lambda: original_invoke(*args, **kwargs), + args, + kwargs, + ) + + tool.invoke = wrapped_invoke + logger.debug(f"Wrapped 'invoke' method for tool: {tool_name}") + + +def _wrap_tool_spec_module(tool_module: Any, mcp_server_name: str | None) -> Any: + """Wrap a TOOL_SPEC module by wrapping its function in place.""" + if not hasattr(tool_module, "TOOL_SPEC"): + return tool_module + + tool_spec = tool_module.TOOL_SPEC + if not isinstance(tool_spec, dict) or "name" not in tool_spec: + return tool_module + + tool_name = tool_spec["name"] + + # Find and wrap the function + if hasattr(tool_module, tool_name): + original_func = getattr(tool_module, tool_name) + if callable(original_func): + wrapped_func = _wrap_plain_function(original_func, tool_name, mcp_server_name) + setattr(tool_module, tool_name, wrapped_func) + + return tool_module + + +def _wrap_plain_function(func: Callable, tool_name: str, mcp_server_name: str | None) -> Callable: + """Wrap a plain function (TOOL_SPEC pattern) to emit events.""" + + @wraps(func) + def wrapped_function(*args, **kwargs): + return _execute_with_events( + tool_name, + mcp_server_name, + lambda: func(*args, **kwargs), + args, + kwargs, + ) + + return wrapped_function + + +def _log_unwrappable_tool(tool_name: str, tool: Any): + """Log warning for tools that cannot be wrapped.""" + methods = [m for m in dir(tool) if not m.startswith("_") and callable(getattr(tool, m, None))] + logger.warning( + f"Tool {tool_name} has no stream/__call__/invoke method, cannot wrap. " + f"Available methods: {', '.join(methods[:10])}" + ) + + +def wrap_tool_with_events(tool: Any) -> Any: + """ + Wrap a tool to emit usage events when invoked. + + This wraps the tool's __call__ or invoke method to emit events + before and after execution. + + Args: + tool: The tool to wrap (Strands or MCP tool) + + Returns: + The wrapped tool that emits events + """ + tool_name = _get_tool_name(tool) + logger.info(f"Wrapping tool: {tool_name} (type: {type(tool).__name__})") + + mcp_server_name = _get_mcp_server_name(tool) + + if hasattr(tool, "stream") and callable(tool.stream): + _wrap_stream_method(tool, tool_name, mcp_server_name) + elif hasattr(tool, "__call__") and callable(tool): + _wrap_call_method(tool, tool_name, mcp_server_name) + elif hasattr(tool, "invoke") and callable(tool.invoke): + _wrap_invoke_method(tool, tool_name, mcp_server_name) + # For modules with TOOL_SPEC, wrap the function in place + elif inspect.ismodule(tool) and hasattr(tool, "TOOL_SPEC"): + return _wrap_tool_spec_module(tool, mcp_server_name) + # For plain functions, create a wrapper function + elif inspect.isfunction(tool) or inspect.ismethod(tool): + return _wrap_plain_function(tool, tool_name, mcp_server_name) + else: + _log_unwrappable_tool(tool_name, tool) + + return tool + + +def _add_tool_output_to_event(event_data: dict, result: Any): + """Add tool output to event data, truncating if necessary.""" + if result is None: + return + + result_str = str(result) + if len(result_str) > MAX_TOOL_OUTPUT_LENGTH: + event_data["toolOutput"] = result_str[:MAX_TOOL_OUTPUT_LENGTH] + "... (truncated)" + else: + event_data["toolOutput"] = result_str + + +def _emit_start_event( + emitter: ToolEventEmitter, + tool_name: str, + start_time_iso: str, + args: tuple, + kwargs: dict, + mcp_server_name: str | None, +): + """Emit tool start event.""" + start_event_data = _build_start_event_data(start_time_iso, args, kwargs, mcp_server_name) + start_event = ToolUsageEvent(tool_name, "started", start_time_iso, **start_event_data) + emitter.emit(start_event) + + +def _emit_completion_event( + emitter: ToolEventEmitter, + tool_name: str, + start_time_iso: str, + end_time_iso: str, + result: Any, + mcp_server_name: str | None, +): + """Emit tool completion event.""" + completion_event_data = _build_completion_event_data(start_time_iso, end_time_iso, mcp_server_name) + _add_tool_output_to_event(completion_event_data, result) + + completion_event = ToolUsageEvent(tool_name, "completed", start_time_iso, **completion_event_data) + emitter.emit(completion_event) + + +def _emit_error_event( + emitter: ToolEventEmitter, + tool_name: str, + start_time_iso: str, + end_time_iso: str, + error: Exception, + mcp_server_name: str | None, +): + """Emit tool error event.""" + error_event_data = _build_error_event_data(start_time_iso, end_time_iso, error, mcp_server_name) + error_event = ToolUsageEvent(tool_name, "failed", start_time_iso, **error_event_data) + emitter.emit(error_event) + + +def _execute_with_events(tool_name: str, mcp_server_name: str | None, func: Callable, args: tuple, kwargs: dict) -> Any: + """ + Execute a function with tool usage event emission. + + Args: + tool_name: Name of the tool + mcp_server_name: MCP server name if applicable + func: Function to execute + args: Positional arguments + kwargs: Keyword arguments + + Returns: + Result from the function + """ + emitter = ToolEventEmitter() + start_time_iso = datetime.now(timezone.utc).isoformat() + start_time_perf = time.perf_counter() + + _emit_start_event(emitter, tool_name, start_time_iso, args, kwargs, mcp_server_name) + + try: + result = func() + duration = time.perf_counter() - start_time_perf + end_time_iso = datetime.now(timezone.utc).isoformat() + + _emit_completion_event(emitter, tool_name, start_time_iso, end_time_iso, result, mcp_server_name) + logger.info(f"[TOOL_EXECUTION] {tool_name} completed in {duration:.3f}s") + + return result + + except Exception as e: + duration = time.perf_counter() - start_time_perf + end_time_iso = datetime.now(timezone.utc).isoformat() + + _emit_error_event(emitter, tool_name, start_time_iso, end_time_iso, e, mcp_server_name) + logger.error(f"[TOOL_EXECUTION] {tool_name} failed after {duration:.3f}s: {e}") + + raise diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/tools_manager.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/tools_manager.py new file mode 100644 index 00000000..e2752289 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/tools_manager.py @@ -0,0 +1,445 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +ToolsManager - Coordinates all tool sources (MCP and built-in Strands tools) + +This module provides the main orchestrator for loading and managing tools from +multiple sources: built-in Strands tools and MCP servers (Gateway and Runtime). +It handles tool loading, conflict detection, and provides debugging capabilities. +""" + +import logging +from typing import Any, Dict, List + +from gaab_strands_common import wrap_tool_with_events +from gaab_strands_common.custom_tools.setup.registry import CustomToolsRegistry +from gaab_strands_common.mcp_tools_loader import MCPToolsLoader +from gaab_strands_common.models import UseCaseConfig +from gaab_strands_common.strands_tools_registry import StrandsToolsRegistry + +logger = logging.getLogger(__name__) + + +class ToolsManager: + """ + Manages both MCP tools and built-in Strands tools. + + This class coordinates tool loading from multiple sources: + - Built-in Strands tools from strands-agents-tools package + - MCP tools from Gateway servers (external OAuth services) + - MCP tools from Runtime servers (containerized MCP servers) + + It provides: + - Unified tool loading interface + - Tool name conflict detection and resolution + - Comprehensive logging for debugging + - Tool source tracking + """ + + def __init__(self, region: str, config: UseCaseConfig): + """ + Initialize ToolsManager with AWS region. + + Args: + region: AWS region for MCP clients + config: Full use case configuration object + """ + self.region = region + self.config = config + self._config_dict = config.model_dump(by_alias=True) + self.strands_tools_registry = StrandsToolsRegistry() + self.custom_tools_registry = CustomToolsRegistry() + self.mcp_loader = MCPToolsLoader(region) + self._tool_sources: Dict[str, str] = {} # Maps tool name to source + logger.info(f"Initialized ToolsManager for region: {region}") + + def load_all_tools( + self, + mcp_servers: List[Dict[str, str]], + strands_tool_ids: List[str], + custom_tool_ids: List[str], + ) -> List[Any]: + """ + Load all tools from MCP servers, Strands built-in registry, and custom tools. + + This is the main entry point for tool loading. It: + 1. Loads built-in Strands tools first (fast, no network calls) + 2. Loads custom tools (registry-based with auto-discovery) + 3. Loads MCP tools (may involve network calls) + 4. Detects and resolves tool name conflicts + 5. Tracks tool sources for debugging + 6. Returns combined list of all tools + + Args: + mcp_servers: List of MCP server dicts with keys: + - use_case_id: Server identifier + - url: MCP server endpoint URL + - type: Either 'gateway' or 'runtime' + strands_tool_ids: List of built-in Strands tool identifiers + custom_tool_ids: List of custom Strands tool identifiers + + Returns: + Combined list of all Strands-compatible tool objects + + Note: + Errors are logged but don't stop processing. The agent will + continue with whatever tools successfully loaded. + """ + logger.info("=" * 80) + logger.info("[TOOL LOADING START]") + logger.info(f"Configuration: {len(strands_tool_ids)} built-in tool(s), {len(mcp_servers)} MCP server(s)") + if strands_tool_ids: + logger.info(f"Built-in tools requested: {', '.join(strands_tool_ids)}") + if mcp_servers: + server_names = [server.get("use_case_id", "unknown") for server in mcp_servers] + logger.info(f"MCP servers requested: {', '.join(server_names)}") + if custom_tool_ids: + logger.info(f"Custom tools requested: {', '.join(custom_tool_ids)}") + logger.info("=" * 80) + + all_tools = [] + self._tool_sources.clear() + + strands_tools = self._load_strands_tools(strands_tool_ids) + all_tools.extend(strands_tools) + + custom_tools = self._load_custom_tools(custom_tool_ids) + all_tools.extend(custom_tools) + + mcp_tools = self._load_mcp_tools(mcp_servers) + all_tools.extend(mcp_tools) + + self._detect_conflicts(all_tools) + + # Wrap all tools with event emission + logger.info("Wrapping tools with event emission for UI tracking") + wrapped_tools = [] + for tool in all_tools: + try: + wrapped_tool = wrap_tool_with_events(tool) + wrapped_tools.append(wrapped_tool) + except Exception as e: + logger.error(f"Failed to wrap tool {self._get_tool_name(tool)}: {e}") + # Use unwrapped tool as fallback + wrapped_tools.append(tool) + + all_tools = wrapped_tools + + self._log_tool_summary(all_tools) + + logger.info("=" * 80) + logger.info( + f"[TOOL LOADING COMPLETE] Total: {len(all_tools)} tool(s) available (all wrapped with event emission)" + ) + logger.info("=" * 80) + return all_tools + + def _load_strands_tools(self, tool_ids: List[str]) -> List[Any]: + """ + Load built-in Strands tools from the registry. + + Args: + tool_ids: List of Strands tool identifiers + + Returns: + List of instantiated Strands tool objects + """ + if not tool_ids: + logger.info("No built-in Strands tools requested") + return [] + + logger.info(f"Loading {len(tool_ids)} built-in Strands tool(s): {', '.join(tool_ids)}") + + try: + tools = self.strands_tools_registry.get_tools(tool_ids) + + # Track tool sources + for tool in tools: + tool_name = self._get_tool_name(tool) + self._tool_sources[tool_name] = "Strands" + logger.debug(f"Registered Strands tool: {tool_name}") + + logger.info(f"Successfully loaded {len(tools)} built-in Strands tool(s)") + return tools + + except Exception as e: + logger.error(f"Error loading Strands tools: {e}") + return [] + + def _load_mcp_tools(self, mcp_servers: List[Dict[str, str]]) -> List[Any]: + """ + Load MCP tools from configured servers. + + Args: + mcp_servers: List of MCP server dicts with keys: + - use_case_id: Server identifier + - url: MCP server endpoint URL + - type: Either 'gateway' or 'runtime' + + Returns: + List of Strands-compatible MCP tool objects + """ + if not mcp_servers: + logger.info("No MCP servers configured") + return [] + + logger.info(f"Loading MCP tools from {len(mcp_servers)} server(s)") + + try: + + tools = self.mcp_loader.load_tools(mcp_servers) + + for tool in tools: + tool_name = self._get_tool_name(tool) + # Try to get server type from tool metadata first, then fall back to server type detection + server_type = self._get_tool_server_type(tool) + if server_type == "Unknown": + # If we can't determine from tool metadata, we'll use "MCP" as generic type + server_type = "MCP" + self._tool_sources[tool_name] = f"MCP-{server_type}" + logger.debug(f"Registered MCP tool: {tool_name} from {server_type}") + + logger.info(f"Successfully loaded {len(tools)} MCP tool(s)") + return tools + + except Exception as e: + logger.error(f"Error loading MCP tools: {e}") + return [] + + def _load_custom_tools(self, custom_tool_ids: List[str]) -> List[Any]: + """ + Load custom tools using the new registry pattern. + + This method: + 1. Loads explicitly configured custom tools + 2. Loads auto-attach tools based on conditions + 3. Removes duplicates + + Args: + custom_tool_ids: List of custom Strands tool identifiers + + Returns: + List of custom tool implementations + """ + custom_tools = [] + + try: + logger.info(f"Loading custom tools with configured IDs: {custom_tool_ids}") + + # Load explicitly configured tools + configured_tools = self._load_configured_custom_tools(custom_tool_ids) + custom_tools.extend(configured_tools) + logger.info("Loaded %d explicitly configured custom tools", len(configured_tools)) + + # Load auto-attach tools + logger.info("Starting auto-attach tools loading...") + auto_tools = self._load_auto_attach_tools() + custom_tools.extend(auto_tools) + logger.info("Successfully loaded %d auto-attach custom tool(s)", len(auto_tools)) + + return custom_tools + + except Exception as e: + logger.error(f"Error loading custom tools: {e}") + return [] + + def _load_configured_custom_tools(self, custom_tool_ids: List[str]) -> List[Any]: + """Load explicitly configured custom tools""" + configured_tools = [] + + for tool_id in custom_tool_ids: + tool_methods = self._load_single_custom_tool(tool_id) + configured_tools.extend(tool_methods) + + return configured_tools + + def _load_auto_attach_tools(self) -> List[Any]: + """Load tools that should be auto-attached based on conditions""" + auto_tools = [] + + all_tools = self.custom_tools_registry.get_all_tools() + + for tool_id, tool_class in all_tools.items(): + if hasattr(tool_class, "_auto_condition"): + try: + condition_result = tool_class._auto_condition(self._config_dict) + logger.info(f"Auto-attach condition for {tool_id} returned: {condition_result}") + + if condition_result: + tool_methods = self._load_single_custom_tool(tool_id) + auto_tools.extend(tool_methods) + except Exception as e: + logger.error(f"Failed to auto-attach tool {tool_id}: {e}") + + return auto_tools + + def _load_single_custom_tool(self, tool_id: str) -> List[str]: + """ + Load a single custom tool by ID and return all its @tool decorated methods. + + Uses pre-discovered method names from the registry for efficiency. + Returns empty list if tool not found or has no methods. + """ + tool_class = self.custom_tools_registry.get_tool(tool_id) + if not tool_class: + logger.warning(f"Custom tool {tool_id} not found in registry") + return [] + + # Get pre-discovered method names from registry + method_names = self.custom_tools_registry.get_tool_method_names(tool_id) + if not method_names: + logger.warning(f"Custom tool {tool_id} has no @tool decorated methods") + return [] + + try: + tool_instance = tool_class(config=self._config_dict, region=self.region) + + # Get the bound methods from the instance + tool_methods = [] + for method_name in method_names: + tool_method = getattr(tool_instance, method_name) + tool_methods.append(tool_method) + + tool_name = self._get_tool_name(tool_method) + self._tool_sources[tool_name] = "Custom" + logger.debug(f"Loaded tool method '{tool_name}' from custom tool {tool_id}") + + logger.debug(f"Successfully loaded custom tool {tool_id} with {len(tool_methods)} method(s)") + return tool_methods + + except Exception as e: + logger.error(f"Failed to load custom tool {tool_id}: {e}") + return [] + + def _detect_conflicts(self, tools: List[Any]) -> None: + """ + Detect and log tool name conflicts. + + When tools from different sources have the same name, this method + logs warnings with details about the conflicting sources. + + Args: + tools: List of all loaded tools + + Note: + This method only logs conflicts. The actual conflict resolution + (which tool takes precedence) is handled by the Strands agent + based on the order tools are provided. + """ + tool_names: Dict[str, List[str]] = {} + + for tool in tools: + tool_name = self._get_tool_name(tool) + source = self._tool_sources.get(tool_name, "Unknown") + + if tool_name not in tool_names: + tool_names[tool_name] = [] + tool_names[tool_name].append(source) + + conflicts = {name: sources for name, sources in tool_names.items() if len(sources) > 1} + + if conflicts: + logger.warning(f"Detected {len(conflicts)} tool name conflict(s):") + for tool_name, sources in conflicts.items(): + logger.warning(f" - Tool '{tool_name}' provided by: {', '.join(sources)}") + logger.warning(f" Resolution: First occurrence will be used (order: Strands, then MCP servers)") + else: + logger.info("No tool name conflicts detected") + + def _log_tool_summary(self, tools: List[Any]) -> None: + """ + Log summary of loaded tools by source. + + Args: + tools: List of all loaded tools + """ + source_counts: Dict[str, int] = {} + tool_details_by_source: Dict[str, List[str]] = {} + + for tool in tools: + tool_name = self._get_tool_name(tool) + source = self._tool_sources.get(tool_name, "Unknown") + source_counts[source] = source_counts.get(source, 0) + 1 + + if source not in tool_details_by_source: + tool_details_by_source[source] = [] + tool_details_by_source[source].append(tool_name) + + logger.info("-" * 80) + logger.info("[FINAL TOOL REGISTRATION]") + logger.info(f"Total tools registered: {len(tools)}") + logger.info("-" * 80) + + logger.info("Tools by source:") + for source in sorted(source_counts.keys()): + count = source_counts[source] + tool_names = tool_details_by_source[source] + logger.info(f" {source}: {count} tool(s)") + logger.info(f" Tools: {', '.join(tool_names)}") + + if tools: + all_tool_names = [self._get_tool_name(tool) for tool in tools] + logger.debug(f"All available tools: {', '.join(sorted(all_tool_names))}") + + logger.info("-" * 80) + + def get_tool_sources(self) -> Dict[str, str]: + """ + Return mapping of tool names to their sources. + + This is useful for debugging and understanding where each tool came from. + + Returns: + Dictionary mapping tool names to source identifiers + (e.g., "Strands", "MCP-Gateway", "MCP-Runtime") + + Example: + { + "web_search": "Strands", + "get_calendar_events": "MCP-Gateway", + "query_database": "MCP-Runtime" + } + """ + return self._tool_sources.copy() + + def _get_tool_name(self, tool: Any) -> str: + """ + Extract tool name from a tool object. + + Args: + tool: Tool object (Strands or MCP) + + Returns: + Tool name as string + """ + if hasattr(tool, "name"): + return str(tool.name) + elif hasattr(tool, "__name__"): + return str(tool.__name__) + elif hasattr(tool, "func") and hasattr(tool.func, "__name__"): + return str(tool.func.__name__) + else: + return tool.__class__.__name__ + + def _get_tool_server_type(self, tool: Any) -> str: + """ + Extract server type from MCP tool metadata. + + Args: + tool: MCP tool object + + Returns: + Server type ("Gateway", "Runtime", or "Unknown") + """ + if hasattr(tool, "metadata") and isinstance(tool.metadata, dict): + return tool.metadata.get("server_type", "Unknown") + + if hasattr(tool, "description") and isinstance(tool.description, str): + if "Gateway" in tool.description: + return "Gateway" + if "Runtime" in tool.description: + return "Runtime" + + return "Unknown" diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/__init__.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/__init__.py new file mode 100644 index 00000000..0e3f6983 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/__init__.py @@ -0,0 +1,42 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Utils - Utility functions and constants for GAAB Strands +""" + +from .constants import ( + MAX_PARALLEL_FILE_PROCESSING_THREADS, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + RETRY_CONFIG, + SUPPORTED_DOCUMENT_FORMATS, + SUPPORTED_IMAGE_FORMATS, + USE_CASE_CONFIG_RECORD_KEY_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + USE_CASE_UUID, + FileStatus, +) +from .helpers import ( + extract_user_message, + get_file_category_from_extension, + is_supported_file_type, + retry_with_backoff, +) + +__all__ = [ + "FileStatus", + "MAX_PARALLEL_FILE_PROCESSING_THREADS", + "MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR", + "MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR", + "RETRY_CONFIG", + "SUPPORTED_DOCUMENT_FORMATS", + "SUPPORTED_IMAGE_FORMATS", + "USE_CASE_CONFIG_RECORD_KEY_ENV_VAR", + "USE_CASE_CONFIG_TABLE_NAME_ENV_VAR", + "USE_CASE_UUID", + "extract_user_message", + "get_file_category_from_extension", + "is_supported_file_type", + "retry_with_backoff", +] diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/constants.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/constants.py new file mode 100644 index 00000000..6322a7fe --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/constants.py @@ -0,0 +1,36 @@ +# Supported file formats +SUPPORTED_IMAGE_FORMATS = {"png", "jpeg", "jpg", "gif", "webp"} +SUPPORTED_DOCUMENT_FORMATS = {"pdf", "csv", "doc", "docx", "xls", "xlsx", "html", "txt", "md"} + +MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR = "MULTIMODAL_METADATA_TABLE_NAME" +MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR = "MULTIMODAL_DATA_BUCKET" +USE_CASE_CONFIG_TABLE_NAME_ENV_VAR = "USE_CASE_TABLE_NAME" +USE_CASE_CONFIG_RECORD_KEY_ENV_VAR = "USE_CASE_CONFIG_KEY" +USE_CASE_UUID = "USE_CASE_UUID" +MAX_PARALLEL_FILE_PROCESSING_THREADS = 5 +REMAINING_SECONDS_FOR_FILE_ACCESS = 3600 + + +# File status constants +class FileStatus: + PENDING = "pending" + UPLOADED = "uploaded" + DELETED = "deleted" + INVALID = "invalid" + NOT_FOUND = "not_found" + EXPIRING_SOON = "expiring_soon" + + +# Retry configuration constants +RETRY_CONFIG = { + "max_retries": 3, + "back_off_rate": 2, + "initial_delay_ms": 1000, + "max_delay": 60.0, +} + +# Boto3 client configuration constants +BOTO_CONFIG = { + "max_attempts": 5, + "retry_mode": "standard", +} diff --git a/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/helpers.py b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/helpers.py new file mode 100644 index 00000000..aea7d213 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/src/gaab_strands_common/utils/helpers.py @@ -0,0 +1,263 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +File utilities for multimodal file processing and WebSocket payload handling +""" +import json +import logging +import os +import time +from typing import Any, Callable, Dict, Optional + +from botocore.config import Config + +from gaab_strands_common.utils.constants import ( + BOTO_CONFIG, + RETRY_CONFIG, + SUPPORTED_DOCUMENT_FORMATS, + SUPPORTED_IMAGE_FORMATS, +) + + +def get_file_category_from_extension(extension: str) -> str: + """ + Get file category from extension (image, document, etc.) + + Args: + extension: File extension (without dot) + + Returns: + File category string + """ + extension = extension.lower() + + # Image types + if extension in SUPPORTED_IMAGE_FORMATS: + return "image" + + # Document types + elif extension in SUPPORTED_DOCUMENT_FORMATS: + return "document" + + # Default + return "unknown" + + +def is_supported_file_type(extension: str) -> bool: + """ + Check if file extension is supported for multimodal processing + + Args: + extension: File extension (without dot) + + Returns: + True if supported, False otherwise + """ + extension = extension.lower() + + return extension in SUPPORTED_IMAGE_FORMATS or extension in SUPPORTED_DOCUMENT_FORMATS + + +def extract_user_message(payload: Dict[str, Any]) -> str: + """ + Extract user message from AgentCore Runtime payload. + + Args: + payload: Request payload from AgentCore Runtime + + Returns: + str: User message or error message if invalid + + Raises: + ValueError: If payload structure is invalid + """ + if not isinstance(payload, dict): + raise ValueError(f"Payload must be a dictionary, got {type(payload).__name__}") + + if "input" not in payload: + return "Please provide your message in the 'input' field of the request payload." + + user_input = payload["input"] + + if user_input is None or (isinstance(user_input, str) and not user_input.strip()): + return "Please provide your message in the 'input' field of the request payload." + + return str(user_input).strip() + + +logger = logging.getLogger(__name__) + + +def create_boto_config(region: str) -> Config: + """ + Create a Botocore Config object with standard retry settings and user agent. + + This function safely parses the AWS_SDK_USER_AGENT environment variable and + creates a Config object with retry settings from BOTO_CONFIG constants. + + Args: + region: AWS region name + + Returns: + Config: Configured Botocore Config object + + Example: + >>> config = create_boto_config("us-east-1") + >>> client = boto3.client('bedrock-runtime', config=config) + """ + # Parse user agent from environment with safety checks + user_agent_extra = "" + try: + user_agent_json = os.environ.get("AWS_SDK_USER_AGENT", "{}") + user_agent_config = json.loads(user_agent_json) + user_agent_extra = user_agent_config.get("user_agent_extra", "") + if user_agent_extra: + logger.info(f"Using custom user agent: {user_agent_extra}") + except json.JSONDecodeError as e: + logger.warning(f"Invalid AWS_SDK_USER_AGENT format, using default: {e}") + except Exception as e: + logger.warning(f"Error parsing AWS_SDK_USER_AGENT, using default: {e}") + + # Create Config object with retry settings from constants + return Config( + region_name=region, + retries={"max_attempts": BOTO_CONFIG["max_attempts"], "mode": BOTO_CONFIG["retry_mode"]}, + user_agent_extra=user_agent_extra, + ) + + +def build_guardrail_config(bedrock_params) -> Dict[str, str]: + """ + Build guardrail configuration dictionary from Bedrock parameters. + + Returns guardrail configuration only if both identifier and version are present + and non-empty. This ensures partial configurations are not passed to BedrockModel. + + Args: + bedrock_params: BedrockLlmParams object that may contain guardrail fields + + Returns: + Dictionary with guardrail_id and guardrail_version if both present, + empty dictionary otherwise + + Example: + >>> params = BedrockLlmParams( + ... ModelId="amazon.nova-pro-v1:0", + ... GuardrailIdentifier="abc123", + ... GuardrailVersion="1" + ... ) + >>> config = build_guardrail_config(params) + >>> print(config) + {'guardrail_id': 'abc123', 'guardrail_version': '1'} + """ + if ( + hasattr(bedrock_params, "guardrail_identifier") + and bedrock_params.guardrail_identifier + and hasattr(bedrock_params, "guardrail_version") + and bedrock_params.guardrail_version + ): + logger.debug( + f"Applying guardrail: {bedrock_params.guardrail_identifier} " f"v{bedrock_params.guardrail_version}" + ) + return { + "guardrail_id": bedrock_params.guardrail_identifier, + "guardrail_version": bedrock_params.guardrail_version, + } + return {} + + +def _get_retry_config(max_retries: Optional[int], base_delay: Optional[float], max_delay: Optional[float]) -> tuple: + """Get retry configuration with defaults from RETRY_CONFIG.""" + if max_retries is None: + max_retries = RETRY_CONFIG["max_retries"] + if base_delay is None: + base_delay = RETRY_CONFIG["initial_delay_ms"] / 1000.0 # Convert ms to seconds + if max_delay is None: + max_delay = RETRY_CONFIG["max_delay"] + return max_retries, base_delay, max_delay + + +def _calculate_delays(base_delay: float, max_delay: float, max_retries: int) -> list: + """Calculate exponential backoff delays.""" + back_off_rate = RETRY_CONFIG["back_off_rate"] + return [min(base_delay * (back_off_rate**i), max_delay) for i in range(max_retries)] + + +def _should_retry_on_condition( + result: Any, retry_condition: Optional[Callable], attempt: int, max_retries: int +) -> bool: + """Check if retry is needed based on result condition.""" + if not retry_condition or not retry_condition(result): + return False + + if attempt < max_retries: + return True + + logger.warning("Retry condition still met after %d attempts", max_retries + 1) + return False + + +def _handle_retry_condition( + result: Any, retry_condition: Optional[Callable], attempt: int, max_retries: int, delays: list +) -> Optional[Any]: + """Handle retry logic based on condition. Returns result if no retry needed, None if should retry.""" + if not _should_retry_on_condition(result, retry_condition, attempt, max_retries): + if attempt > 0: + logger.info("Function succeeded on attempt %d", attempt + 1) + return result + + delay = delays[attempt] + logger.info("Retry condition met, retrying in %ds (attempt %d/%d)", delay, attempt + 1, max_retries + 1) + time.sleep(delay) + return None + + +def _handle_exception(exception: Exception, attempt: int, max_retries: int, delays: list) -> None: + """Handle exception with retry logic.""" + if attempt < max_retries: + delay = delays[attempt] + logger.warning(f"Attempt {attempt + 1} failed: {exception}. Retrying in {delay}s") + time.sleep(delay) + else: + logger.error(f"All {max_retries + 1} attempts failed. Last error: {exception}") + raise exception + + +def retry_with_backoff( + func: Callable, + retry_condition: Optional[Callable[[Any], bool]] = None, + exception_types: tuple = (Exception,), + max_retries: int = None, + base_delay: float = None, + max_delay: float = None, +) -> Any: + """ + Retry a function with exponential backoff strategy + + Args: + func: Function to retry (should be a callable with no arguments) + max_retries: Maximum number of retry attempts (defaults to RETRY_CONFIG) + base_delay: Base delay in seconds for exponential backoff (defaults to RETRY_CONFIG) + max_delay: Maximum delay in seconds (defaults to RETRY_CONFIG) + retry_condition: Optional function that takes the result and returns True if retry is needed + exception_types: Tuple of exception types to catch and retry on + + Returns: + Result of the function call + + Raises: + The last exception encountered if all retries are exhausted + """ + max_retries, base_delay, max_delay = _get_retry_config(max_retries, base_delay, max_delay) + delays = _calculate_delays(base_delay, max_delay, max_retries) + + for attempt in range(max_retries + 1): # +1 for initial attempt + try: + result = func() + handled_result = _handle_retry_condition(result, retry_condition, attempt, max_retries, delays) + if handled_result is not None: + return handled_result + # If None returned, continue to next iteration (retry) + except exception_types as e: + _handle_exception(e, attempt, max_retries, delays) diff --git a/deployment/ecr/gaab-strands-common/test/README.md b/deployment/ecr/gaab-strands-common/test/README.md new file mode 100644 index 00000000..6f9b2181 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/README.md @@ -0,0 +1,113 @@ +# GAAB Strands Common Library Tests + +This directory contains comprehensive unit tests for the `gaab-strands-common` shared library. + +## Test Coverage + +The test suite covers all major components of the shared library: + +### 1. RuntimeStreaming (`test_runtime_streaming.py`) +- Event text extraction from streaming events +- Content, completion, and error chunk creation +- Tool event handling and emission +- Event skipping logic for duplicates +- Async streaming with tool events +- Error handling and fallback mechanisms +- Synchronous streaming wrapper + +### 2. DynamoDBHelper (`test_ddb_helper.py`) +- DynamoDB table initialization +- Configuration retrieval by key +- MCP server configuration fetching +- Validation of UseCaseType +- Error handling for missing configurations +- Partial failure handling for batch operations + +### 3. Data Models (`test_models.py`) +- Pydantic model validation +- BedrockLlmParams with multiple inference types +- Tool and MCP server reference parsing +- AgentConfig deserialization from DDB +- WorkflowConfig deserialization +- Memory configuration +- Model identifier resolution + +### 4. Tool Wrapper (`test_tool_wrapper.py`) +- ToolUsageEvent creation and serialization +- ToolEventEmitter singleton pattern +- Tool name extraction from various sources +- MCP server metadata handling +- Argument and kwarg filtering +- Tool wrapping for __call__, invoke, and stream methods +- Event emission for start, completion, and error states +- Tool input/output capture and truncation + +### 5. BaseAgent (`test_base_agent.py`) +- Agent initialization with region +- Bedrock model creation for different inference types +- Use case type validation +- Configuration management +- Cross-region inference profile handling + +## Running Tests + +### Run all tests +```bash +cd deployment/ecr/gaab-strands-common +uv run pytest +``` + +### Run with coverage +```bash +uv run pytest --cov=gaab_strands_common --cov-report=html +``` + +### Run specific test file +```bash +uv run pytest test/test_runtime_streaming.py +``` + +### Run specific test class +```bash +uv run pytest test/test_models.py::TestBedrockLlmParams +``` + +### Run specific test +```bash +uv run pytest test/test_tool_wrapper.py::TestWrapToolWithEvents::test_wrap_tool_with_call_method +``` + +## Test Configuration + +Tests are configured via `pytest.ini`: +- Test discovery: `test_*.py` files +- Coverage reporting: terminal, HTML, and XML +- Async test support via pytest-asyncio +- Verbose output enabled + +## Fixtures + +Shared fixtures are defined in `conftest.py`: +- `mock_environment`: Mocks AWS environment variables +- `mock_bedrock_model`: Mocks BedrockModel for testing +- `sample_agent_config`: Sample agent configuration +- `sample_workflow_config`: Sample workflow configuration +- `sample_mcp_config`: Sample MCP server configuration +- `mock_strands_agent`: Mock Strands agent with streaming + +## Requirements Coverage + +These tests satisfy the requirements from the specification: +- **1.1**: RuntimeStreaming chunk creation and streaming logic +- **1.2**: DynamoDBHelper configuration loading +- **1.3**: Model deserialization with `from_ddb_config()` +- **1.4**: `wrap_tool_with_events()` functionality +- **2.1-2.4**: BaseAgent model creation and validation + +## Best Practices + +- All tests use proper mocking to avoid external dependencies +- Tests are isolated and can run in any order +- Async tests use pytest-asyncio markers +- Clear test names describe what is being tested +- Comprehensive edge case coverage diff --git a/deployment/ecr/gaab-strands-common/test/__init__.py b/deployment/ecr/gaab-strands-common/test/__init__.py new file mode 100644 index 00000000..f7d25dcb --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/__init__.py @@ -0,0 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Test package for gaab-strands-common +""" diff --git a/deployment/ecr/gaab-strands-common/test/conftest.py b/deployment/ecr/gaab-strands-common/test/conftest.py new file mode 100644 index 00000000..a5f801b3 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/conftest.py @@ -0,0 +1,132 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Shared test fixtures and configuration +""" + +import pytest +import os +from unittest.mock import Mock, patch + + +@pytest.fixture(autouse=True) +def mock_environment(): + """Mock environment variables for all tests""" + with patch.dict( + os.environ, + { + "AWS_REGION": "us-east-1", + "M2M_IDENTITY_NAME": "test-m2m-identity-provider", + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_SDK_USER_AGENT": '{"user_agent_extra": "test-agent"}', + }, + clear=False, + ): + yield + + +@pytest.fixture(autouse=True) +def mock_requires_access_token(): + """Mock the requires_access_token decorator for all tests""" + # Mock at the source - bedrock_agentcore.identity.auth + with patch("bedrock_agentcore.identity.auth.requires_access_token", lambda **kwargs: lambda func: func): + yield + + +@pytest.fixture(autouse=True) +def clear_tool_events(): + """Clear tool events before each test""" + from gaab_strands_common.tool_wrapper import ToolEventEmitter + + ToolEventEmitter.clear() + yield + ToolEventEmitter.clear() + + +@pytest.fixture +def mock_bedrock_model(): + """Mock BedrockModel for testing""" + with patch("gaab_strands_common.base_agent.BedrockModel") as mock: + yield mock + + +@pytest.fixture +def sample_agent_config(): + """Sample agent configuration for testing""" + return { + "UseCaseName": "TestAgent", + "UseCaseType": "Agent", + "AgentBuilderParams": { + "SystemPrompt": "You are a helpful assistant", + "Tools": [{"ToolId": "web-search"}], + "MCPServers": [{"McpId": "mcp-1"}], + "MemoryConfig": {"LongTermEnabled": False}, + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + }, + } + + +@pytest.fixture +def sample_workflow_config(): + """Sample workflow configuration for testing""" + return { + "WorkflowType": "agents-as-tools", + "WorkflowParams": { + "AgentsAsToolsParams": { + "Agents": [ + { + "AgentParams": { + "SystemPrompt": "You are a specialist", + "Tools": [], + "MCPServers": [{"McpId": "mcp-1"}], + } + } + ] + } + }, + } + + +@pytest.fixture +def sample_mcp_config(): + """Sample MCP server configuration for testing""" + return { + "UseCaseName": "TestMCP", + "UseCaseType": "MCPServer", + "MCPParams": { + "GatewayParams": { + "GatewayUrl": "https://api.example.com", + "GatewayArn": "arn:aws:execute-api:us-east-1:123456789012:abc123", + "GatewayId": "abc123", + "GatewayName": "TestGateway", + "TargetParams": [], + } + }, + } + + +@pytest.fixture +def mock_strands_agent(): + """Mock Strands agent for testing""" + agent = Mock() + agent.name = "test_agent" + + async def mock_stream(message): + yield Mock(data="Hello") + yield Mock(data="World") + + agent.stream_async = Mock(return_value=mock_stream(None)) + agent.return_value = "Non-streaming response" + return agent diff --git a/deployment/ecr/gaab-strands-common/test/custom_tools/test_base_tool.py b/deployment/ecr/gaab-strands-common/test/custom_tools/test_base_tool.py new file mode 100644 index 00000000..cac4bbb9 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/custom_tools/test_base_tool.py @@ -0,0 +1,181 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import pytest +from gaab_strands_common.custom_tools.setup.base_tool import BaseCustomTool +from strands import tool + + +@pytest.fixture +def sample_config(): + """Sample configuration for testing""" + return { + "LlmParams": { + "MultimodalParams": { + "MultimodalEnabled": True, + } + } + } + + +def test_init_success_with_tool_method(sample_config): + """Test successful initialization when subclass has @tool decorated method""" + + class ValidTool(BaseCustomTool): + """A valid tool with @tool decorated method""" + + @tool + def my_tool_method(self, **kwargs): + """A properly decorated tool method""" + return "tool result" + + valid_tool = ValidTool(sample_config, "us-east-1") + + assert valid_tool.config == sample_config + assert valid_tool.region == "us-east-1" + # _requirements and _auto_condition are class attributes set by decorators + assert not hasattr(ValidTool, "_requirements") + assert not hasattr(ValidTool, "_auto_condition") + + +def test_init_success_with_multiple_tool_methods(sample_config): + """Test successful initialization when subclass has multiple @tool decorated methods""" + + class MultiToolClass(BaseCustomTool): + """A tool with multiple @tool decorated methods""" + + @tool + def tool_one(self, **kwargs): + """First tool method""" + return "tool one" + + @tool + def tool_two(self, **kwargs): + """Second tool method""" + return "tool two" + + multi_tool = MultiToolClass(sample_config, "us-east-1") + + assert multi_tool.config == sample_config + assert multi_tool.region == "us-east-1" + + +def test_get_config_param(sample_config): + """Test _get_config_param method""" + + class ValidTool(BaseCustomTool): + @tool + def my_tool(self, **kwargs): + return "result" + + valid_tool = ValidTool(sample_config, "us-east-1") + + assert valid_tool._get_config_param("LlmParams.MultimodalParams.MultimodalEnabled") is True + + # Test non-existing parameter with default and without default + assert valid_tool._get_config_param("Non.Existing.Param", "default") == "default" + + # Test non-existing parameter without default + assert valid_tool._get_config_param("Non.Existing.Param") is None + + # Test nested parameter access + assert valid_tool._get_config_param("LlmParams.MultimodalParams") == {"MultimodalEnabled": True} + + +def test_metadata_class_attribute(sample_config): + """Test metadata class attribute""" + + class ValidTool(BaseCustomTool): + @tool + def my_tool(self, **kwargs): + return "result" + + valid_tool = ValidTool(sample_config, "us-east-1") + + # BaseCustomTool without decorator should not have metadata class attribute + assert not hasattr(BaseCustomTool, "metadata") + # ValidTool also doesn't have metadata unless decorated with @custom_tool + assert not hasattr(ValidTool, "metadata") + + +def test_tool_spec_structure(sample_config): + """Test that @tool decorated methods have proper tool_spec structure""" + + class ValidTool(BaseCustomTool): + @tool + def my_custom_tool(self, **kwargs): + """This is my custom tool description""" + return "result" + + valid_tool = ValidTool(sample_config, "us-east-1") + + # Get the method from the class + method = getattr(ValidTool, "my_custom_tool") + + # Verify it has tool_spec attribute + assert hasattr(method, "tool_spec"), "Method should have tool_spec attribute" + + # Verify tool_spec is a dictionary + assert isinstance(method.tool_spec, dict), "tool_spec should be a dictionary" + + # Verify tool_spec has expected keys + assert "name" in method.tool_spec, "tool_spec should have 'name' key" + assert "description" in method.tool_spec, "tool_spec should have 'description' key" + assert "inputSchema" in method.tool_spec, "tool_spec should have 'inputSchema' key" + + # Verify the name matches the method name + assert method.tool_spec["name"] == "my_custom_tool", "tool_spec name should match method name" + + # Verify description is extracted from docstring + assert "This is my custom tool description" in method.tool_spec["description"] + + +def test_inheritance_tool_validation(sample_config): + """Test that tool validation works correctly with inheritance""" + + class ParentTool(BaseCustomTool): + """Parent tool with a tool method""" + + @tool + def parent_tool_method(self, **kwargs): + """Parent tool""" + return "parent" + + class ChildTool(ParentTool): + """Child tool that inherits parent's tool method""" + + def regular_method(self): + """Not a tool method""" + return "regular" + + # Child should initialize successfully because it inherits parent's tool method + child = ChildTool(sample_config, "us-east-1") + assert child is not None + + # Verify the parent's tool method is accessible + assert hasattr(ChildTool, "parent_tool_method") + assert hasattr(getattr(ChildTool, "parent_tool_method"), "tool_spec") + + +def test_inheritance_with_override(sample_config): + """Test that child can have its own tool methods""" + + class ParentTool(BaseCustomTool): + """Parent tool""" + + @tool + def parent_tool(self, **kwargs): + """Parent tool""" + return "parent" + + class ChildTool(ParentTool): + """Child tool with its own tool method""" + + @tool + def child_tool(self, **kwargs): + """Child tool""" + return "child" + + # Child should have both parent and child tool methods + child = ChildTool(sample_config, "us-east-1") + assert hasattr(ChildTool, "parent_tool") + assert hasattr(ChildTool, "child_tool") diff --git a/deployment/ecr/gaab-strands-common/test/custom_tools/test_decorators.py b/deployment/ecr/gaab-strands-common/test/custom_tools/test_decorators.py new file mode 100644 index 00000000..9489ca26 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/custom_tools/test_decorators.py @@ -0,0 +1,99 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import os + +import pytest +from gaab_strands_common.custom_tools.setup.decorators import auto_attach_when, custom_tool, requires +from gaab_strands_common.custom_tools.setup.metadata import ToolCategory +from gaab_strands_common.custom_tools.setup.registry import CustomToolsRegistry + + +class MockTool: + """Mock tool class for testing""" + + def __init__(self, config=None, region=None): + self.config = config or {} + self.region = region or "us-east-1" + + +def test_custom_tool_decorator(): + """Test @custom_tool decorator""" + # Clear registry before test + CustomToolsRegistry.clear() + + @custom_tool( + tool_id="test-tool", + name="Test Tool", + description="A test tool", + category=ToolCategory.GENERAL, + version="1.0.0", + ) + class TestTool(MockTool): + pass + + # Check that tool was registered + assert "test-tool" in CustomToolsRegistry.list_tool_ids() + registered_tool = CustomToolsRegistry.get_tool("test-tool") + assert registered_tool == TestTool + + # Check metadata was set + assert hasattr(TestTool, "metadata") + assert TestTool.metadata.tool_id == "test-tool" + assert TestTool.metadata.name == "Test Tool" + assert TestTool.metadata.description == "A test tool" + assert TestTool.metadata.category == ToolCategory.GENERAL + assert TestTool.metadata.version == "1.0.0" + + +def test_custom_tool_decorator_without_category(): + """Test @custom_tool decorator without category""" + # Clear registry before test + CustomToolsRegistry.clear() + + @custom_tool(tool_id="test-tool-2", name="Test Tool 2", description="Another test tool") + class TestTool2(MockTool): + pass + + # Check metadata was set with default category + assert hasattr(TestTool2, "metadata") + assert TestTool2.metadata.category == ToolCategory.GENERAL + + +def test_requires_decorator(): + """Test @requires decorator""" + + @requires(env_vars=["TEST_ENV_VAR"], config_params=["Test.Param"]) + class TestTool(MockTool): + pass + + # Check requirements were set + assert hasattr(TestTool, "_requirements") + assert TestTool._requirements.env_vars == ["TEST_ENV_VAR"] + assert TestTool._requirements.config_params == ["Test.Param"] + + +def test_requires_decorator_empty(): + """Test @requires decorator with empty requirements""" + + @requires() + class TestTool(MockTool): + pass + + assert hasattr(TestTool, "_requirements") + assert TestTool._requirements.env_vars == [] + assert TestTool._requirements.config_params == [] + + +def test_auto_attach_when_decorator(): + """Test @auto_attach_when decorator""" + + def test_condition(config): + return True + + @auto_attach_when(test_condition) + class TestTool(MockTool): + pass + + # Check auto condition was set + assert hasattr(TestTool, "_auto_condition") + assert TestTool._auto_condition == test_condition diff --git a/deployment/ecr/gaab-strands-common/test/custom_tools/test_metadata.py b/deployment/ecr/gaab-strands-common/test/custom_tools/test_metadata.py new file mode 100644 index 00000000..5ff769e7 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/custom_tools/test_metadata.py @@ -0,0 +1,58 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import pytest +from gaab_strands_common.custom_tools.setup.metadata import ToolCategory, ToolMetadata, ToolRequirements + + +def test_tool_metadata_creation(): + """Test ToolMetadata creation with all parameters""" + metadata = ToolMetadata( + tool_id="test-tool", name="Test Tool", description="A test tool", category=ToolCategory.GENERAL, version="1.0.0" + ) + + assert metadata.tool_id == "test-tool" + assert metadata.name == "Test Tool" + assert metadata.description == "A test tool" + assert metadata.category == ToolCategory.GENERAL + assert metadata.version == "1.0.0" + + +def test_tool_metadata_default_version(): + """Test ToolMetadata creation with default version""" + metadata = ToolMetadata(tool_id="test-tool", name="Test Tool", description="A test tool") + + assert metadata.tool_id == "test-tool" + assert metadata.name == "Test Tool" + assert metadata.description == "A test tool" + assert metadata.category == ToolCategory.GENERAL + assert metadata.version == "1.0.0" + + +def test_tool_requirements_creation(): + """Test ToolRequirements creation with all parameters""" + requirements = ToolRequirements(env_vars=["VAR1", "VAR2"], config_params=["Param1", "Param2"]) + + assert requirements.env_vars == ["VAR1", "VAR2"] + assert requirements.config_params == ["Param1", "Param2"] + + +def test_tool_requirements_default_values(): + """Test ToolRequirements creation with default values""" + requirements = ToolRequirements() + + assert requirements.env_vars == [] + assert requirements.config_params == [] + + +def test_tool_requirements_separate_instances(): + """Test that each ToolRequirements instance gets its own lists""" + req1 = ToolRequirements() + req2 = ToolRequirements() + + # Modify one instance + req1.env_vars.append("TEST_VAR") + + # Verify the other instance is not affected + assert req1.env_vars == ["TEST_VAR"] + assert req2.env_vars == [] + assert req1.env_vars is not req2.env_vars diff --git a/deployment/ecr/gaab-strands-common/test/custom_tools/test_registry.py b/deployment/ecr/gaab-strands-common/test/custom_tools/test_registry.py new file mode 100644 index 00000000..5186f313 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/custom_tools/test_registry.py @@ -0,0 +1,122 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import pytest +from gaab_strands_common.custom_tools.setup.registry import CustomToolsRegistry + + +def create_mock_tool_class(tool_id="mock-tool", name=None): + """Factory function to create mock tool classes""" + if name is None: + name = f"Mock Tool {tool_id}" + + metadata_class = type("metadata", (), {"tool_id": tool_id, "name": name}) + MockTool = type("MockTool", (), {"metadata": metadata_class}) + + return MockTool + + +@pytest.fixture(autouse=True) +def clear_registry(): + """Clear registry before each test""" + CustomToolsRegistry.clear() + + +def test_register_tool(): + """Test registering a tool""" + tool_class = create_mock_tool_class() + registered_tool = CustomToolsRegistry.register(tool_class) + + # Check that the tool was registered and returned + assert registered_tool == tool_class + assert CustomToolsRegistry.get_tool("mock-tool") == tool_class + assert "mock-tool" in CustomToolsRegistry.list_tool_ids() + + +def test_register_duplicate_tool(): + """Test registering a duplicate tool raises ValueError""" + tool_class1 = create_mock_tool_class() + tool_class2 = create_mock_tool_class() # Same tool_id + + # Register first tool + CustomToolsRegistry.register(tool_class1) + + # Try to register second tool with same ID + with pytest.raises(ValueError, match="Tool ID 'mock-tool' already registered"): + CustomToolsRegistry.register(tool_class2) + + +def test_get_tool(): + """Test getting a registered tool""" + tool_class = create_mock_tool_class() + CustomToolsRegistry.register(tool_class) + + # Get the tool + retrieved_tool = CustomToolsRegistry.get_tool("mock-tool") + assert retrieved_tool == tool_class + + # Try to get non-existent tool + non_existent_tool = CustomToolsRegistry.get_tool("non-existent") + assert non_existent_tool is None + + +def test_get_all_tools(): + """Test getting all registered tools""" + # Register multiple tools + tool1 = create_mock_tool_class("tool-1") + tool2 = create_mock_tool_class("tool-2") + CustomToolsRegistry.register(tool1) + CustomToolsRegistry.register(tool2) + + all_tools = CustomToolsRegistry.get_all_tools() + assert len(all_tools) == 2 + assert all_tools["tool-1"] == tool1 + assert all_tools["tool-2"] == tool2 + + +def test_list_tool_ids(): + """Test listing all tool IDs""" + # Register multiple tools + tool1 = create_mock_tool_class("tool-1") + tool2 = create_mock_tool_class("tool-2") + CustomToolsRegistry.register(tool1) + CustomToolsRegistry.register(tool2) + + tool_ids = CustomToolsRegistry.list_tool_ids() + assert len(tool_ids) == 2 + assert "tool-1" in tool_ids + assert "tool-2" in tool_ids + + +def test_clear_registry(): + """Test clearing the registry""" + # Register a tool + tool_class = create_mock_tool_class() + CustomToolsRegistry.register(tool_class) + + # Verify tool is registered + assert len(CustomToolsRegistry.list_tool_ids()) == 1 + + # Clear registry + CustomToolsRegistry.clear() + + # Verify registry is empty + assert len(CustomToolsRegistry.list_tool_ids()) == 0 + assert CustomToolsRegistry.get_all_tools() == {} + + +def test_register_duplicate_tool_name(): + """Test registering tools with duplicate names (deduplication)""" + # Register first tool + tool_class1 = create_mock_tool_class("tool-1", "Duplicate Name") + CustomToolsRegistry.register(tool_class1) + + # Register second tool with same name but different ID + tool_class2 = create_mock_tool_class("tool-2", "Duplicate Name") + CustomToolsRegistry.register(tool_class2) + + # Verify only the first tool is registered + assert len(CustomToolsRegistry.list_tool_ids()) == 1 + assert "tool-1" in CustomToolsRegistry.list_tool_ids() + assert "tool-2" not in CustomToolsRegistry.list_tool_ids() + assert CustomToolsRegistry.get_tool("tool-1") == tool_class1 + assert CustomToolsRegistry.get_tool("tool-2") is None diff --git a/deployment/ecr/gaab-strands-common/test/custom_tools/test_s3_file_reader.py b/deployment/ecr/gaab-strands-common/test/custom_tools/test_s3_file_reader.py new file mode 100644 index 00000000..0a2bdc6a --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/custom_tools/test_s3_file_reader.py @@ -0,0 +1,368 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import os +from unittest.mock import Mock, patch + +import boto3 +import pytest +from botocore.exceptions import ClientError +from gaab_strands_common.custom_tools.s3_file_reader import S3FileReaderTool +from moto import mock_aws + + +@pytest.fixture(autouse=True) +def setup_environment(mock_environment): + """Setup environment variables for all tests""" + with patch.dict( + os.environ, + { + "MULTIMODAL_DATA_BUCKET": "test-bucket", + "MULTIMODAL_METADATA_TABLE_NAME": "test-table", + "USE_CASE_UUID": "test-use-case-uuid", + }, + clear=False, + ): + yield + + +@pytest.fixture +def sample_config(): + """Sample configuration for testing""" + return {"LlmParams": {"MultimodalParams": {"MultimodalEnabled": True}}} + + +@pytest.fixture +def tool(sample_config): + """Create S3FileReaderTool instance for testing""" + return S3FileReaderTool(sample_config, "us-east-1") + + +def test_successful_initialization(tool): + """Test successful initialization with all required environment variables""" + assert tool.bucket_name == "test-bucket" + assert tool.metadata_table_name == "test-table" + assert tool.use_case_uuid == "test-use-case-uuid" + assert tool.region == "us-east-1" + assert tool.s3_client is not None + + +def test_initialization_missing_env_vars(sample_config): + """Test initialization fails with missing environment variables""" + with patch.dict(os.environ, {}, clear=True): + with pytest.raises(ValueError, match="Missing required environment variables"): + S3FileReaderTool(sample_config, "us-east-1") + + +def test_get_file_extension(tool): + """Test file extension extraction from S3 keys""" + # Standard cases + assert tool.get_file_extension("document.pdf") == "pdf" + assert tool.get_file_extension("data.csv") == "csv" + + # Complex paths + assert tool.get_file_extension("folder/subfolder/file.txt") == "txt" + assert tool.get_file_extension("file.with.dots.xlsx") == "xlsx" + + # Edge cases + assert tool.get_file_extension("no_extension") == "unsupported" + assert tool.get_file_extension("") == "unsupported" + + +def test_determine_file_type(tool): + """Test file type determination based on extension""" + assert tool.determine_file_type("png") == "image" + assert tool.determine_file_type("jpg") == "image" + assert tool.determine_file_type("gif") == "image" + assert tool.determine_file_type("webp") == "image" + + # Document formats + assert tool.determine_file_type("pdf") == "document" + assert tool.determine_file_type("txt") == "document" + assert tool.determine_file_type("md") == "document" + assert tool.determine_file_type("csv") == "document" + assert tool.determine_file_type("docx") == "document" + + # Unsupported format + with pytest.raises(ValueError, match="Unsupported file type"): + tool.determine_file_type("exe") + + +@mock_aws +def test_image_processing_png(tool): + """Test successful PNG image processing""" + filename = "11111111-11111111-11111111-11111111-111111111111.png" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + test_content = b"fake png image content" + s3.put_object(Bucket="test-bucket", Key=filename, Body=test_content) + + # Replace tool's S3 client with mocked one + tool.s3_client = s3 + + tool_use = {"toolUseId": "test-png", "input": {"s3_key": filename}} + result = tool.s3_file_reader(tool_use) + + # Verify ToolResult structure + assert result["toolUseId"] == "test-png" + assert result["status"] == "success" + assert len(result["content"]) == 1 + + # Verify image content + image_content = result["content"][0]["image"] + assert image_content["format"] == "png" + assert image_content["source"]["bytes"] == test_content + + +@mock_aws +def test_image_processing_jpg_normalization(tool): + """Test JPG image processing with JPEG normalization""" + filename = "11111111-11111111-11111111-11111111-111111111111.jpg" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + test_content = b"fake jpg image content" + s3.put_object(Bucket="test-bucket", Key=filename, Body=test_content) + + tool.s3_client = s3 + + tool_use = {"toolUseId": "test-jpg", "input": {"s3_key": filename}} + result = tool.s3_file_reader(tool_use) + + # Verify JPG is normalized to JPEG + assert result["status"] == "success" + assert result["content"][0]["image"]["format"] == "jpeg" # Normalized + assert result["content"][0]["image"]["source"]["bytes"] == test_content + + +@mock_aws +def test_no_normalization_for_valid_formats(tool): + """Test document processing with format normalization""" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + tool.s3_client = s3 + filename = "11111111-11111111-11111111-11111111-111111111111.csv" + test_content = b"fake document content" + s3.put_object(Bucket="test-bucket", Key=filename, Body=test_content) + + tool_use = {"toolUseId": f"test-csv", "input": {"s3_key": filename}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "success" + assert result["content"][0]["document"]["format"] == "csv" + + +@mock_aws +def test_document_processing_pdf(tool): + """Test successful PDF document processing""" + filename = "11111111-11111111-11111111-11111111-111111111111.pdf" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + test_content = b"fake pdf document content" + s3.put_object(Bucket="test-bucket", Key=filename, Body=test_content) + + tool.s3_client = s3 + + tool_use = {"toolUseId": "test-pdf", "input": {"s3_key": filename}} + result = tool.s3_file_reader(tool_use) + + # Verify ToolResult structure + assert result["toolUseId"] == "test-pdf" + assert result["status"] == "success" + assert len(result["content"]) == 1 + + # Verify document content + document_content = result["content"][0]["document"] + assert document_content["format"] == "pdf" + assert document_content["name"] == "11111111-11111111-11111111-11111111-111111111111" # Filename without extension + assert document_content["source"]["bytes"] == test_content + + +def test_missing_s3_key(tool): + """Test error when s3_key is missing from input""" + tool_use = {"toolUseId": "test-missing", "input": {}} + result = tool.s3_file_reader(tool_use) + + assert result["toolUseId"] == "test-missing" + assert result["status"] == "error" + assert "S3 key is required" in result["content"][0]["text"] + + +def test_empty_s3_key(tool): + """Test error when s3_key is empty""" + tool_use = {"toolUseId": "test-empty", "input": {"s3_key": ""}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "error" + assert "S3 key cannot be empty" in result["content"][0]["text"] + + +def test_s3_uri_rejected(tool): + """Test that S3 URIs are rejected""" + tool_use = {"toolUseId": "test-uri", "input": {"s3_key": "s3://bucket/key.txt"}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "error" + assert "Invalid input" in result["content"][0]["text"] + assert "Please provide only the S3 key" in result["content"][0]["text"] + + +@mock_aws +def test_unsupported_file_format(tool): + """Test error for unsupported file formats""" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + test_content = b"fake executable content" + s3.put_object(Bucket="test-bucket", Key="program.exe", Body=test_content) + + tool.s3_client = s3 + + tool_use = {"toolUseId": "test-unsupported", "input": {"s3_key": "program.exe"}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "error" + assert "Unsupported file type" in result["content"][0]["text"] + assert "program.exe" in result["content"][0]["text"] + + +@mock_aws +def test_file_not_found(tool): + """Test error when file doesn't exist in S3""" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + tool.s3_client = s3 + + tool_use = {"toolUseId": "test-not-found", "input": {"s3_key": "nonexistent.txt"}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "error" + assert "not found" in result["content"][0]["text"] + assert "nonexistent.txt" in result["content"][0]["text"] + + +def test_aws_access_denied(tool): + """Test handling of AWS access denied errors""" + + def mock_get_object(**kwargs): + raise ClientError( + error_response={"Error": {"Code": "AccessDenied", "Message": "Access Denied"}}, operation_name="GetObject" + ) + + with patch.object(tool.s3_client, "get_object", side_effect=mock_get_object): + tool_use = {"toolUseId": "test-access", "input": {"s3_key": "restricted.txt"}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "error" + assert result["content"][0]["text"] == "File 'restricted.txt' not found. The file may have been deleted or moved." + + +def test_malformed_tool_use_missing_tool_use_id(tool): + """Test handling of malformed ToolUse objects missing toolUseId""" + result = tool.s3_file_reader({"input": {"s3_key": "test.txt"}}) + + assert result["status"] == "error" + assert result["toolUseId"] == "unknown" + assert "Unexpected error" in result["content"][0]["text"] + + +def test_malformed_tool_use_missing_input(tool): + """Test handling of malformed ToolUse objects missing input""" + result = tool.s3_file_reader({"toolUseId": "test-id"}) + + assert result["status"] == "error" + assert result["toolUseId"] == "test-id" + assert "Unexpected error" in result["content"][0]["text"] + + +@mock_aws +def test_bedrock_compliance_document_names(tool): + """Test that document names are Bedrock-compliant (no file extensions)""" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + tool.s3_client = s3 + + # Test various filenames to ensure Bedrock compliance + test_files = [ + ("invoice.pdf", "invoice"), + ("data-2024.csv", "data-2024"), + ("file_with_underscores.txt", "file_with_underscores"), + ] + + for filename, expected_name in test_files: + test_content = b"bedrock compliance test content" + s3.put_object(Bucket="test-bucket", Key=filename, Body=test_content) + + tool_use = {"toolUseId": f"test-{filename}", "input": {"s3_key": filename}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "success" + document_name = result["content"][0]["document"]["name"] + + # No file extension, allowed characters only, no periods in the end + assert document_name == expected_name + assert not document_name.endswith(f".{filename.split('.')[-1]}") + assert not document_name.endswith(".") + + +@mock_aws +def test_full_document_s3_key_structure(tool): + """Test processing files with complex S3 key structures""" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + tool.s3_client = s3 + + # Test with current S3 key structure containing UUIDs - for documents + complex_key = "11111111-11111111-11111111-11111111-111111111111/22222222-22222222-22222222-22222222-222222222222/33333333-33333333-33333333-33333333-333333333333/44444444-44444444-44444444-44444444-444444444444/55555555-55555555-55555555-55555555-555555555555.pdf" + test_content = b"complex path document content" + s3.put_object(Bucket="test-bucket", Key=complex_key, Body=test_content) + + tool_use = {"toolUseId": "test-complex", "input": {"s3_key": complex_key}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "success" + assert result["content"] == [ + { + "document": { + "format": "pdf", + "name": "55555555-55555555-55555555-55555555-555555555555", + "source": { + "bytes": b"complex path document content", + }, + }, + }, + ] + + +@mock_aws +def test_full_image_s3_key_structure(tool): + """Test processing files with complex S3 key structures""" + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="test-bucket") + + tool.s3_client = s3 + + # Test with current S3 key structure containing UUIDs - for images + complex_key = "11111111-11111111-11111111-11111111-111111111111/22222222-22222222-22222222-22222222-222222222222/33333333-33333333-33333333-33333333-333333333333/44444444-44444444-44444444-44444444-444444444444/66666666-66666666-66666666-66666666-666666666666.png" + test_content = b"complex path image content" + s3.put_object(Bucket="test-bucket", Key=complex_key, Body=test_content) + + tool_use = {"toolUseId": "test-complex", "input": {"s3_key": complex_key}} + result = tool.s3_file_reader(tool_use) + + assert result["status"] == "success" + assert result["content"] == [ + { + "image": { + "format": "png", + "source": { + "bytes": b"complex path image content", + }, + }, + }, + ] diff --git a/deployment/ecr/gaab-strands-common/test/multimodal/test_file_handler.py b/deployment/ecr/gaab-strands-common/test/multimodal/test_file_handler.py new file mode 100644 index 00000000..52a40963 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/multimodal/test_file_handler.py @@ -0,0 +1,657 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import os +import time +from unittest.mock import patch + +import boto3 +import pytest +from gaab_strands_common.models import FileReference +from gaab_strands_common.multimodal.file_handler import FileHandler +from gaab_strands_common.utils.constants import ( + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + USE_CASE_UUID, + FileStatus, +) +from moto import mock_aws + + +@pytest.fixture(autouse=True) +def setup_environment(mock_environment): + """Setup environment variables for all tests""" + with patch.dict( + os.environ, + { + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR: "test-metadata-table", + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR: "test-bucket", + USE_CASE_UUID: "test-use-case-id", + }, + clear=False, + ): + yield + + +@pytest.fixture +def file_handler(setup_environment): + """Create a FileHandler instance for testing""" + return FileHandler("us-east-1") + + +def test_init_success(): + """Test successful initialization""" + with patch.dict( + os.environ, + { + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR: "test-table", + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR: "test-bucket", + }, + ): + handler = FileHandler("us-east-1") + + assert handler.region == "us-east-1" + assert handler.metadata_table_name == "test-table" + assert handler.bucket_name == "test-bucket" + + +@mock_aws +def test_validate_all_files_empty_files(file_handler): + """Test validate_all_files with empty files list""" + payload = {"files": [], "requestContext": {"authorizer": {"UserId": "test-user-id"}}} + + result = file_handler.validate_all_files(payload) + + assert result == [] + + +@mock_aws +def test_validate_all_files_no_files_key(file_handler): + """Test validate_all_files with no files key""" + payload = {"requestContext": {"authorizer": {"UserId": "test-user-id"}}} + + result = file_handler.validate_all_files(payload) + + assert result == [] + + +@mock_aws +def test_validate_all_files_invalid_file_data(file_handler): + """Test validate_all_files with invalid file data""" + payload = { + "files": [ + {"invalid": "data"}, # Missing required keys + "not_a_dict", # Not a dict + ], + "requestContext": {"authorizer": {"UserId": "test-user-id"}}, + } + + result = file_handler.validate_all_files(payload) + + assert result == [] + + +@mock_aws +def test_validate_all_files_valid_files(): + """Test validate_all_files with valid files""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Create FileHandler after table is created + file_handler = FileHandler("us-east-1") + + # Add a valid file entry with more than 1 hour remaining + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "test-file.txt", + "status": FileStatus.UPLOADED, + "ttl": int(time.time()) + 7200, # 2 hours from now + "uploadTimestamp": int(time.time() * 1000), # Current time in milliseconds + } + ) + + payload = { + "files": [{"fileReference": "test-file-ref", "fileName": "test-file.txt"}], + "conversationId": "test-conversation-id", + "messageId": "test-message-id", + "userId": "test-user-id", + } + + result = file_handler.validate_all_files(payload) + + assert len(result) == 1 + expected_s3_key = "test-use-case-id/test-user-id/test-conversation-id/test-message-id/test-file-ref" + assert result[0]["text"] == f"File available for reading: test-file.txt with S3 key '{expected_s3_key}'" + + +@mock_aws +def test_validate_all_files_expired_file(file_handler): + """Test validate_all_files with expired file""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Add an expired file entry + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "expired-file.txt", + "status": FileStatus.UPLOADED, + "ttl": int(time.time()) - 3600, # 1 hour ago + } + ) + + payload = { + "files": [{"fileReference": "expired-file-ref", "fileName": "expired-file.txt"}], + "conversationId": "test-conversation-id", + "messageId": "test-message-id", + "userId": "test-user-id", + } + + result = file_handler.validate_all_files(payload) + + assert len(result) == 1 + assert result[0]["text"] == "File expired-file.txt is not available. It was either deleted or it has expired." + + +@mock_aws +def test_validate_all_files_expiring_soon_file(): + """Test validate_all_files with file expiring in less than 1 hour""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Create FileHandler after table is created + file_handler = FileHandler("us-east-1") + + # Add a file expiring in 30 minutes + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "expiring-file.txt", + "status": FileStatus.UPLOADED, + "ttl": int(time.time()) + 1800, # 30 minutes from now + } + ) + + payload = { + "files": [{"fileReference": "expiring-file-ref", "fileName": "expiring-file.txt"}], + "conversationId": "test-conversation-id", + "messageId": "test-message-id", + "userId": "test-user-id", + } + + result = file_handler.validate_all_files(payload) + + assert len(result) == 1 + assert result[0]["text"] == "File expiring-file.txt is not available. It was either deleted or it has expired." + + +@mock_aws +def test_validate_single_file_not_found(): + """Test _validate_single_file with file not found in metadata""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Create FileHandler after table is created + file_handler = FileHandler("us-east-1") + + file_ref = FileReference(fileReference="test-ref", fileName="non-existent-file.txt") + + result = file_handler._validate_single_file( + file_ref, "test-use-case-id", "test-user-id", "test-conversation-id", "test-message-id" + ) + + assert result["is_valid"] is False + assert result["reason"] == "No metadata found" + + +@mock_aws +def test_validate_single_file_deleted_status(): + """Test _validate_single_file with deleted file status""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Create FileHandler after table is created + file_handler = FileHandler("us-east-1") + + # Add a deleted file entry + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "deleted-file.txt", + "status": FileStatus.DELETED, + "ttl": int(time.time()) + 3600, + } + ) + + file_ref = FileReference(fileReference="test-ref", fileName="deleted-file.txt") + + result = file_handler._validate_single_file( + file_ref, "test-use-case-id", "test-user-id", "test-conversation-id", "test-message-id" + ) + + assert result["is_valid"] is False + assert result["reason"] == "File has been deleted." + + +@mock_aws +def test_validate_single_file_invalid_status(): + """Test _validate_single_file with invalid file status""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Create FileHandler after table is created + file_handler = FileHandler("us-east-1") + + # Add an invalid file entry + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "invalid-file.txt", + "status": FileStatus.INVALID, + "ttl": int(time.time()) + 7200, + } + ) + + file_ref = FileReference(fileReference="test-ref", fileName="invalid-file.txt") + + result = file_handler._validate_single_file( + file_ref, "test-use-case-id", "test-user-id", "test-conversation-id", "test-message-id" + ) + + assert result["is_valid"] is False + assert result["reason"] == "File is not available for use due to constraint violations." + + +@mock_aws +def test_validate_single_file_pending_then_uploaded(): + """Test _validate_single_file with file that starts pending then becomes uploaded""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Create FileHandler after table is created + file_handler = FileHandler("us-east-1") + + file_ref = FileReference(fileReference="test-ref", fileName="pending-file.txt") + file_key = "test-use-case-id/test-user-id/test-conversation-id/test-message-id" + + # Mock the get_item response to simulate pending then uploaded + call_count = 0 + + def mock_get_item(**kwargs): + nonlocal call_count + call_count += 1 + + if call_count == 1: + # First call - file is pending + return { + "Item": { + "fileKey": file_key, + "fileName": "pending-file.txt", + "status": FileStatus.PENDING, + "ttl": int(time.time()) + 3600, + } + } + else: + # Second call - file is uploaded + return { + "Item": { + "fileKey": file_key, + "fileName": "pending-file.txt", + "status": FileStatus.UPLOADED, + "ttl": int(time.time()) + 3600, # More than 1 hour remaining + } + } + + with patch.object(file_handler.metadata_table, "get_item", side_effect=mock_get_item): + result = file_handler._validate_single_file( + file_ref, "test-use-case-id", "test-user-id", "test-conversation-id", "test-message-id" + ) + + assert result["is_valid"] is True + assert "s3_key" in result + + +def test_generate_s3_key(file_handler): + """Test _generate_s3_key method""" + result = file_handler._generate_s3_key("usecase-123", "user-456", "conversation-789", "message-012", "file-ref-345") + + expected = "usecase-123/user-456/conversation-789/message-012/file-ref-345" + assert result == expected + + +def test_generate_metadata_key(file_handler): + """Test _generate_metadata_key method""" + result = file_handler._generate_metadata_key("usecase-123", "user-456", "conversation-789", "message-012") + + expected = "usecase-123/user-456/conversation-789/message-012" + assert result == expected + + +@mock_aws +def test_validate_single_file_expiring_soon(): + """Test _validate_single_file with file expiring in less than 1 hour""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Create FileHandler after table is created + file_handler = FileHandler("us-east-1") + + # Add a file that expires in 30 minutes (less than 1 hour) + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "expiring-soon-file.txt", + "status": FileStatus.UPLOADED, + "ttl": int(time.time()) + 1800, # 30 minutes from now + } + ) + + file_ref = FileReference(fileReference="test-ref", fileName="expiring-soon-file.txt") + + result = file_handler._validate_single_file( + file_ref, "test-use-case-id", "test-user-id", "test-conversation-id", "test-message-id" + ) + + assert result["is_valid"] is False + assert result["reason"].startswith("File expires in") + assert "seconds (less than 1 hour)" in result["reason"] + + +@mock_aws +def test_validate_single_file_valid_with_time_remaining(): + """Test _validate_single_file with file that has more than 1 hour remaining""" + # Setup DynamoDB mock + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + # Create FileHandler after table is created + file_handler = FileHandler("us-east-1") + + # Add a file that expires in 2 hours (more than 1 hour) + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "valid-file.txt", + "status": FileStatus.UPLOADED, + "ttl": int(time.time()) + 7200, # 2 hours from now + "uploadTimestamp": int(time.time() * 1000), # Current time in milliseconds + } + ) + + file_ref = FileReference(fileReference="test-ref", fileName="valid-file.txt") + + result = file_handler._validate_single_file( + file_ref, "test-use-case-id", "test-user-id", "test-conversation-id", "test-message-id" + ) + + assert result["is_valid"] is True + assert "s3_key" in result + assert "reason" not in result + + +@mock_aws +def test_validate_all_files_with_deleted_file(): + """Test validate_all_files returns informational message for deleted files (no S3 key)""" + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + test_file_handler = FileHandler("us-east-1") + + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "deleted-file.jpg", + "status": FileStatus.DELETED, + "ttl": int(time.time()) + 3600, + } + ) + + payload = { + "files": [{"fileReference": "deleted-file-ref", "fileName": "deleted-file.jpg"}], + "conversationId": "test-conversation-id", + "messageId": "test-message-id", + "userId": "test-user-id", + } + + result = test_file_handler.validate_all_files(payload) + + # Should return a content block with informational message + assert len(result) == 1 + assert result[0]["text"] == "File deleted-file.jpg is not available. It was either deleted or it has expired." + + +@mock_aws +def test_validate_all_files_mixed_valid_and_invalid(): + """Test validate_all_files with mix of valid and invalid files""" + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + test_file_handler = FileHandler("us-east-1") + + # Add valid file + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "valid-file.jpg", + "status": FileStatus.UPLOADED, + "ttl": int(time.time()) + 7200, + } + ) + + # Add deleted file + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "deleted-file.pdf", + "status": FileStatus.DELETED, + "ttl": int(time.time()) + 3600, + } + ) + + # Add invalid file + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "invalid-file.png", + "status": FileStatus.INVALID, + "ttl": int(time.time()) + 3600, + } + ) + + payload = { + "files": [ + {"fileReference": "valid-file-ref", "fileName": "valid-file.jpg"}, + {"fileReference": "deleted-file-ref", "fileName": "deleted-file.pdf"}, + {"fileReference": "invalid-file-ref", "fileName": "invalid-file.png"}, + ], + "conversationId": "test-conversation-id", + "messageId": "test-message-id", + "userId": "test-user-id", + } + + result = test_file_handler.validate_all_files(payload) + + # Should return content blocks for all 3 files + assert len(result) == 3 + + # Extract all text messages + result_texts = {r["text"] for r in result} + expected_s3_key = "test-use-case-id/test-user-id/test-conversation-id/test-message-id/valid-file-ref" + expected_messages = { + f"File available for reading: valid-file.jpg with S3 key '{expected_s3_key}'", + "File deleted-file.pdf is not available. It was either deleted or it has expired.", + "File invalid-file.png is not available. It was either deleted or it has expired.", + } + + assert result_texts == expected_messages + + +@mock_aws +def test_validate_all_files_expired_file_message(): + """Test validate_all_files returns informational message for expired files""" + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + table = dynamodb.create_table( + TableName="test-metadata-table", + KeySchema=[ + {"AttributeName": "fileKey", "KeyType": "HASH"}, + {"AttributeName": "fileName", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "fileKey", "AttributeType": "S"}, + {"AttributeName": "fileName", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + ) + + test_file_handler = FileHandler("us-east-1") + + # Add expired file (TTL in the past) + table.put_item( + Item={ + "fileKey": "test-use-case-id/test-user-id/test-conversation-id/test-message-id", + "fileName": "expired-file.jpg", + "status": FileStatus.UPLOADED, + "ttl": int(time.time()) - 3600, # 1 hour ago + } + ) + + payload = { + "files": [{"fileReference": "expired-file-ref", "fileName": "expired-file.jpg"}], + "conversationId": "test-conversation-id", + "messageId": "test-message-id", + "userId": "test-user-id", + } + + result = test_file_handler.validate_all_files(payload) + + assert len(result) == 1 + assert result[0]["text"] == "File expired-file.jpg is not available. It was either deleted or it has expired." diff --git a/deployment/ecr/gaab-strands-common/test/multimodal/test_multimodal_processor.py b/deployment/ecr/gaab-strands-common/test/multimodal/test_multimodal_processor.py new file mode 100644 index 00000000..d3633477 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/multimodal/test_multimodal_processor.py @@ -0,0 +1,214 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tests for MultimodalRequestProcessor class +""" + +from unittest.mock import Mock, patch + +import pytest +from gaab_strands_common.models import UseCaseConfig +from gaab_strands_common.multimodal.multimodal_processor import MultimodalRequestProcessor + + +@pytest.fixture +def multimodal_processor(): + """Create a MultimodalRequestProcessor instance for testing""" + return MultimodalRequestProcessor("us-east-1") + + +def test_init_success(): + """Test successful initialization""" + processor = MultimodalRequestProcessor("us-west-2") + + assert processor.region == "us-west-2" + + +def test_process_multimodal_request_with_files(multimodal_processor): + """Test process_multimodal_request with files""" + payload = {"input": "Analyze this document", "files": [{"fileReference": "file-ref-1", "fileName": "document.pdf"}]} + + # Mock the FileHandler.validate_all_files method to return content blocks + mock_content_blocks = [{"text": "File available: document.pdf at s3://test-bucket/test-key"}] + + with patch("gaab_strands_common.multimodal.multimodal_processor.FileHandler") as mock_file_handler_class: + mock_file_handler_instance = Mock() + mock_file_handler_instance.validate_all_files.return_value = mock_content_blocks + mock_file_handler_class.return_value = mock_file_handler_instance + + result = multimodal_processor.process_multimodal_request(payload) + + # Should return content blocks + assert isinstance(result, list) + assert len(result) == 2 # User message + file content block + + assert result[0]["text"] == "Analyze this document" # User query comes first + assert "File available" in result[1]["text"] # File content block comes second + + +def test_process_multimodal_request_no_valid_files(multimodal_processor): + """Test process_multimodal_request when no valid files are found""" + payload = {"input": "Analyze this document", "files": [{"fileReference": "file-ref-1", "fileName": "document.pdf"}]} + + # Mock the FileHandler.validate_all_files method to return empty list + with patch("gaab_strands_common.multimodal.multimodal_processor.FileHandler") as mock_file_handler_class: + mock_file_handler_instance = Mock() + mock_file_handler_instance.validate_all_files.return_value = [] + mock_file_handler_class.return_value = mock_file_handler_instance + + # Should raise ValueError + with pytest.raises(ValueError, match="No files were provided"): + multimodal_processor.process_multimodal_request(payload) + + +def test_process_multimodal_request_file_handler_exception(multimodal_processor): + """Test process_multimodal_request when FileHandler throws an exception""" + payload = {"input": "Analyze this document", "files": [{"fileReference": "file-ref-1", "fileName": "document.pdf"}]} + + # Mock the FileHandler to throw an exception + with patch("gaab_strands_common.multimodal.multimodal_processor.FileHandler") as mock_file_handler_class: + mock_file_handler_class.side_effect = Exception("Test exception") + + # Should re-raise the exception + with pytest.raises(Exception, match="Test exception"): + multimodal_processor.process_multimodal_request(payload) + + +def test_process_multimodal_request_no_files(multimodal_processor): + """Test process_multimodal_request with no files""" + payload = {"input": "Hello, how are you?", "files": []} + + # Mock the FileHandler.validate_all_files method + with patch("gaab_strands_common.multimodal.multimodal_processor.FileHandler") as mock_file_handler_class: + mock_file_handler_instance = Mock() + mock_file_handler_instance.validate_all_files.return_value = [] + mock_file_handler_class.return_value = mock_file_handler_instance + + # Should raise ValueError when no valid files + with pytest.raises(ValueError, match="No files were provided"): + multimodal_processor.process_multimodal_request(payload) + + +def test_is_multimodal_enabled_true(): + """Test is_multimodal_enabled when multimodal is enabled""" + config_dict = { + "UseCaseName": "TestUseCase", + "UseCaseType": "Agent", + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + "MultimodalParams": {"MultimodalEnabled": True}, + }, + } + + config = UseCaseConfig(**config_dict) + processor = MultimodalRequestProcessor("us-east-1") + + result = processor.is_multimodal_enabled(config) + + assert result is True + + +def test_is_multimodal_enabled_false(): + """Test is_multimodal_enabled when multimodal is disabled""" + config_dict = { + "UseCaseName": "TestUseCase", + "UseCaseType": "Agent", + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + "MultimodalParams": {"MultimodalEnabled": False}, + }, + } + + config = UseCaseConfig(**config_dict) + processor = MultimodalRequestProcessor("us-east-1") + + result = processor.is_multimodal_enabled(config) + + assert result is False + + +def test_is_multimodal_enabled_missing_config(): + """Test is_multimodal_enabled when multimodal config is missing""" + config_dict = { + "UseCaseName": "TestUseCase", + "UseCaseType": "Agent", + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + # No MultimodalParams + }, + } + + config = UseCaseConfig(**config_dict) + processor = MultimodalRequestProcessor("us-east-1") + + result = processor.is_multimodal_enabled(config) + + assert result is False + + +def test_has_files_true(): + """Test has_files when files are present""" + payload = {"files": [{"fileReference": "file-ref-1", "fileName": "document.pdf"}]} + + processor = MultimodalRequestProcessor("us-east-1") + result = processor.has_files(payload) + + assert result is True + + +def test_has_files_false(): + """Test has_files when no files are present""" + payload = {"files": []} + + processor = MultimodalRequestProcessor("us-east-1") + result = processor.has_files(payload) + + assert result is False + + +def test_has_files_missing_key(): + """Test has_files when files key is missing""" + payload = { + "input": "Hello" + # No files key + } + + processor = MultimodalRequestProcessor("us-east-1") + result = processor.has_files(payload) + + assert result is False + + +def test_has_files_not_list(): + """Test has_files when files is not a list""" + payload = {"files": "not-a-list"} + + processor = MultimodalRequestProcessor("us-east-1") + result = processor.has_files(payload) + + assert result is False diff --git a/deployment/ecr/gaab-strands-common/test/pytest_plugin.py b/deployment/ecr/gaab-strands-common/test/pytest_plugin.py new file mode 100644 index 00000000..27457861 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/pytest_plugin.py @@ -0,0 +1,29 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Pytest plugin to mock decorators before module imports +""" + +import sys +from unittest.mock import MagicMock + + +# Mock the requires_access_token decorator before any imports +def identity_decorator(**kwargs): + """Mock decorator that just returns the function unchanged""" + + def decorator(func): + return func + + return decorator + + +# Create mock module +mock_auth = MagicMock() +mock_auth.requires_access_token = identity_decorator + +# Inject into sys.modules before imports +sys.modules["bedrock_agentcore"] = MagicMock() +sys.modules["bedrock_agentcore.identity"] = MagicMock() +sys.modules["bedrock_agentcore.identity.auth"] = mock_auth diff --git a/deployment/ecr/gaab-strands-common/test/test_base_agent.py b/deployment/ecr/gaab-strands-common/test/test_base_agent.py new file mode 100644 index 00000000..09c417fa --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/test_base_agent.py @@ -0,0 +1,251 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tests for BaseAgent class +""" + +import pytest +from unittest.mock import Mock, patch +from gaab_strands_common.base_agent import BaseAgent +from gaab_strands_common.models import LlmParams, BedrockLlmParams, UseCaseConfig + + +class TestBaseAgentInit: + """Tests for BaseAgent initialization""" + + def test_init_with_region(self): + """Test initialization with region""" + agent = BaseAgent("us-east-1") + assert agent.region == "us-east-1" + assert agent.config is None + + def test_init_with_different_region(self): + """Test initialization with different region""" + agent = BaseAgent("us-west-2") + assert agent.region == "us-west-2" + + +class TestCreateModel: + """Tests for _create_model method""" + + @patch("gaab_strands_common.base_agent.BedrockModel") + def test_create_model_quick_start(self, mock_bedrock_model): + """Test creating model with QUICK_START inference type""" + agent = BaseAgent("us-east-1") + + llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams(ModelId="amazon.nova-pro-v1:0", BedrockInferenceType="QUICK_START"), + ModelParams={}, + ) + + agent._create_model(llm_params) + + # Verify BedrockModel was called with correct parameters + mock_bedrock_model.assert_called_once() + call_kwargs = mock_bedrock_model.call_args[1] + assert call_kwargs["model_id"] == "amazon.nova-pro-v1:0" + assert call_kwargs["region_name"] == "us-east-1" + assert call_kwargs["temperature"] == 0.7 + assert call_kwargs["streaming"] is True + + @patch("gaab_strands_common.base_agent.BedrockModel") + def test_create_model_inference_profile(self, mock_bedrock_model): + """Test creating model with INFERENCE_PROFILE type""" + agent = BaseAgent("us-west-2") + + llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.5, + Streaming=False, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + InferenceProfileId="us.anthropic.claude-3-5-sonnet-20241022-v2:0", + BedrockInferenceType="INFERENCE_PROFILE", + ), + ModelParams={}, + ) + + agent._create_model(llm_params) + + # Verify model_id is the inference profile ID + call_kwargs = mock_bedrock_model.call_args[1] + assert call_kwargs["model_id"] == "us.anthropic.claude-3-5-sonnet-20241022-v2:0" + assert call_kwargs["region_name"] == "us-west-2" + + @patch("gaab_strands_common.base_agent.BedrockModel") + def test_create_model_provisioned(self, mock_bedrock_model): + """Test creating model with PROVISIONED type""" + agent = BaseAgent("us-east-1") + + llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.3, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelArn="arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abc", + BedrockInferenceType="PROVISIONED", + ), + ModelParams={}, + ) + + agent._create_model(llm_params) + + # Verify model_id is the model ARN + call_kwargs = mock_bedrock_model.call_args[1] + assert call_kwargs["model_id"] == "arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abc" + + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch.dict("os.environ", {"AWS_REGION": "us-west-2"}) + def test_create_model_logs_environment(self, mock_bedrock_model): + """Test that model creation logs environment info""" + agent = BaseAgent("us-east-1") + + llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams(ModelId="amazon.nova-pro-v1:0", BedrockInferenceType="QUICK_START"), + ModelParams={}, + ) + + # Should not raise exception + agent._create_model(llm_params) + + @patch("gaab_strands_common.base_agent.BedrockModel") + def test_create_model_cross_region_profile(self, mock_bedrock_model): + """Test creating model with cross-region inference profile""" + agent = BaseAgent("us-east-1") + + llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + InferenceProfileId="us.anthropic.claude-3-5-sonnet-20241022-v2:0", + BedrockInferenceType="INFERENCE_PROFILE", + ), + ModelParams={}, + ) + + agent._create_model(llm_params) + + # Verify cross-region profile is detected and used + call_kwargs = mock_bedrock_model.call_args[1] + assert call_kwargs["model_id"].startswith("us.") + + +class TestValidateUseCaseType: + """Tests for _validate_use_case_type method""" + + def test_validate_correct_type(self): + """Test validation passes with correct type""" + agent = BaseAgent("us-east-1") + config_dict = {"UseCaseType": "Agent"} + + # Should not raise exception + agent._validate_use_case_type(config_dict, "Agent") + + def test_validate_incorrect_type(self): + """Test validation fails with incorrect type""" + agent = BaseAgent("us-east-1") + config_dict = {"UseCaseType": "Workflow"} + + with pytest.raises(ValueError, match="Expected Agent, got Workflow"): + agent._validate_use_case_type(config_dict, "Agent") + + def test_validate_missing_type(self): + """Test validation with missing UseCaseType""" + agent = BaseAgent("us-east-1") + config_dict = {} + + with pytest.raises(ValueError, match="Expected Agent, got None"): + agent._validate_use_case_type(config_dict, "Agent") + + +class TestGetConfig: + """Tests for get_config method""" + + def test_get_config_when_loaded(self): + """Test getting config when it's loaded""" + agent = BaseAgent("us-east-1") + + # Create mock config + mock_config = Mock(spec=UseCaseConfig) + agent.config = mock_config + + config = agent.get_config() + assert config is mock_config + + def test_get_config_when_not_loaded(self): + """Test getting config when not loaded""" + agent = BaseAgent("us-east-1") + + with pytest.raises(ValueError, match="Configuration not loaded"): + agent.get_config() + + def test_get_config_returns_same_instance(self): + """Test that get_config returns the same instance""" + agent = BaseAgent("us-east-1") + mock_config = Mock(spec=UseCaseConfig) + agent.config = mock_config + + config1 = agent.get_config() + config2 = agent.get_config() + assert config1 is config2 + + +class TestBaseAgentIntegration: + """Integration tests for BaseAgent""" + + @patch("gaab_strands_common.base_agent.BedrockModel") + def test_full_workflow(self, mock_bedrock_model): + """Test full workflow of creating agent and model""" + # Create agent + agent = BaseAgent("us-east-1") + + # Validate use case type + config_dict = {"UseCaseType": "Agent"} + agent._validate_use_case_type(config_dict, "Agent") + + # Create model + llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams(ModelId="amazon.nova-pro-v1:0", BedrockInferenceType="QUICK_START"), + ModelParams={}, + ) + + model = agent._create_model(llm_params) + + # Verify model was created + mock_bedrock_model.assert_called_once() + + def test_region_consistency(self): + """Test that region is used consistently""" + agent = BaseAgent("eu-west-1") + assert agent.region == "eu-west-1" + + # Region should be used in model creation + llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams(ModelId="amazon.nova-pro-v1:0", BedrockInferenceType="QUICK_START"), + ModelParams={}, + ) + + with patch("gaab_strands_common.base_agent.BedrockModel") as mock_model: + agent._create_model(llm_params) + call_kwargs = mock_model.call_args[1] + assert call_kwargs["region_name"] == "eu-west-1" diff --git a/deployment/ecr/gaab-strands-common/test/test_helpers.py b/deployment/ecr/gaab-strands-common/test/test_helpers.py new file mode 100644 index 00000000..65ba2dc7 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/test_helpers.py @@ -0,0 +1,117 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tests for helper functions +""" + +import pytest +from unittest.mock import patch +from gaab_strands_common.models import BedrockLlmParams +from gaab_strands_common.utils.helpers import build_guardrail_config + + +class TestBuildGuardrailConfig: + """Tests for build_guardrail_config function""" + + def test_build_guardrail_config_with_both_fields(self): + """Test guardrail config built when both fields present""" + params = BedrockLlmParams( + ModelId="amazon.nova-pro-v1:0", + BedrockInferenceType="OTHER_FOUNDATION", + GuardrailIdentifier="abc123xyz", + GuardrailVersion="1", + ) + config = build_guardrail_config(params) + assert config == {"guardrail_id": "abc123xyz", "guardrail_version": "1"} + + def test_build_guardrail_config_missing_fields(self): + """Test empty config when fields missing""" + params = BedrockLlmParams( + ModelId="amazon.nova-pro-v1:0", + BedrockInferenceType="OTHER_FOUNDATION", + ) + config = build_guardrail_config(params) + assert config == {} + + def test_build_guardrail_config_partial_identifier_only(self): + """Test empty config when only identifier present""" + params = BedrockLlmParams( + ModelId="amazon.nova-pro-v1:0", + BedrockInferenceType="OTHER_FOUNDATION", + GuardrailIdentifier="abc123xyz", + ) + config = build_guardrail_config(params) + assert config == {} + + def test_build_guardrail_config_partial_version_only(self): + """Test empty config when only version present""" + params = BedrockLlmParams( + ModelId="amazon.nova-pro-v1:0", + BedrockInferenceType="OTHER_FOUNDATION", + GuardrailVersion="1", + ) + config = build_guardrail_config(params) + assert config == {} + + def test_build_guardrail_config_empty_strings(self): + """Test empty config when fields are empty strings""" + params = BedrockLlmParams( + ModelId="amazon.nova-pro-v1:0", + BedrockInferenceType="OTHER_FOUNDATION", + GuardrailIdentifier="", + GuardrailVersion="", + ) + config = build_guardrail_config(params) + assert config == {} + + def test_build_guardrail_config_with_inference_profile(self): + """Test guardrail config works with inference profile""" + params = BedrockLlmParams( + InferenceProfileId="us.anthropic.claude-3-5-sonnet-20241022-v2:0", + BedrockInferenceType="INFERENCE_PROFILE", + GuardrailIdentifier="def456uvw", + GuardrailVersion="2", + ) + config = build_guardrail_config(params) + assert config == {"guardrail_id": "def456uvw", "guardrail_version": "2"} + + def test_build_guardrail_config_with_provisioned(self): + """Test guardrail config works with provisioned throughput""" + params = BedrockLlmParams( + ModelArn="arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abc", + BedrockInferenceType="PROVISIONED", + GuardrailIdentifier="ghi789rst", + GuardrailVersion="3", + ) + config = build_guardrail_config(params) + assert config == {"guardrail_id": "ghi789rst", "guardrail_version": "3"} + + @patch("gaab_strands_common.utils.helpers.logger") + def test_build_guardrail_config_logs_debug(self, mock_logger): + """Test that debug logging occurs when guardrails are applied""" + params = BedrockLlmParams( + ModelId="amazon.nova-pro-v1:0", + BedrockInferenceType="OTHER_FOUNDATION", + GuardrailIdentifier="test123", + GuardrailVersion="5", + ) + config = build_guardrail_config(params) + + # Verify debug log was called + mock_logger.debug.assert_called_once() + call_args = str(mock_logger.debug.call_args) + assert "test123" in call_args + assert "v5" in call_args + + def test_build_guardrail_config_no_logging_when_missing(self): + """Test that no debug logging occurs when guardrails are missing""" + params = BedrockLlmParams( + ModelId="amazon.nova-pro-v1:0", + BedrockInferenceType="OTHER_FOUNDATION", + ) + + with patch("gaab_strands_common.utils.helpers.logger") as mock_logger: + config = build_guardrail_config(params) + # Verify debug log was NOT called + mock_logger.debug.assert_not_called() diff --git a/deployment/ecr/gaab-strands-common/test/test_models.py b/deployment/ecr/gaab-strands-common/test/test_models.py new file mode 100644 index 00000000..a5fd67b8 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/test_models.py @@ -0,0 +1,559 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tests for data models with from_ddb_config deserialization +""" + +import pytest +from gaab_strands_common.models import ( + AgentBuilderParams, + AgentParams, + AgentReference, + AgentsAsToolsParams, + BedrockLlmParams, + CustomToolReference, + FileReference, + GatewayMCPParams, + LlmParams, + MCPParams, + MCPServerConfig, + MCPServerReference, + MemoryConfig, + MultimodalParams, + RuntimeMCPParams, + StrandsToolReference, + UseCaseConfig, + WorkflowConfig, + WorkflowParams, +) +from pydantic import ValidationError + + +class TestBedrockLlmParams: + """Tests for BedrockLlmParams model""" + + def test_quick_start_inference_type(self): + """Test QUICK_START inference type requires ModelId""" + params = BedrockLlmParams(ModelId="amazon.nova-pro-v1:0", BedrockInferenceType="QUICK_START") + assert params.model_identifier == "amazon.nova-pro-v1:0" + + def test_inference_profile_type(self): + """Test INFERENCE_PROFILE requires InferenceProfileId""" + params = BedrockLlmParams( + InferenceProfileId="us.anthropic.claude-3-5-sonnet-20241022-v2:0", + BedrockInferenceType="INFERENCE_PROFILE", + ) + assert params.model_identifier == "us.anthropic.claude-3-5-sonnet-20241022-v2:0" + + def test_provisioned_type(self): + """Test PROVISIONED requires ModelArn""" + params = BedrockLlmParams( + ModelArn="arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abc", + BedrockInferenceType="PROVISIONED", + ) + assert params.model_identifier == "arn:aws:bedrock:us-east-1:123456789012:provisioned-model/abc" + + def test_validation_fails_without_required_field(self): + """Test validation fails when required field is missing""" + with pytest.raises(ValidationError): + BedrockLlmParams(BedrockInferenceType="QUICK_START") + + def test_validation_fails_inference_profile_without_id(self): + """Test validation fails for INFERENCE_PROFILE without InferenceProfileId""" + with pytest.raises(ValidationError): + BedrockLlmParams(ModelId="test", BedrockInferenceType="INFERENCE_PROFILE") + + def test_validation_fails_provisioned_without_arn(self): + """Test validation fails for PROVISIONED without ModelArn""" + with pytest.raises(ValidationError): + BedrockLlmParams(ModelId="test", BedrockInferenceType="PROVISIONED") + + +class TestLlmParams: + """Tests for LlmParams model""" + + def test_llm_params_without_multimodal(self): + """Test LlmParams without multimodal configuration""" + params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams={"ModelId": "amazon.nova-pro-v1:0", "BedrockInferenceType": "QUICK_START"}, + ModelParams={}, + ) + assert params.model_provider == "Bedrock" + assert params.temperature == 0.7 + assert params.multimodal_params is None + + def test_llm_params_with_multimodal_enabled(self): + """Test LlmParams with multimodal enabled""" + params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams={"ModelId": "amazon.nova-pro-v1:0", "BedrockInferenceType": "QUICK_START"}, + ModelParams={}, + MultimodalParams={"MultimodalEnabled": True}, + ) + assert params.multimodal_params is not None + assert params.multimodal_params.multimodal_enabled is True + + def test_llm_params_with_multimodal_disabled(self): + """Test LlmParams with multimodal explicitly disabled""" + params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams={"ModelId": "amazon.nova-pro-v1:0", "BedrockInferenceType": "QUICK_START"}, + ModelParams={}, + MultimodalParams={"MultimodalEnabled": False}, + ) + assert params.multimodal_params is not None + assert params.multimodal_params.multimodal_enabled is False + + +class TestStrandsToolReference: + """Tests for StrandsToolReference model""" + + def test_create_tool_reference(self): + """Test creating tool reference""" + tool = StrandsToolReference(ToolId="web-search") + assert tool.tool_id == "web-search" + + +class TestMCPServerReference: + """Tests for MCPServerReference model""" + + def test_create_mcp_reference(self): + """Test creating MCP server reference""" + mcp = MCPServerReference(UseCaseId="mcp-server-1", Url="https://example.com/mcp", Type="gateway") + assert mcp.use_case_id == "mcp-server-1" + assert mcp.url == "https://example.com/mcp" + assert mcp.type == "gateway" + + +class TestAgentBuilderParams: + """Tests for AgentBuilderParams model""" + + def test_parse_tools_from_dicts(self): + """Test parsing tools from dict format""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=[{"ToolId": "web-search"}, {"ToolId": "calculator"}], + MCPServers=[], + ) + assert len(params.tools) == 2 + assert params.tools[0].tool_id == "web-search" + + def test_parse_mcp_servers_from_dicts(self): + """Test parsing MCP servers from dict format""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=[], + MCPServers=[ + {"UseCaseId": "mcp-1", "Url": "https://example.com/mcp1", "Type": "gateway"}, + {"UseCaseId": "mcp-2", "Url": "https://example.com/mcp2", "Type": "runtime"}, + ], + ) + assert len(params.mcp_servers) == 2 + assert params.mcp_servers[0].use_case_id == "mcp-1" + assert params.mcp_servers[0].type == "gateway" + + def test_get_tool_ids(self): + """Test extracting tool IDs""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", Tools=[{"ToolId": "web-search"}, {"ToolId": "calculator"}], MCPServers=[] + ) + tool_ids = params.get_tool_ids() + assert tool_ids == ["web-search", "calculator"] + + def test_get_mcp_server_ids(self): + """Test extracting MCP server IDs (deprecated method)""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=[], + MCPServers=[ + {"UseCaseId": "mcp-1", "Url": "https://example.com/mcp1", "Type": "gateway"}, + {"UseCaseId": "mcp-2", "Url": "https://example.com/mcp2", "Type": "runtime"}, + ], + ) + mcp_ids = params.get_mcp_server_ids() + assert mcp_ids == ["mcp-1", "mcp-2"] + + def test_get_mcp_servers(self): + """Test extracting MCP server details""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=[], + MCPServers=[ + {"UseCaseId": "mcp-1", "Url": "https://example.com/mcp1", "Type": "gateway"}, + {"UseCaseId": "mcp-2", "Url": "https://example.com/mcp2", "Type": "runtime"}, + ], + ) + mcp_servers = params.get_mcp_servers() + assert len(mcp_servers) == 2 + assert mcp_servers[0] == {"use_case_id": "mcp-1", "url": "https://example.com/mcp1", "type": "gateway"} + assert mcp_servers[1] == {"use_case_id": "mcp-2", "url": "https://example.com/mcp2", "type": "runtime"} + + def test_empty_tools_and_mcp_servers(self): + """Test with empty tools and MCP servers""" + params = AgentBuilderParams(SystemPrompt="Test prompt", Tools=[], MCPServers=[]) + assert len(params.tools) == 0 + assert len(params.mcp_servers) == 0 + + +class TestUseCaseConfig: + """Tests for UseCaseConfig model and from_ddb_config""" + + def test_from_ddb_config_success(self): + """Test successful deserialization from DDB config""" + ddb_config = { + "UseCaseName": "TestAgent", + "UseCaseType": "Agent", + "AgentBuilderParams": { + "SystemPrompt": "You are a helpful assistant", + "Tools": [{"ToolId": "web-search"}], + "MCPServers": [{"UseCaseId": "mcp-1", "Url": "https://example.com/mcp", "Type": "gateway"}], + "MemoryConfig": {"LongTermEnabled": False}, + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 1, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + }, + } + + config = UseCaseConfig.from_ddb_config(ddb_config) + + assert config.use_case_name == "TestAgent" + assert config.use_case_type == "Agent" + assert config.agent_builder_params.system_prompt == "You are a helpful assistant" + assert len(config.agent_builder_params.tools) == 1 + assert config.llm_params.temperature == 1 + + def test_from_ddb_config_with_inference_profile(self): + """Test deserialization with inference profile""" + ddb_config = { + "UseCaseName": "TestAgent", + "UseCaseType": "Agent", + "AgentBuilderParams": { + "SystemPrompt": "Test", + "Tools": [], + "MCPServers": [], + "MemoryConfig": {"LongTermEnabled": False}, + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.5, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "InferenceProfileId": "us.anthropic.claude-3-5-sonnet-20241022-v2:0", + "BedrockInferenceType": "INFERENCE_PROFILE", + }, + "ModelParams": {}, + }, + } + + config = UseCaseConfig.from_ddb_config(ddb_config) + + assert config.llm_params.bedrock_llm_params.model_identifier == "us.anthropic.claude-3-5-sonnet-20241022-v2:0" + + def test_from_ddb_config_invalid_data(self): + """Test error handling for invalid config""" + invalid_config = {"UseCaseName": "Test"} + + with pytest.raises(ValueError, match="Error parsing configuration"): + UseCaseConfig.from_ddb_config(invalid_config) + + +class TestWorkflowModels: + """Tests for workflow-specific models""" + + def test_agent_params_with_optional_fields(self): + """Test AgentParams with optional fields""" + params = AgentParams( + SystemPrompt="Test prompt", + Tools=[{"ToolId": "web-search"}], + MCPServers=[{"UseCaseId": "mcp-1", "Url": "https://example.com/mcp", "Type": "gateway"}], + ) + assert params.system_prompt == "Test prompt" + assert len(params.tools) == 1 + assert len(params.mcp_servers) == 1 + + def test_agent_params_minimal(self): + """Test AgentParams with minimal fields""" + params = AgentParams() + assert params.system_prompt is None + assert params.tools is None + assert params.mcp_servers is None + + def test_agent_reference(self): + """Test AgentReference model""" + agent_ref = AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="TestAgent", + AgentBuilderParams={ + "SystemPrompt": "You are a specialist", + "Tools": [], + "MCPServers": [], + }, + ) + assert agent_ref.agent_builder_params.system_prompt == "You are a specialist" + assert agent_ref.use_case_id == "test-agent-id" + + def test_agents_as_tools_params(self): + """Test AgentsAsToolsParams model""" + params = AgentsAsToolsParams( + Agents=[ + { + "UseCaseId": "agent-1-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "Agent1", + "AgentBuilderParams": {"SystemPrompt": "Agent 1"}, + }, + { + "UseCaseId": "agent-2-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "Agent2", + "AgentBuilderParams": {"SystemPrompt": "Agent 2"}, + }, + ] + ) + assert len(params.agents) == 2 + assert params.agents[0].agent_builder_params.system_prompt == "Agent 1" + + def test_workflow_params(self): + """Test WorkflowParams model""" + params = WorkflowParams( + OrchestrationPattern="agents-as-tools", + SystemPrompt="You are a coordinator", + AgentsAsToolsParams={ + "Agents": [ + { + "UseCaseId": "agent-1-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "Agent1", + "AgentBuilderParams": {"SystemPrompt": "Agent 1"}, + }, + ] + }, + ) + assert params.orchestration_pattern == "agents-as-tools" + assert params.system_prompt == "You are a coordinator" + assert len(params.agents_as_tools_params.agents) == 1 + assert params.agents_as_tools_params.agents[0].agent_builder_params.system_prompt == "Agent 1" + + +class TestWorkflowConfig: + """Tests for WorkflowConfig and from_ddb_config""" + + def test_from_ddb_config_success(self): + """Test successful workflow config deserialization""" + ddb_config = { + "WorkflowType": "agents-as-tools", + "WorkflowParams": { + "OrchestrationPattern": "agents-as-tools", + "SystemPrompt": "You are a workflow coordinator", + "MemoryConfig": {"MaxTokens": 1000}, + "AgentsAsToolsParams": { + "Agents": [ + { + "UseCaseId": "test-agent-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "SpecialistAgent", + "AgentBuilderParams": { + "SystemPrompt": "You are a specialist", + "Tools": [], + "MCPServers": [], + }, + } + ] + }, + }, + } + + config = WorkflowConfig.from_ddb_config(ddb_config) + + assert config.workflow_type == "agents-as-tools" + assert len(config.workflow_params.agents_as_tools_params.agents) == 1 + assert ( + config.workflow_params.agents_as_tools_params.agents[0].agent_builder_params.system_prompt + == "You are a specialist" + ) + + def test_from_ddb_config_invalid_data(self): + """Test error handling for invalid workflow config""" + invalid_config = {"WorkflowType": "test"} + + with pytest.raises(ValueError, match="Error parsing workflow configuration"): + WorkflowConfig.from_ddb_config(invalid_config) + + +class TestMCPServerConfig: + """Tests for MCP server configuration models""" + + def test_gateway_mcp_params(self): + """Test Gateway MCP parameters""" + params = GatewayMCPParams( + GatewayUrl="https://api.example.com", + GatewayArn="arn:aws:execute-api:us-east-1:123456789012:abc123", + GatewayId="abc123", + GatewayName="TestGateway", + TargetParams=[], + ) + assert params.gateway_url == "https://api.example.com" + assert params.gateway_name == "TestGateway" + + def test_runtime_mcp_params(self): + """Test Runtime MCP parameters""" + params = RuntimeMCPParams( + EcrUri="123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server:latest", + RuntimeArn="arn:aws:bedrock-agent-runtime:us-east-1:123456789012:agent-runtime/ABC123", + RuntimeUrl="https://bedrock-agent-runtime.us-east-1.amazonaws.com", + ) + assert params.ecr_uri == "123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server:latest" + assert params.runtime_arn == "arn:aws:bedrock-agent-runtime:us-east-1:123456789012:agent-runtime/ABC123" + assert params.runtime_url == "https://bedrock-agent-runtime.us-east-1.amazonaws.com" + + def test_mcp_server_config_with_gateway(self): + """Test MCP server config with gateway params""" + config = MCPServerConfig( + UseCaseName="TestMCP", + UseCaseType="MCPServer", + MCPParams={ + "GatewayParams": { + "GatewayUrl": "https://api.example.com", + "GatewayArn": "arn:aws:execute-api:us-east-1:123456789012:abc123", + "GatewayId": "abc123", + "GatewayName": "TestGateway", + "TargetParams": [], + } + }, + ) + assert config.use_case_name == "TestMCP" + assert config.mcp_params.gateway_params is not None + assert config.mcp_params.gateway_params.gateway_name == "TestGateway" + + def test_mcp_server_config_with_runtime(self): + """Test MCP server config with runtime params""" + config = MCPServerConfig( + UseCaseName="TestMCP", + UseCaseType="MCPServer", + MCPParams={ + "RuntimeParams": { + "EcrUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server:latest", + "RuntimeArn": "arn:aws:bedrock-agent-runtime:us-east-1:123456789012:agent-runtime/ABC123", + "RuntimeUrl": "https://bedrock-agent-runtime.us-east-1.amazonaws.com", + } + }, + ) + assert config.mcp_params.runtime_params is not None + assert "mcp-server" in config.mcp_params.runtime_params.ecr_uri + assert config.mcp_params.runtime_params.runtime_arn.startswith("arn:aws:bedrock-agent-runtime") + + +class TestMemoryConfig: + """Tests for MemoryConfig model""" + + def test_memory_config_enabled(self): + """Test memory config with long term enabled""" + config = MemoryConfig(LongTermEnabled=True) + assert config.long_term_enabled is True + + def test_memory_config_disabled(self): + """Test memory config with long term disabled""" + config = MemoryConfig(LongTermEnabled=False) + assert config.long_term_enabled is False + + def test_memory_config_default(self): + """Test memory config default value""" + config = MemoryConfig() + assert config.long_term_enabled is False + + +class TestFileReference: + """Tests for FileReference model""" + + def test_create_file_reference(self): + """Test creating file reference with proper field aliases""" + file_ref = FileReference(fileReference="s3://bucket/key", fileName="document.pdf") + assert file_ref.file_reference == "s3://bucket/key" + assert file_ref.file_name == "document.pdf" + + def test_file_reference_field_aliases(self): + """Test that field aliases work correctly""" + file_ref = FileReference(**{"fileReference": "s3://bucket/key", "fileName": "document.pdf"}) + assert file_ref.file_reference == "s3://bucket/key" + assert file_ref.file_name == "document.pdf" + + +class TestCustomToolReference: + """Tests for CustomToolReference model""" + + def test_create_custom_tool_reference(self): + """Test creating custom tool reference""" + tool = CustomToolReference(ToolId="s3-file-reader") + assert tool.tool_id == "s3-file-reader" + + def test_custom_tool_reference_field_alias(self): + """Test that field alias works correctly""" + tool = CustomToolReference(**{"ToolId": "s3-file-reader"}) + assert tool.tool_id == "s3-file-reader" + + +class TestAgentBuilderParamsCustomTools: + """Additional tests for AgentBuilderParams model with custom tools""" + + def test_parse_custom_tools_from_dicts(self): + """Test parsing custom tools from dict format""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=[], + CustomTools=[{"ToolId": "s3-file-reader"}, {"ToolId": "database-query"}], + MCPServers=[], + ) + assert len(params.custom_tools) == 2 + assert params.custom_tools[0].tool_id == "s3-file-reader" + assert params.custom_tools[1].tool_id == "database-query" + + def test_get_custom_tool_ids(self): + """Test extracting custom tool IDs""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=[], + CustomTools=[{"ToolId": "s3-file-reader"}, {"ToolId": "database-query"}], + MCPServers=[], + ) + custom_tool_ids = params.get_custom_tool_ids() + assert custom_tool_ids == ["s3-file-reader", "database-query"] + + def test_empty_custom_tools(self): + """Test with empty custom tools""" + params = AgentBuilderParams(SystemPrompt="Test prompt", Tools=[], CustomTools=[], MCPServers=[]) + assert len(params.custom_tools) == 0 + assert params.get_custom_tool_ids() == [] + + def test_mixed_tools(self): + """Test with both built-in and custom tools""" + params = AgentBuilderParams( + SystemPrompt="Test prompt", + Tools=[{"ToolId": "web-search"}], + CustomTools=[{"ToolId": "s3-file-reader"}], + MCPServers=[], + ) + assert len(params.tools) == 1 + assert len(params.custom_tools) == 1 + assert params.get_tool_ids() == ["web-search"] + assert params.get_custom_tool_ids() == ["s3-file-reader"] diff --git a/deployment/ecr/gaab-strands-common/test/test_runtime_streaming.py b/deployment/ecr/gaab-strands-common/test/test_runtime_streaming.py new file mode 100644 index 00000000..3a7d03d3 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/test_runtime_streaming.py @@ -0,0 +1,481 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tests for RuntimeStreaming class +""" + +import asyncio +import pytest +from unittest.mock import Mock, AsyncMock, patch +from gaab_strands_common.runtime_streaming import RuntimeStreaming +from gaab_strands_common.tool_wrapper import ToolEventEmitter, ToolUsageEvent + + +class MockConfig: + """Mock configuration for testing""" + + def __init__(self): + self.use_case_name = "TestAgent" + self.llm_params = Mock() + self.llm_params.bedrock_llm_params = Mock() + self.llm_params.bedrock_llm_params.model_id = "test-model-id" + + +class MockEvent: + """Mock streaming event""" + + def __init__(self, data): + self.data = data + + +class TestExtractEventText: + """Tests for extract_event_text method""" + + def test_extract_text_from_event_with_data_attribute(self): + """Test extracting text from event with data attribute""" + event = MockEvent("Hello world") + text = RuntimeStreaming.extract_event_text(event) + assert text == "Hello world" + + def test_extract_text_from_dict_event(self): + """Test extracting text from dictionary event""" + event = {"data": "Test message"} + text = RuntimeStreaming.extract_event_text(event) + assert text == "Test message" + + def test_extract_text_from_empty_event(self): + """Test extracting text from empty event""" + event = MockEvent(None) + text = RuntimeStreaming.extract_event_text(event) + assert text == "" + + def test_extract_text_from_event_without_data(self): + """Test extracting text from event without data""" + event = {} + text = RuntimeStreaming.extract_event_text(event) + assert text == "" + + def test_extract_text_handles_exception(self): + """Test that exceptions are handled gracefully""" + event = None + text = RuntimeStreaming.extract_event_text(event) + assert text == "" + + +class TestChunkCreation: + """Tests for chunk creation methods""" + + def test_create_content_chunk(self): + """Test creating content chunk""" + config = MockConfig() + chunk = RuntimeStreaming._create_content_chunk("Test content", config) + + assert chunk["type"] == "content" + assert chunk["text"] == "Test content" + assert chunk["agent_name"] == "TestAgent" + assert chunk["model_id"] == "test-model-id" + + def test_create_completion_chunk_without_usage(self): + """Test creating completion chunk without usage metadata""" + config = MockConfig() + chunk = RuntimeStreaming._create_completion_chunk(config) + + assert chunk["type"] == "completion" + assert chunk["agent_name"] == "TestAgent" + assert chunk["model_id"] == "test-model-id" + assert "usage" not in chunk + + def test_create_completion_chunk_with_usage(self): + """Test creating completion chunk with usage metadata""" + config = MockConfig() + usage_metadata = { + "inputTokens": 1453, + "outputTokens": 271, + "totalTokens": 1724 + } + chunk = RuntimeStreaming._create_completion_chunk(config, usage_metadata) + + assert chunk["type"] == "completion" + assert chunk["agent_name"] == "TestAgent" + assert chunk["model_id"] == "test-model-id" + assert chunk["usage"] == usage_metadata + assert chunk["usage"]["inputTokens"] == 1453 + assert chunk["usage"]["outputTokens"] == 271 + assert chunk["usage"]["totalTokens"] == 1724 + + def test_create_error_chunk(self): + """Test creating error chunk""" + config = MockConfig() + error = ValueError("Test error") + chunk = RuntimeStreaming._create_error_chunk(error, config) + + assert chunk["type"] == "error" + assert chunk["error"] == "Streaming response failed" + assert chunk["message"] == "Test error" + assert chunk["agent_name"] == "TestAgent" + assert chunk["model_id"] == "test-model-id" + + +class TestToolEvents: + """Tests for tool event handling""" + + def test_yield_tool_events(self): + """Test yielding tool events""" + ToolEventEmitter.clear() + + # Add test events + event1 = ToolUsageEvent("tool1", "started", "2024-01-01T00:00:00Z") + event2 = ToolUsageEvent("tool2", "completed", "2024-01-01T00:00:01Z") + ToolEventEmitter.emit(event1) + ToolEventEmitter.emit(event2) + + # Yield events + chunks = list(RuntimeStreaming._yield_tool_events()) + + assert len(chunks) == 2 + assert chunks[0]["type"] == "tool_use" + assert chunks[0]["toolUsage"]["toolName"] == "tool1" + assert chunks[1]["toolUsage"]["toolName"] == "tool2" + + def test_yield_tool_events_empty(self): + """Test yielding when no events exist""" + ToolEventEmitter.clear() + chunks = list(RuntimeStreaming._yield_tool_events()) + assert len(chunks) == 0 + + +class TestUsageMetadataExtraction: + """Tests for usage metadata extraction""" + + def test_extract_usage_from_nested_dict_event(self): + """Test extracting usage from nested dictionary event""" + event = { + "event": { + "metadata": { + "usage": { + "inputTokens": 1453, + "outputTokens": 271, + "totalTokens": 1724 + } + } + } + } + usage = RuntimeStreaming._extract_usage_metadata(event) + assert usage is not None + assert usage["inputTokens"] == 1453 + assert usage["outputTokens"] == 271 + assert usage["totalTokens"] == 1724 + + def test_extract_usage_from_direct_dict_event(self): + """Test extracting usage from direct dictionary event""" + event = { + "metadata": { + "usage": { + "inputTokens": 100, + "outputTokens": 50, + "totalTokens": 150 + } + } + } + usage = RuntimeStreaming._extract_usage_metadata(event) + assert usage is not None + assert usage["inputTokens"] == 100 + assert usage["outputTokens"] == 50 + assert usage["totalTokens"] == 150 + + def test_extract_usage_from_event_without_usage(self): + """Test extracting usage from event without usage metadata""" + event = {"event": {"metadata": {}}} + usage = RuntimeStreaming._extract_usage_metadata(event) + assert usage is None + + def test_extract_usage_from_event_without_metadata(self): + """Test extracting usage from event without metadata""" + event = {"event": {}} + usage = RuntimeStreaming._extract_usage_metadata(event) + assert usage is None + + def test_extract_usage_from_empty_event(self): + """Test extracting usage from empty event""" + event = {} + usage = RuntimeStreaming._extract_usage_metadata(event) + assert usage is None + + def test_extract_usage_handles_exception(self): + """Test that exceptions are handled gracefully""" + event = None + usage = RuntimeStreaming._extract_usage_metadata(event) + assert usage is None + + def test_extract_usage_from_non_dict_event(self): + """Test extracting usage from non-dictionary event""" + event = "not a dict" + usage = RuntimeStreaming._extract_usage_metadata(event) + assert usage is None + + +class TestShouldSkipEvent: + """Tests for event skipping logic""" + + def test_should_skip_empty_event(self): + """Test skipping empty event""" + assert RuntimeStreaming._should_skip_event("", "previous") is True + + def test_should_skip_duplicate_event(self): + """Test skipping duplicate event""" + assert RuntimeStreaming._should_skip_event("same", "same") is True + + def test_should_not_skip_new_event(self): + """Test not skipping new event""" + assert RuntimeStreaming._should_skip_event("new", "old") is False + + def test_should_not_skip_first_event(self): + """Test not skipping first event""" + assert RuntimeStreaming._should_skip_event("first", None) is False + + +class TestStreamResponseAsync: + """Tests for async streaming""" + + @pytest.mark.asyncio + async def test_stream_response_async_success(self): + """Test successful async streaming""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent with stream_async + mock_agent = Mock() + + async def mock_stream(): + yield MockEvent("Hello") + yield MockEvent("World") + + mock_agent.stream_async = Mock(return_value=mock_stream()) + + # Collect chunks + chunks = [] + async for chunk in RuntimeStreaming.stream_response_async(mock_agent, "test", config): + chunks.append(chunk) + + # Verify chunks + content_chunks = [c for c in chunks if c["type"] == "content"] + completion_chunks = [c for c in chunks if c["type"] == "completion"] + + assert len(content_chunks) == 2 + assert content_chunks[0]["text"] == "Hello" + assert content_chunks[1]["text"] == "World" + assert len(completion_chunks) == 1 + + @pytest.mark.asyncio + async def test_stream_response_async_with_tool_events(self): + """Test async streaming with tool events""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent that emits tool events during streaming + mock_agent = Mock() + + async def mock_stream(): + # Emit tool event during streaming (simulating tool wrapper behavior) + tool_event = ToolUsageEvent("test_tool", "started", "2024-01-01T00:00:00Z") + ToolEventEmitter.emit(tool_event) + yield MockEvent("Response") + + mock_agent.stream_async = Mock(return_value=mock_stream()) + + # Collect chunks + chunks = [] + async for chunk in RuntimeStreaming.stream_response_async(mock_agent, "test", config): + chunks.append(chunk) + + # Verify tool event was included + tool_chunks = [c for c in chunks if c["type"] == "tool_use"] + assert len(tool_chunks) >= 1 + assert tool_chunks[0]["toolUsage"]["toolName"] == "test_tool" + + @pytest.mark.asyncio + async def test_stream_response_async_handles_error(self): + """Test async streaming handles errors""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent that raises error + mock_agent = Mock() + + async def mock_stream(): + # Raise error immediately in async generator + if True: # Always true, but makes yield reachable for syntax + raise ValueError("Stream error") + yield # Make it a generator (unreachable but needed for syntax) + + mock_agent.stream_async = Mock(return_value=mock_stream()) + + # Collect chunks + chunks = [] + async for chunk in RuntimeStreaming.stream_response_async(mock_agent, "test", config): + chunks.append(chunk) + + # Verify error chunk + error_chunks = [c for c in chunks if c["type"] == "error"] + assert len(error_chunks) == 1 + assert "Stream error" in error_chunks[0]["message"] + + @pytest.mark.asyncio + async def test_stream_response_async_fallback(self): + """Test fallback when stream_async not available""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent without stream_async + mock_agent = Mock() + mock_agent.stream_async = Mock(side_effect=AttributeError("No stream_async")) + mock_agent.return_value = "Fallback response" + + # Collect chunks + chunks = [] + async for chunk in RuntimeStreaming.stream_response_async(mock_agent, "test", config): + chunks.append(chunk) + + # Verify fallback response + content_chunks = [c for c in chunks if c["type"] == "content"] + assert len(content_chunks) == 1 + assert content_chunks[0]["text"] == "Fallback response" + + @pytest.mark.asyncio + async def test_stream_response_async_skips_duplicate_events(self): + """Test that duplicate events are skipped""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent with duplicate events + mock_agent = Mock() + + async def mock_stream(): + yield MockEvent("Same") + yield MockEvent("Same") + yield MockEvent("Different") + + mock_agent.stream_async = Mock(return_value=mock_stream()) + + # Collect chunks + chunks = [] + async for chunk in RuntimeStreaming.stream_response_async(mock_agent, "test", config): + chunks.append(chunk) + + # Verify only unique content + content_chunks = [c for c in chunks if c["type"] == "content"] + assert len(content_chunks) == 2 + assert content_chunks[0]["text"] == "Same" + assert content_chunks[1]["text"] == "Different" + + @pytest.mark.asyncio + async def test_stream_response_async_with_usage_metadata(self): + """Test async streaming captures and includes usage metadata""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent with usage metadata event + mock_agent = Mock() + + async def mock_stream(): + yield MockEvent("Response text") + # Yield event with usage metadata + yield { + "event": { + "metadata": { + "usage": { + "inputTokens": 1453, + "outputTokens": 271, + "totalTokens": 1724 + } + } + } + } + + mock_agent.stream_async = Mock(return_value=mock_stream()) + + # Collect chunks + chunks = [] + async for chunk in RuntimeStreaming.stream_response_async(mock_agent, "test", config): + chunks.append(chunk) + + # Verify completion chunk includes usage metadata + completion_chunks = [c for c in chunks if c["type"] == "completion"] + assert len(completion_chunks) == 1 + assert "usage" in completion_chunks[0] + assert completion_chunks[0]["usage"]["inputTokens"] == 1453 + assert completion_chunks[0]["usage"]["outputTokens"] == 271 + assert completion_chunks[0]["usage"]["totalTokens"] == 1724 + + @pytest.mark.asyncio + async def test_stream_response_async_without_usage_metadata(self): + """Test async streaming when no usage metadata is present""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent without usage metadata + mock_agent = Mock() + + async def mock_stream(): + yield MockEvent("Response text") + + mock_agent.stream_async = Mock(return_value=mock_stream()) + + # Collect chunks + chunks = [] + async for chunk in RuntimeStreaming.stream_response_async(mock_agent, "test", config): + chunks.append(chunk) + + # Verify completion chunk does not include usage metadata + completion_chunks = [c for c in chunks if c["type"] == "completion"] + assert len(completion_chunks) == 1 + assert "usage" not in completion_chunks[0] + + +class TestStreamResponse: + """Tests for synchronous streaming wrapper""" + + def test_stream_response_sync_wrapper(self): + """Test synchronous wrapper works""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent + mock_agent = Mock() + + async def mock_stream(): + yield MockEvent("Test") + + mock_agent.stream_async = Mock(return_value=mock_stream()) + + # Collect chunks + chunks = list(RuntimeStreaming.stream_response(mock_agent, "test", config)) + + # Verify chunks + content_chunks = [c for c in chunks if c["type"] == "content"] + assert len(content_chunks) == 1 + assert content_chunks[0]["text"] == "Test" + + def test_stream_response_handles_empty_stream(self): + """Test handling empty stream""" + config = MockConfig() + ToolEventEmitter.clear() + + # Mock agent with empty stream + mock_agent = Mock() + + async def mock_stream(): + # Empty generator - no yields + if False: # Never true, but makes yield reachable for syntax + yield # Make it a generator (unreachable but needed for syntax) + return + + mock_agent.stream_async = Mock(return_value=mock_stream()) + + # Collect chunks + chunks = list(RuntimeStreaming.stream_response(mock_agent, "test", config)) + + # Should still have completion chunk + completion_chunks = [c for c in chunks if c["type"] == "completion"] + assert len(completion_chunks) == 1 diff --git a/deployment/ecr/gaab-strands-common/test/test_tool_wrapper.py b/deployment/ecr/gaab-strands-common/test/test_tool_wrapper.py new file mode 100644 index 00000000..0d4edcfe --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/test_tool_wrapper.py @@ -0,0 +1,491 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tests for tool wrapper and event emission +""" + +import time +from unittest.mock import AsyncMock, Mock + +import pytest +from gaab_strands_common.tool_wrapper import ( + ToolEventEmitter, + ToolUsageEvent, + _extract_tool_name_from_args, + _filter_tool_args, + _filter_tool_kwargs, + _get_mcp_server_name, + _get_tool_name, + wrap_tool_with_events, +) + + +class TestToolUsageEvent: + """Tests for ToolUsageEvent class""" + + def test_create_event(self): + """Test creating tool usage event""" + event = ToolUsageEvent("test_tool", "started", "2024-01-01T00:00:00Z", toolInput={"arg": "value"}) + + assert event.tool_name == "test_tool" + assert event.status == "started" + assert event.start_time == "2024-01-01T00:00:00Z" + assert event.data["toolInput"] == {"arg": "value"} + + def test_to_dict(self): + """Test converting event to dictionary""" + event = ToolUsageEvent( + "test_tool", + "completed", + "2024-01-01T00:00:00Z", + endTime="2024-01-01T00:00:01Z", + toolOutput="result", + ) + + event_dict = event.to_dict() + + assert event_dict["toolName"] == "test_tool" + assert event_dict["status"] == "completed" + assert event_dict["startTime"] == "2024-01-01T00:00:00Z" + assert event_dict["endTime"] == "2024-01-01T00:00:01Z" + assert event_dict["toolOutput"] == "result" + + +class TestToolEventEmitter: + """Tests for ToolEventEmitter singleton""" + + def test_emit_event(self): + """Test emitting events""" + ToolEventEmitter.clear() + + event = ToolUsageEvent("tool1", "started", "2024-01-01T00:00:00Z") + ToolEventEmitter.emit(event) + + events = ToolEventEmitter.get_events() + assert len(events) == 1 + assert events[0].tool_name == "tool1" + + def test_get_events_clears_queue(self): + """Test that get_events clears the queue""" + ToolEventEmitter.clear() + + event1 = ToolUsageEvent("tool1", "started", "2024-01-01T00:00:00Z") + event2 = ToolUsageEvent("tool2", "started", "2024-01-01T00:00:01Z") + ToolEventEmitter.emit(event1) + ToolEventEmitter.emit(event2) + + # First get + events = ToolEventEmitter.get_events() + assert len(events) == 2 + + # Second get should be empty + events = ToolEventEmitter.get_events() + assert len(events) == 0 + + def test_clear_events(self): + """Test clearing events""" + ToolEventEmitter.clear() + + event = ToolUsageEvent("tool1", "started", "2024-01-01T00:00:00Z") + ToolEventEmitter.emit(event) + + ToolEventEmitter.clear() + events = ToolEventEmitter.get_events() + assert len(events) == 0 + + def test_singleton_pattern(self): + """Test that ToolEventEmitter is a singleton""" + emitter1 = ToolEventEmitter() + emitter2 = ToolEventEmitter() + assert emitter1 is emitter2 + + +class TestGetToolName: + """Tests for _get_tool_name helper""" + + def test_get_name_from_tool_name_attribute(self): + """Test getting name from tool_name attribute (DecoratedFunctionTool pattern)""" + tool = Mock() + tool.tool_name = "decorated_tool" + tool.name = "fallback_name" # Should not be used + assert _get_tool_name(tool) == "decorated_tool" + + def test_get_name_from_name_attribute(self): + """Test getting name from name attribute (fallback when tool_name not present)""" + tool = Mock(spec=["name"]) # Only spec 'name' to avoid auto-creating tool_name + tool.name = "test_tool" + assert _get_tool_name(tool) == "test_tool" + + def test_get_name_from_function_name(self): + """Test getting name from __name__ attribute""" + + def test_function(): + """Empty test function for name extraction""" + pass + + assert _get_tool_name(test_function) == "test_function" + + def test_get_name_from_class_name(self): + """Test getting name from class name""" + + class TestTool: + pass + + tool = TestTool() + assert _get_tool_name(tool) == "TestTool" + + +class TestExtractToolNameFromArgs: + """Tests for _extract_tool_name_from_args helper""" + + def test_extract_from_object_with_name(self): + """Test extracting name from object with name attribute""" + arg = Mock() + arg.name = "extracted_name" + assert _extract_tool_name_from_args((arg,), "fallback") == "extracted_name" + + def test_extract_from_dict_with_name(self): + """Test extracting name from dict with name key""" + arg = {"name": "dict_name"} + assert _extract_tool_name_from_args((arg,), "fallback") == "dict_name" + + def test_fallback_when_no_name(self): + """Test fallback when no name found""" + arg = {"other": "value"} + assert _extract_tool_name_from_args((arg,), "fallback") == "fallback" + + def test_fallback_when_empty_args(self): + """Test fallback when args are empty""" + assert _extract_tool_name_from_args((), "fallback") == "fallback" + + +class TestGetMcpServerName: + """Tests for _get_mcp_server_name helper""" + + def test_get_mcp_server_name_from_metadata(self): + """Test getting MCP server name from metadata""" + tool = Mock() + tool.metadata = {"mcp_server": "test-mcp-server"} + assert _get_mcp_server_name(tool) == "test-mcp-server" + + def test_get_server_name_from_metadata(self): + """Test getting server name from metadata""" + tool = Mock() + tool.metadata = {"server_name": "test-server"} + assert _get_mcp_server_name(tool) == "test-server" + + def test_no_metadata(self): + """Test when tool has no metadata""" + tool = Mock(spec=[]) + assert _get_mcp_server_name(tool) is None + + def test_metadata_not_dict(self): + """Test when metadata is not a dict""" + tool = Mock() + tool.metadata = "not a dict" + assert _get_mcp_server_name(tool) is None + + +class TestFilterToolArgs: + """Tests for _filter_tool_args helper""" + + def test_filter_normal_args(self): + """Test filtering normal arguments""" + args = ("arg1", "arg2", 123) + filtered = _filter_tool_args(args) + assert len(filtered) == 3 + assert "arg1" in filtered + + def test_filter_agent_objects(self): + """Test filtering out agent objects""" + agent_str = "Agent" + "x" * 500 # Long string with Agent + args = ("normal_arg", agent_str) + filtered = _filter_tool_args(args) + # Agent string should be filtered out + assert len(filtered) == 1 + assert filtered[0] == "normal_arg" + + def test_truncate_long_args(self): + """Test truncating long arguments""" + long_arg = "x" * 600 + args = (long_arg,) + filtered = _filter_tool_args(args) + assert len(filtered[0]) == 500 # MAX_TOOL_ARG_LENGTH + + +class TestFilterToolKwargs: + """Tests for _filter_tool_kwargs helper""" + + def test_filter_normal_kwargs(self): + """Test filtering normal kwargs""" + kwargs = {"arg1": "value1", "arg2": "value2"} + filtered = _filter_tool_kwargs(kwargs) + assert len(filtered) == 2 + assert filtered["arg1"] == "value1" + + def test_filter_internal_params(self): + """Test filtering internal parameters""" + kwargs = {"arg1": "value1", "agent": "agent_obj", "_agent": "internal", "self": "self_obj"} + filtered = _filter_tool_kwargs(kwargs) + assert len(filtered) == 1 + assert "arg1" in filtered + assert "agent" not in filtered + + def test_filter_agent_values(self): + """Test filtering kwargs with Agent in value""" + kwargs = {"arg1": "value1", "arg2": "Agent object"} + filtered = _filter_tool_kwargs(kwargs) + assert "arg1" in filtered + assert "arg2" not in filtered + + def test_truncate_long_values(self): + """Test truncating long values""" + long_value = "x" * 600 + kwargs = {"arg": long_value} + filtered = _filter_tool_kwargs(kwargs) + assert len(filtered["arg"]) == 500 + + +class TestWrapToolWithEvents: + """Tests for wrap_tool_with_events function""" + + def test_wrap_tool_with_call_method(self): + """Test wrapping tool with __call__ method""" + ToolEventEmitter.clear() + + # Create a real callable class (Mock doesn't work with __call__ wrapping) + class CallableTool: + def __init__(self): + self.name = "test_tool" + self.call_count = 0 + + def __call__(self, *args, **kwargs): + self.call_count += 1 + return "result" + + tool = CallableTool() + + # Wrap tool + wrap_tool_with_events(tool) + + # Call the wrapped __call__ method directly (Python doesn't use instance __call__ for tool()) + result = tool.__call__("arg1", kwarg1="value1") + + # Verify result + assert result == "result" + assert tool.call_count == 1 + + # Verify events were emitted + events = ToolEventEmitter.get_events() + assert len(events) == 2 # started and completed + assert events[0].status == "started" + assert events[1].status == "completed" + + def test_wrap_tool_with_invoke_method(self): + """Test wrapping tool with invoke method""" + ToolEventEmitter.clear() + + # Create a tool with invoke method + class InvokableTool: + def __init__(self): + self.name = "test_tool" + self.invoke_count = 0 + + def invoke(self, *args, **kwargs): + self.invoke_count += 1 + return "result" + + tool = InvokableTool() + + # Wrap tool + wrap_tool_with_events(tool) + + # Call wrapped tool + result = tool.invoke("arg1") + + # Verify result + assert result == "result" + assert tool.invoke_count == 1 + + # Verify events + events = ToolEventEmitter.get_events() + assert len(events) == 2 + + @pytest.mark.asyncio + async def test_wrap_tool_with_stream_method(self): + """Test wrapping tool with stream method""" + ToolEventEmitter.clear() + + # Create tool with async stream + class StreamTool: + def __init__(self): + self.name = "test_tool" + + async def stream(self, *args, **kwargs): + yield "chunk1" + yield "chunk2" + + tool = StreamTool() + + # Wrap tool + wrap_tool_with_events(tool) + + # Call wrapped stream + chunks = [] + async for chunk in tool.stream("arg1"): + chunks.append(chunk) + + # Verify chunks + assert chunks == ["chunk1", "chunk2"] + + # Verify events + events = ToolEventEmitter.get_events() + assert len(events) == 2 + assert events[0].status == "started" + assert events[1].status == "completed" + + def test_wrap_tool_handles_error(self): + """Test wrapping tool handles errors""" + ToolEventEmitter.clear() + + # Create a real callable class that raises error + class ErrorTool: + def __init__(self): + self.name = "test_tool" + + def __call__(self, *args, **kwargs): + raise ValueError("Test error") + + tool = ErrorTool() + + # Wrap tool + wrap_tool_with_events(tool) + + # Call wrapped __call__ method directly and expect error + with pytest.raises(ValueError, match="Test error"): + tool.__call__("arg1") + + # Verify error event was emitted + events = ToolEventEmitter.get_events() + assert len(events) == 2 + assert events[0].status == "started" + assert events[1].status == "failed" + assert "Test error" in events[1].data["error"] + + def test_wrap_tool_with_mcp_metadata(self): + """Test wrapping tool with MCP server metadata""" + ToolEventEmitter.clear() + + # Create a real callable class with MCP metadata + class MCPTool: + def __init__(self): + self.name = "test_tool" + self.metadata = {"mcp_server": "test-mcp-server"} + + def __call__(self, *args, **kwargs): + return "result" + + tool = MCPTool() + + # Wrap tool + wrap_tool_with_events(tool) + + # Call wrapped __call__ method directly + tool.__call__("arg1") + + # Verify MCP server name in events + events = ToolEventEmitter.get_events() + assert events[0].data.get("mcpServerName") == "test-mcp-server" + assert events[1].data.get("mcpServerName") == "test-mcp-server" + + def test_wrap_tool_without_wrappable_methods(self): + """Test wrapping tool without wrappable methods""" + # Create mock tool without call/invoke/stream + tool = Mock(spec=["some_method"]) + tool.name = "test_tool" + + # Wrap tool - should log warning but not fail + wrapped_tool = wrap_tool_with_events(tool) + assert wrapped_tool is tool + + def test_wrap_tool_includes_tool_input(self): + """Test that tool input is included in events""" + ToolEventEmitter.clear() + + # Create a real callable class + class InputTool: + def __init__(self): + self.name = "test_tool" + + def __call__(self, *args, **kwargs): + return "result" + + tool = InputTool() + + # Wrap tool + wrap_tool_with_events(tool) + + # Call wrapped __call__ method directly with args and kwargs + tool.__call__("arg1", "arg2", kwarg1="value1") + + # Verify tool input in events + events = ToolEventEmitter.get_events() + start_event = events[0] + assert "toolInput" in start_event.data + assert "args" in start_event.data["toolInput"] + assert "kwargs" in start_event.data["toolInput"] + + def test_wrap_tool_includes_tool_output(self): + """Test that tool output is included in completion event""" + ToolEventEmitter.clear() + + # Create a real callable class + class OutputTool: + def __init__(self): + self.name = "test_tool" + + def __call__(self, *args, **kwargs): + return "test output" + + tool = OutputTool() + + # Wrap tool + wrap_tool_with_events(tool) + + # Call wrapped __call__ method directly + tool.__call__("arg1") + + # Verify tool output in completion event + events = ToolEventEmitter.get_events() + completion_event = events[1] + assert "toolOutput" in completion_event.data + assert completion_event.data["toolOutput"] == "test output" + + def test_wrap_tool_truncates_long_output(self): + """Test that long tool output is truncated""" + ToolEventEmitter.clear() + + # Create a real callable class with long output + long_output = "x" * 600 + + class LongOutputTool: + def __init__(self): + self.name = "test_tool" + + def __call__(self, *args, **kwargs): + return long_output + + tool = LongOutputTool() + + # Wrap tool + wrap_tool_with_events(tool) + + # Call wrapped __call__ method directly + tool.__call__("arg1") + + # Verify output is truncated + events = ToolEventEmitter.get_events() + completion_event = events[1] + assert len(completion_event.data["toolOutput"]) <= 520 # 500 + "... (truncated)" + assert "truncated" in completion_event.data["toolOutput"] diff --git a/deployment/ecr/gaab-strands-common/test/utils/test_helpers.py b/deployment/ecr/gaab-strands-common/test/utils/test_helpers.py new file mode 100644 index 00000000..e49ec734 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/test/utils/test_helpers.py @@ -0,0 +1,234 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tests for helper functions +""" + +import logging +import time +from unittest.mock import Mock, patch + +import pytest +from gaab_strands_common.utils.constants import RETRY_CONFIG +from gaab_strands_common.utils.helpers import ( + extract_user_message, + get_file_category_from_extension, + is_supported_file_type, + retry_with_backoff, +) + + +class TestGetFileCategoryFromExtension: + """Tests for get_file_category_from_extension function""" + + def test_image_extensions(self): + """Test that image extensions return 'image' category""" + image_extensions = ["png", "jpeg", "gif", "webp"] + for ext in image_extensions: + assert get_file_category_from_extension(ext) == "image" + # Test case insensitivity + assert get_file_category_from_extension(ext.upper()) == "image" + + def test_document_extensions(self): + """Test that document extensions return 'document' category""" + document_extensions = ["pdf", "csv", "doc", "docx", "xls", "xlsx", "html", "txt", "md"] + for ext in document_extensions: + assert get_file_category_from_extension(ext) == "document" + # Test case insensitivity + assert get_file_category_from_extension(ext.upper()) == "document" + + def test_unknown_extension(self): + """Test that unknown extensions return 'unknown' category""" + assert get_file_category_from_extension("unknown") == "unknown" + assert get_file_category_from_extension("exe") == "unknown" + assert get_file_category_from_extension("") == "unknown" + + +class TestIsSupportedFileType: + """Tests for is_supported_file_type function""" + + def test_supported_image_extensions(self): + """Test that supported image extensions return True""" + supported_images = ["png", "jpeg", "gif", "webp"] + for ext in supported_images: + assert is_supported_file_type(ext) is True + # Test case insensitivity + assert is_supported_file_type(ext.upper()) is True + + def test_supported_document_extensions(self): + """Test that supported document extensions return True""" + supported_documents = ["pdf", "csv", "doc", "docx", "xls", "xlsx", "html", "txt", "md"] + for ext in supported_documents: + assert is_supported_file_type(ext) is True + # Test case insensitivity + assert is_supported_file_type(ext.upper()) is True + + def test_unsupported_extensions(self): + """Test that unsupported extensions return False""" + unsupported_extensions = ["exe", "dmg", "zip", "rar", "unknown"] + for ext in unsupported_extensions: + assert is_supported_file_type(ext) is False + # Test case insensitivity + assert is_supported_file_type(ext.upper()) is False + + def test_empty_extension(self): + """Test that empty extension returns False""" + assert is_supported_file_type("") is False + + +class TestExtractUserMessage: + """Tests for extract_user_message function""" + + def test_valid_message(self): + """Test extracting valid message from payload""" + payload = {"input": "Hello, world!"} + assert extract_user_message(payload) == "Hello, world!" + + def test_valid_message_with_whitespace(self): + """Test extracting message with leading/trailing whitespace""" + payload = {"input": " Hello, world! "} + assert extract_user_message(payload) == "Hello, world!" + + def test_numeric_input(self): + """Test extracting numeric input""" + payload = {"input": 123} + assert extract_user_message(payload) == "123" + + def test_invalid_payload_type(self): + """Test that non-dict payload raises ValueError""" + with pytest.raises(ValueError, match="Payload must be a dictionary"): + extract_user_message("invalid-payload") + + with pytest.raises(ValueError, match="Payload must be a dictionary"): + extract_user_message(None) + + with pytest.raises(ValueError, match="Payload must be a dictionary"): + extract_user_message([]) + + def test_missing_input_field(self): + """Test that missing input field returns error message""" + payload = {"other_field": "value"} + result = extract_user_message(payload) + assert "Please provide your message in the 'input' field" in result + + def test_none_input(self): + """Test that None input returns error message""" + payload = {"input": None} + result = extract_user_message(payload) + assert "Please provide your message in the 'input' field" in result + + def test_empty_string_input(self): + """Test that empty string input returns error message""" + payload = {"input": ""} + result = extract_user_message(payload) + assert "Please provide your message in the 'input' field" in result + + def test_whitespace_only_input(self): + """Test that whitespace-only input returns error message""" + payload = {"input": " "} + result = extract_user_message(payload) + assert "Please provide your message in the 'input' field" in result + + +class TestRetryWithBackoff: + """Tests for retry_with_backoff function""" + + def test_successful_function_call_no_retries(self): + """Test that successful function call returns immediately without retries""" + mock_func = Mock(return_value="success") + result = retry_with_backoff(mock_func) + assert result == "success" + mock_func.assert_called_once() + + @patch("time.sleep") + def test_retry_on_exception(self, mock_sleep): + """Test that function retries on exception""" + mock_func = Mock(side_effect=[Exception("fail"), Exception("fail"), "success"]) + result = retry_with_backoff(mock_func, exception_types=(Exception,)) + assert result == "success" + assert mock_func.call_count == 3 + assert mock_sleep.call_count == 2 # Two retries means two sleeps + + @patch("time.sleep") + def test_max_retries_exceeded(self, mock_sleep): + """Test that function raises exception when max retries exceeded""" + mock_func = Mock(side_effect=Exception("fail")) + with pytest.raises(Exception, match="fail"): + retry_with_backoff(mock_func, max_retries=2, exception_types=(Exception,)) + assert mock_func.call_count == 3 # Initial call + 2 retries + assert mock_sleep.call_count == 2 + + @patch("time.sleep") + def test_retry_condition_met(self, mock_sleep): + """Test that function retries when retry condition is met""" + mock_func = Mock(side_effect=["retry", "retry", "success"]) + retry_condition = lambda result: result == "retry" + result = retry_with_backoff(mock_func, retry_condition=retry_condition) + assert result == "success" + assert mock_func.call_count == 3 + assert mock_sleep.call_count == 2 + + @patch("time.sleep") + def test_retry_condition_not_met(self, mock_sleep): + """Test that function doesn't retry when retry condition is not met""" + mock_func = Mock(return_value="success") + retry_condition = lambda result: result == "retry" + result = retry_with_backoff(mock_func, retry_condition=retry_condition) + assert result == "success" + mock_func.assert_called_once() + mock_sleep.assert_not_called() + + @patch("time.sleep") + def test_retry_condition_still_met_after_max_retries(self, mock_sleep): + """Test that function returns last result when retry condition still met after max retries""" + mock_func = Mock(return_value="retry") + retry_condition = lambda result: result == "retry" + with patch("logging.Logger.warning") as mock_warning: + result = retry_with_backoff(mock_func, retry_condition=retry_condition, max_retries=2) + assert result == "retry" + assert mock_func.call_count == 3 # Initial call + 2 retries + assert mock_sleep.call_count == 2 + mock_warning.assert_called_once() + + def test_default_parameters_from_config(self): + """Test that function uses default parameters from RETRY_CONFIG""" + mock_func = Mock(return_value="success") + with patch( + "gaab_strands_common.utils.helpers.RETRY_CONFIG", + {"max_retries": 1, "initial_delay_ms": 100, "max_delay": 1.0, "back_off_rate": 2}, + ): + result = retry_with_backoff(mock_func) + assert result == "success" + mock_func.assert_called_once() + + @patch("time.sleep") + def test_custom_parameters(self, mock_sleep): + """Test that function uses custom parameters""" + mock_func = Mock(side_effect=[Exception("fail"), "success"]) + result = retry_with_backoff( + mock_func, max_retries=1, base_delay=0.1, max_delay=1.0, exception_types=(Exception,) + ) + assert result == "success" + assert mock_func.call_count == 2 + mock_sleep.assert_called_once_with(0.1) + + @patch("logging.Logger.info") + @patch("time.sleep") + def test_logging_on_retry_success(self, mock_sleep, mock_info): + """Test that function logs success after retries""" + mock_func = Mock(side_effect=[Exception("fail"), "success"]) + result = retry_with_backoff(mock_func, exception_types=(Exception,)) + assert result == "success" + assert mock_info.call_count == 1 # One log for success after retry + mock_sleep.assert_called_once() + + @patch("logging.Logger.error") + @patch("time.sleep") + def test_logging_on_max_retries_exceeded(self, mock_sleep, mock_error): + """Test that function logs error when max retries exceeded""" + mock_func = Mock(side_effect=Exception("fail")) + with pytest.raises(Exception, match="fail"): + retry_with_backoff(mock_func, max_retries=1, exception_types=(Exception,)) + mock_error.assert_called_once() + assert mock_sleep.call_count == 1 diff --git a/deployment/ecr/gaab-strands-common/uv.lock b/deployment/ecr/gaab-strands-common/uv.lock new file mode 100644 index 00000000..6531a620 --- /dev/null +++ b/deployment/ecr/gaab-strands-common/uv.lock @@ -0,0 +1,1265 @@ +version = 1 +revision = 2 +requires-python = ">=3.13" + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "bedrock-agentcore" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "urllib3" }, + { name = "uvicorn" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b0/87/4c0bacf09430e559657fc986cbb1003f76d597ab7e7365ab247dbef73940/bedrock_agentcore-0.1.7.tar.gz", hash = "sha256:e518e8f5e6fb5a5a80182db95757a20e32b0ac2b33d0a1909dfafcba950c6356", size = 263080, upload-time = "2025-10-01T16:18:39.255Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/f3/a9d961cfba236dc85f27f2f2c6eab88e12698754aaa02459ba7dfafc5062/bedrock_agentcore-0.1.7-py3-none-any.whl", hash = "sha256:441dde64fea596e9571e47ae37ee3b033e58d8d255018f13bdcde8ae8bef2075", size = 77216, upload-time = "2025-10-01T16:18:38.153Z" }, +] + +[[package]] +name = "black" +version = "25.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "pytokens" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/43/20b5c90612d7bdb2bdbcceeb53d588acca3bb8f0e4c5d5c751a2c8fdd55a/black-25.9.0.tar.gz", hash = "sha256:0474bca9a0dd1b51791fcc507a4e02078a1c63f6d4e4ae5544b9848c7adfb619", size = 648393, upload-time = "2025-09-19T00:27:37.758Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/99/3acfea65f5e79f45472c45f87ec13037b506522719cd9d4ac86484ff51ac/black-25.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0172a012f725b792c358d57fe7b6b6e8e67375dd157f64fa7a3097b3ed3e2175", size = 1742165, upload-time = "2025-09-19T00:34:10.402Z" }, + { url = "https://files.pythonhosted.org/packages/3a/18/799285282c8236a79f25d590f0222dbd6850e14b060dfaa3e720241fd772/black-25.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3bec74ee60f8dfef564b573a96b8930f7b6a538e846123d5ad77ba14a8d7a64f", size = 1581259, upload-time = "2025-09-19T00:32:49.685Z" }, + { url = "https://files.pythonhosted.org/packages/f1/ce/883ec4b6303acdeca93ee06b7622f1fa383c6b3765294824165d49b1a86b/black-25.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b756fc75871cb1bcac5499552d771822fd9db5a2bb8db2a7247936ca48f39831", size = 1655583, upload-time = "2025-09-19T00:30:44.505Z" }, + { url = "https://files.pythonhosted.org/packages/21/17/5c253aa80a0639ccc427a5c7144534b661505ae2b5a10b77ebe13fa25334/black-25.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:846d58e3ce7879ec1ffe816bb9df6d006cd9590515ed5d17db14e17666b2b357", size = 1343428, upload-time = "2025-09-19T00:32:13.839Z" }, + { url = "https://files.pythonhosted.org/packages/1b/46/863c90dcd3f9d41b109b7f19032ae0db021f0b2a81482ba0a1e28c84de86/black-25.9.0-py3-none-any.whl", hash = "sha256:474b34c1342cdc157d307b56c4c65bce916480c4a8f6551fdc6bf9b486a7c4ae", size = 203363, upload-time = "2025-09-19T00:27:35.724Z" }, +] + +[[package]] +name = "boto3" +version = "1.40.51" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/02/d3ee174681961dc2ebfe81f326e1b5e1ba338b7ff939e1e94eef76e7c65a/boto3-1.40.51.tar.gz", hash = "sha256:ed1b7750df07b2f2ece0141ff2ed0489db2ec2b5311a956d00a496b05fd4fadb", size = 111571, upload-time = "2025-10-13T19:20:44.594Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/da/67c37721d8f8f3480c03a04e93ef1fae0f7620c9a2d305b3ed8a93600d79/boto3-1.40.51-py3-none-any.whl", hash = "sha256:6aa81b9acb4eff87b5505ae26c948883a2015cc3308ce5168a9df6e00ab815d2", size = 139343, upload-time = "2025-10-13T19:20:42.009Z" }, +] + +[[package]] +name = "botocore" +version = "1.40.51" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ca/33/9e197d872adf1eadf8ecc259d0f451143326ff044d236b8971ff8bf980ee/botocore-1.40.51.tar.gz", hash = "sha256:a06de20408c3009e59e8f161a1146f1801d279d0923ab950349154900951bb20", size = 14421749, upload-time = "2025-10-13T19:20:32.491Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/f1/a6d9fdfec74c658cd8d639dc98de415fa42ce3c023602514c67f68e556d9/botocore-1.40.51-py3-none-any.whl", hash = "sha256:d5811a834bbdec6dac540565eab9f5ad954ed439b535686fbc3219e1cbfbfc49", size = 14090941, upload-time = "2025-10-13T19:20:28.165Z" }, +] + +[[package]] +name = "certifi" +version = "2025.10.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/5b/b6ce21586237c77ce67d01dc5507039d444b630dd76611bbca2d8e5dcd91/certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43", size = 164519, upload-time = "2025-10-05T04:12:15.808Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de", size = 163286, upload-time = "2025-10-05T04:12:14.03Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/61/de6cd827efad202d7057d93e0fed9294b96952e188f7384832791c7b2254/click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4", size = 276943, upload-time = "2025-09-18T17:32:23.696Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc", size = 107295, upload-time = "2025-09-18T17:32:22.42Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "coverage" +version = "7.10.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/26/d22c300112504f5f9a9fd2297ce33c35f3d353e4aeb987c8419453b2a7c2/coverage-7.10.7.tar.gz", hash = "sha256:f4ab143ab113be368a3e9b795f9cd7906c5ef407d6173fe9675a902e1fffc239", size = 827704, upload-time = "2025-09-21T20:03:56.815Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/94/b765c1abcb613d103b64fcf10395f54d69b0ef8be6a0dd9c524384892cc7/coverage-7.10.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:981a651f543f2854abd3b5fcb3263aac581b18209be49863ba575de6edf4c14d", size = 218320, upload-time = "2025-09-21T20:01:56.629Z" }, + { url = "https://files.pythonhosted.org/packages/72/4f/732fff31c119bb73b35236dd333030f32c4bfe909f445b423e6c7594f9a2/coverage-7.10.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:73ab1601f84dc804f7812dc297e93cd99381162da39c47040a827d4e8dafe63b", size = 218575, upload-time = "2025-09-21T20:01:58.203Z" }, + { url = "https://files.pythonhosted.org/packages/87/02/ae7e0af4b674be47566707777db1aa375474f02a1d64b9323e5813a6cdd5/coverage-7.10.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a8b6f03672aa6734e700bbcd65ff050fd19cddfec4b031cc8cf1c6967de5a68e", size = 249568, upload-time = "2025-09-21T20:01:59.748Z" }, + { url = "https://files.pythonhosted.org/packages/a2/77/8c6d22bf61921a59bce5471c2f1f7ac30cd4ac50aadde72b8c48d5727902/coverage-7.10.7-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10b6ba00ab1132a0ce4428ff68cf50a25efd6840a42cdf4239c9b99aad83be8b", size = 252174, upload-time = "2025-09-21T20:02:01.192Z" }, + { url = "https://files.pythonhosted.org/packages/b1/20/b6ea4f69bbb52dac0aebd62157ba6a9dddbfe664f5af8122dac296c3ee15/coverage-7.10.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c79124f70465a150e89340de5963f936ee97097d2ef76c869708c4248c63ca49", size = 253447, upload-time = "2025-09-21T20:02:02.701Z" }, + { url = "https://files.pythonhosted.org/packages/f9/28/4831523ba483a7f90f7b259d2018fef02cb4d5b90bc7c1505d6e5a84883c/coverage-7.10.7-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:69212fbccdbd5b0e39eac4067e20a4a5256609e209547d86f740d68ad4f04911", size = 249779, upload-time = "2025-09-21T20:02:04.185Z" }, + { url = "https://files.pythonhosted.org/packages/a7/9f/4331142bc98c10ca6436d2d620c3e165f31e6c58d43479985afce6f3191c/coverage-7.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7ea7c6c9d0d286d04ed3541747e6597cbe4971f22648b68248f7ddcd329207f0", size = 251604, upload-time = "2025-09-21T20:02:06.034Z" }, + { url = "https://files.pythonhosted.org/packages/ce/60/bda83b96602036b77ecf34e6393a3836365481b69f7ed7079ab85048202b/coverage-7.10.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b9be91986841a75042b3e3243d0b3cb0b2434252b977baaf0cd56e960fe1e46f", size = 249497, upload-time = "2025-09-21T20:02:07.619Z" }, + { url = "https://files.pythonhosted.org/packages/5f/af/152633ff35b2af63977edd835d8e6430f0caef27d171edf2fc76c270ef31/coverage-7.10.7-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:b281d5eca50189325cfe1f365fafade89b14b4a78d9b40b05ddd1fc7d2a10a9c", size = 249350, upload-time = "2025-09-21T20:02:10.34Z" }, + { url = "https://files.pythonhosted.org/packages/9d/71/d92105d122bd21cebba877228990e1646d862e34a98bb3374d3fece5a794/coverage-7.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:99e4aa63097ab1118e75a848a28e40d68b08a5e19ce587891ab7fd04475e780f", size = 251111, upload-time = "2025-09-21T20:02:12.122Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9e/9fdb08f4bf476c912f0c3ca292e019aab6712c93c9344a1653986c3fd305/coverage-7.10.7-cp313-cp313-win32.whl", hash = "sha256:dc7c389dce432500273eaf48f410b37886be9208b2dd5710aaf7c57fd442c698", size = 220746, upload-time = "2025-09-21T20:02:13.919Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b1/a75fd25df44eab52d1931e89980d1ada46824c7a3210be0d3c88a44aaa99/coverage-7.10.7-cp313-cp313-win_amd64.whl", hash = "sha256:cac0fdca17b036af3881a9d2729a850b76553f3f716ccb0360ad4dbc06b3b843", size = 221541, upload-time = "2025-09-21T20:02:15.57Z" }, + { url = "https://files.pythonhosted.org/packages/14/3a/d720d7c989562a6e9a14b2c9f5f2876bdb38e9367126d118495b89c99c37/coverage-7.10.7-cp313-cp313-win_arm64.whl", hash = "sha256:4b6f236edf6e2f9ae8fcd1332da4e791c1b6ba0dc16a2dc94590ceccb482e546", size = 220170, upload-time = "2025-09-21T20:02:17.395Z" }, + { url = "https://files.pythonhosted.org/packages/bb/22/e04514bf2a735d8b0add31d2b4ab636fc02370730787c576bb995390d2d5/coverage-7.10.7-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a0ec07fd264d0745ee396b666d47cef20875f4ff2375d7c4f58235886cc1ef0c", size = 219029, upload-time = "2025-09-21T20:02:18.936Z" }, + { url = "https://files.pythonhosted.org/packages/11/0b/91128e099035ece15da3445d9015e4b4153a6059403452d324cbb0a575fa/coverage-7.10.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:dd5e856ebb7bfb7672b0086846db5afb4567a7b9714b8a0ebafd211ec7ce6a15", size = 219259, upload-time = "2025-09-21T20:02:20.44Z" }, + { url = "https://files.pythonhosted.org/packages/8b/51/66420081e72801536a091a0c8f8c1f88a5c4bf7b9b1bdc6222c7afe6dc9b/coverage-7.10.7-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f57b2a3c8353d3e04acf75b3fed57ba41f5c0646bbf1d10c7c282291c97936b4", size = 260592, upload-time = "2025-09-21T20:02:22.313Z" }, + { url = "https://files.pythonhosted.org/packages/5d/22/9b8d458c2881b22df3db5bb3e7369e63d527d986decb6c11a591ba2364f7/coverage-7.10.7-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ef2319dd15a0b009667301a3f84452a4dc6fddfd06b0c5c53ea472d3989fbf0", size = 262768, upload-time = "2025-09-21T20:02:24.287Z" }, + { url = "https://files.pythonhosted.org/packages/f7/08/16bee2c433e60913c610ea200b276e8eeef084b0d200bdcff69920bd5828/coverage-7.10.7-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83082a57783239717ceb0ad584de3c69cf581b2a95ed6bf81ea66034f00401c0", size = 264995, upload-time = "2025-09-21T20:02:26.133Z" }, + { url = "https://files.pythonhosted.org/packages/20/9d/e53eb9771d154859b084b90201e5221bca7674ba449a17c101a5031d4054/coverage-7.10.7-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:50aa94fb1fb9a397eaa19c0d5ec15a5edd03a47bf1a3a6111a16b36e190cff65", size = 259546, upload-time = "2025-09-21T20:02:27.716Z" }, + { url = "https://files.pythonhosted.org/packages/ad/b0/69bc7050f8d4e56a89fb550a1577d5d0d1db2278106f6f626464067b3817/coverage-7.10.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2120043f147bebb41c85b97ac45dd173595ff14f2a584f2963891cbcc3091541", size = 262544, upload-time = "2025-09-21T20:02:29.216Z" }, + { url = "https://files.pythonhosted.org/packages/ef/4b/2514b060dbd1bc0aaf23b852c14bb5818f244c664cb16517feff6bb3a5ab/coverage-7.10.7-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2fafd773231dd0378fdba66d339f84904a8e57a262f583530f4f156ab83863e6", size = 260308, upload-time = "2025-09-21T20:02:31.226Z" }, + { url = "https://files.pythonhosted.org/packages/54/78/7ba2175007c246d75e496f64c06e94122bdb914790a1285d627a918bd271/coverage-7.10.7-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:0b944ee8459f515f28b851728ad224fa2d068f1513ef6b7ff1efafeb2185f999", size = 258920, upload-time = "2025-09-21T20:02:32.823Z" }, + { url = "https://files.pythonhosted.org/packages/c0/b3/fac9f7abbc841409b9a410309d73bfa6cfb2e51c3fada738cb607ce174f8/coverage-7.10.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4b583b97ab2e3efe1b3e75248a9b333bd3f8b0b1b8e5b45578e05e5850dfb2c2", size = 261434, upload-time = "2025-09-21T20:02:34.86Z" }, + { url = "https://files.pythonhosted.org/packages/ee/51/a03bec00d37faaa891b3ff7387192cef20f01604e5283a5fabc95346befa/coverage-7.10.7-cp313-cp313t-win32.whl", hash = "sha256:2a78cd46550081a7909b3329e2266204d584866e8d97b898cd7fb5ac8d888b1a", size = 221403, upload-time = "2025-09-21T20:02:37.034Z" }, + { url = "https://files.pythonhosted.org/packages/53/22/3cf25d614e64bf6d8e59c7c669b20d6d940bb337bdee5900b9ca41c820bb/coverage-7.10.7-cp313-cp313t-win_amd64.whl", hash = "sha256:33a5e6396ab684cb43dc7befa386258acb2d7fae7f67330ebb85ba4ea27938eb", size = 222469, upload-time = "2025-09-21T20:02:39.011Z" }, + { url = "https://files.pythonhosted.org/packages/49/a1/00164f6d30d8a01c3c9c48418a7a5be394de5349b421b9ee019f380df2a0/coverage-7.10.7-cp313-cp313t-win_arm64.whl", hash = "sha256:86b0e7308289ddde73d863b7683f596d8d21c7d8664ce1dee061d0bcf3fbb4bb", size = 220731, upload-time = "2025-09-21T20:02:40.939Z" }, + { url = "https://files.pythonhosted.org/packages/23/9c/5844ab4ca6a4dd97a1850e030a15ec7d292b5c5cb93082979225126e35dd/coverage-7.10.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b06f260b16ead11643a5a9f955bd4b5fd76c1a4c6796aeade8520095b75de520", size = 218302, upload-time = "2025-09-21T20:02:42.527Z" }, + { url = "https://files.pythonhosted.org/packages/f0/89/673f6514b0961d1f0e20ddc242e9342f6da21eaba3489901b565c0689f34/coverage-7.10.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:212f8f2e0612778f09c55dd4872cb1f64a1f2b074393d139278ce902064d5b32", size = 218578, upload-time = "2025-09-21T20:02:44.468Z" }, + { url = "https://files.pythonhosted.org/packages/05/e8/261cae479e85232828fb17ad536765c88dd818c8470aca690b0ac6feeaa3/coverage-7.10.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3445258bcded7d4aa630ab8296dea4d3f15a255588dd535f980c193ab6b95f3f", size = 249629, upload-time = "2025-09-21T20:02:46.503Z" }, + { url = "https://files.pythonhosted.org/packages/82/62/14ed6546d0207e6eda876434e3e8475a3e9adbe32110ce896c9e0c06bb9a/coverage-7.10.7-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb45474711ba385c46a0bfe696c695a929ae69ac636cda8f532be9e8c93d720a", size = 252162, upload-time = "2025-09-21T20:02:48.689Z" }, + { url = "https://files.pythonhosted.org/packages/ff/49/07f00db9ac6478e4358165a08fb41b469a1b053212e8a00cb02f0d27a05f/coverage-7.10.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:813922f35bd800dca9994c5971883cbc0d291128a5de6b167c7aa697fcf59360", size = 253517, upload-time = "2025-09-21T20:02:50.31Z" }, + { url = "https://files.pythonhosted.org/packages/a2/59/c5201c62dbf165dfbc91460f6dbbaa85a8b82cfa6131ac45d6c1bfb52deb/coverage-7.10.7-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:93c1b03552081b2a4423091d6fb3787265b8f86af404cff98d1b5342713bdd69", size = 249632, upload-time = "2025-09-21T20:02:51.971Z" }, + { url = "https://files.pythonhosted.org/packages/07/ae/5920097195291a51fb00b3a70b9bbd2edbfe3c84876a1762bd1ef1565ebc/coverage-7.10.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cc87dd1b6eaf0b848eebb1c86469b9f72a1891cb42ac7adcfbce75eadb13dd14", size = 251520, upload-time = "2025-09-21T20:02:53.858Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3c/a815dde77a2981f5743a60b63df31cb322c944843e57dbd579326625a413/coverage-7.10.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:39508ffda4f343c35f3236fe8d1a6634a51f4581226a1262769d7f970e73bffe", size = 249455, upload-time = "2025-09-21T20:02:55.807Z" }, + { url = "https://files.pythonhosted.org/packages/aa/99/f5cdd8421ea656abefb6c0ce92556709db2265c41e8f9fc6c8ae0f7824c9/coverage-7.10.7-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:925a1edf3d810537c5a3abe78ec5530160c5f9a26b1f4270b40e62cc79304a1e", size = 249287, upload-time = "2025-09-21T20:02:57.784Z" }, + { url = "https://files.pythonhosted.org/packages/c3/7a/e9a2da6a1fc5d007dd51fca083a663ab930a8c4d149c087732a5dbaa0029/coverage-7.10.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2c8b9a0636f94c43cd3576811e05b89aa9bc2d0a85137affc544ae5cb0e4bfbd", size = 250946, upload-time = "2025-09-21T20:02:59.431Z" }, + { url = "https://files.pythonhosted.org/packages/ef/5b/0b5799aa30380a949005a353715095d6d1da81927d6dbed5def2200a4e25/coverage-7.10.7-cp314-cp314-win32.whl", hash = "sha256:b7b8288eb7cdd268b0304632da8cb0bb93fadcfec2fe5712f7b9cc8f4d487be2", size = 221009, upload-time = "2025-09-21T20:03:01.324Z" }, + { url = "https://files.pythonhosted.org/packages/da/b0/e802fbb6eb746de006490abc9bb554b708918b6774b722bb3a0e6aa1b7de/coverage-7.10.7-cp314-cp314-win_amd64.whl", hash = "sha256:1ca6db7c8807fb9e755d0379ccc39017ce0a84dcd26d14b5a03b78563776f681", size = 221804, upload-time = "2025-09-21T20:03:03.4Z" }, + { url = "https://files.pythonhosted.org/packages/9e/e8/71d0c8e374e31f39e3389bb0bd19e527d46f00ea8571ec7ec8fd261d8b44/coverage-7.10.7-cp314-cp314-win_arm64.whl", hash = "sha256:097c1591f5af4496226d5783d036bf6fd6cd0cbc132e071b33861de756efb880", size = 220384, upload-time = "2025-09-21T20:03:05.111Z" }, + { url = "https://files.pythonhosted.org/packages/62/09/9a5608d319fa3eba7a2019addeacb8c746fb50872b57a724c9f79f146969/coverage-7.10.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a62c6ef0d50e6de320c270ff91d9dd0a05e7250cac2a800b7784bae474506e63", size = 219047, upload-time = "2025-09-21T20:03:06.795Z" }, + { url = "https://files.pythonhosted.org/packages/f5/6f/f58d46f33db9f2e3647b2d0764704548c184e6f5e014bef528b7f979ef84/coverage-7.10.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9fa6e4dd51fe15d8738708a973470f67a855ca50002294852e9571cdbd9433f2", size = 219266, upload-time = "2025-09-21T20:03:08.495Z" }, + { url = "https://files.pythonhosted.org/packages/74/5c/183ffc817ba68e0b443b8c934c8795553eb0c14573813415bd59941ee165/coverage-7.10.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8fb190658865565c549b6b4706856d6a7b09302c797eb2cf8e7fe9dabb043f0d", size = 260767, upload-time = "2025-09-21T20:03:10.172Z" }, + { url = "https://files.pythonhosted.org/packages/0f/48/71a8abe9c1ad7e97548835e3cc1adbf361e743e9d60310c5f75c9e7bf847/coverage-7.10.7-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:affef7c76a9ef259187ef31599a9260330e0335a3011732c4b9effa01e1cd6e0", size = 262931, upload-time = "2025-09-21T20:03:11.861Z" }, + { url = "https://files.pythonhosted.org/packages/84/fd/193a8fb132acfc0a901f72020e54be5e48021e1575bb327d8ee1097a28fd/coverage-7.10.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e16e07d85ca0cf8bafe5f5d23a0b850064e8e945d5677492b06bbe6f09cc699", size = 265186, upload-time = "2025-09-21T20:03:13.539Z" }, + { url = "https://files.pythonhosted.org/packages/b1/8f/74ecc30607dd95ad50e3034221113ccb1c6d4e8085cc761134782995daae/coverage-7.10.7-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:03ffc58aacdf65d2a82bbeb1ffe4d01ead4017a21bfd0454983b88ca73af94b9", size = 259470, upload-time = "2025-09-21T20:03:15.584Z" }, + { url = "https://files.pythonhosted.org/packages/0f/55/79ff53a769f20d71b07023ea115c9167c0bb56f281320520cf64c5298a96/coverage-7.10.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1b4fd784344d4e52647fd7857b2af5b3fbe6c239b0b5fa63e94eb67320770e0f", size = 262626, upload-time = "2025-09-21T20:03:17.673Z" }, + { url = "https://files.pythonhosted.org/packages/88/e2/dac66c140009b61ac3fc13af673a574b00c16efdf04f9b5c740703e953c0/coverage-7.10.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:0ebbaddb2c19b71912c6f2518e791aa8b9f054985a0769bdb3a53ebbc765c6a1", size = 260386, upload-time = "2025-09-21T20:03:19.36Z" }, + { url = "https://files.pythonhosted.org/packages/a2/f1/f48f645e3f33bb9ca8a496bc4a9671b52f2f353146233ebd7c1df6160440/coverage-7.10.7-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:a2d9a3b260cc1d1dbdb1c582e63ddcf5363426a1a68faa0f5da28d8ee3c722a0", size = 258852, upload-time = "2025-09-21T20:03:21.007Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3b/8442618972c51a7affeead957995cfa8323c0c9bcf8fa5a027421f720ff4/coverage-7.10.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a3cc8638b2480865eaa3926d192e64ce6c51e3d29c849e09d5b4ad95efae5399", size = 261534, upload-time = "2025-09-21T20:03:23.12Z" }, + { url = "https://files.pythonhosted.org/packages/b2/dc/101f3fa3a45146db0cb03f5b4376e24c0aac818309da23e2de0c75295a91/coverage-7.10.7-cp314-cp314t-win32.whl", hash = "sha256:67f8c5cbcd3deb7a60b3345dffc89a961a484ed0af1f6f73de91705cc6e31235", size = 221784, upload-time = "2025-09-21T20:03:24.769Z" }, + { url = "https://files.pythonhosted.org/packages/4c/a1/74c51803fc70a8a40d7346660379e144be772bab4ac7bb6e6b905152345c/coverage-7.10.7-cp314-cp314t-win_amd64.whl", hash = "sha256:e1ed71194ef6dea7ed2d5cb5f7243d4bcd334bfb63e59878519be558078f848d", size = 222905, upload-time = "2025-09-21T20:03:26.93Z" }, + { url = "https://files.pythonhosted.org/packages/12/65/f116a6d2127df30bcafbceef0302d8a64ba87488bf6f73a6d8eebf060873/coverage-7.10.7-cp314-cp314t-win_arm64.whl", hash = "sha256:7fe650342addd8524ca63d77b2362b02345e5f1a093266787d210c70a50b471a", size = 220922, upload-time = "2025-09-21T20:03:28.672Z" }, + { url = "https://files.pythonhosted.org/packages/ec/16/114df1c291c22cac3b0c127a73e0af5c12ed7bbb6558d310429a0ae24023/coverage-7.10.7-py3-none-any.whl", hash = "sha256:f7941f6f2fe6dd6807a1208737b8a0cbcf1cc6d7b07d24998ad2d63590868260", size = 209952, upload-time = "2025-09-21T20:03:53.918Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, + { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, + { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, + { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, + { url = "https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, + { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, + { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, + { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, + { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, + { url = "https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, + { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, + { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, + { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, + { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, + { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, + { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, + { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" }, + { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = "2025-10-15T23:17:28.06Z" }, + { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, + { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, + { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, + { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, + { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, + { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, + { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, + { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, + { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, + { url = "https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, + { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, + { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, + { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, + { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, + { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, + { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, + { url = "https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, + { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, + { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, +] + +[[package]] +name = "docstring-parser" +version = "0.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" }, +] + +[[package]] +name = "gaab-strands-common" +version = "0.1.0" +source = { editable = "." } +dependencies = [ + { name = "bedrock-agentcore" }, + { name = "boto3" }, + { name = "pydantic" }, + { name = "strands-agents" }, +] + +[package.dev-dependencies] +dev = [ + { name = "black" }, + { name = "isort" }, + { name = "moto" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-cov" }, +] + +[package.metadata] +requires-dist = [ + { name = "bedrock-agentcore", specifier = ">=0.1.0" }, + { name = "boto3", specifier = ">=1.34.0" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "strands-agents", specifier = ">=1.10.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "black", specifier = ">=24.0.0" }, + { name = "isort", specifier = ">=5.13.0" }, + { name = "moto", specifier = ">=5.1.0" }, + { name = "pytest", specifier = ">=8.0.0" }, + { name = "pytest-asyncio", specifier = ">=0.23.0" }, + { name = "pytest-cov", specifier = ">=4.1.0" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/4c/751061ffa58615a32c31b2d82e8482be8dd4a89154f003147acee90f2be9/httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d", size = 15943, upload-time = "2025-10-10T21:48:22.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "isort" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/53/4f3c058e3bace40282876f9b553343376ee687f3c35a525dc79dbd450f88/isort-7.0.0.tar.gz", hash = "sha256:5513527951aadb3ac4292a41a16cbc50dd1642432f5e8c20057d414bdafb4187", size = 805049, upload-time = "2025-10-11T13:30:59.107Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/ed/e3705d6d02b4f7aea715a353c8ce193efd0b5db13e204df895d38734c244/isort-7.0.0-py3-none-any.whl", hash = "sha256:1bcabac8bc3c36c7fb7b98a76c8abb18e0f841a3ba81decac7691008592499c1", size = 94672, upload-time = "2025-10-11T13:30:57.665Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, + { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, + { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" }, + { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, + { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, + { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, + { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, + { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, + { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, + { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, + { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, + { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, + { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, + { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, + { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, + { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, + { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, + { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, + { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, + { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, + { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" }, + { url = "https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" }, + { url = "https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" }, + { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" }, + { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" }, + { url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" }, + { url = "https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" }, + { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" }, + { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" }, + { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" }, + { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" }, + { url = "https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" }, + { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" }, + { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" }, + { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" }, + { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" }, + { url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" }, + { url = "https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, +] + +[[package]] +name = "mcp" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/79/5724a540df19e192e8606c543cdcf162de8eb435077520cca150f7365ec0/mcp-1.17.0.tar.gz", hash = "sha256:1b57fabf3203240ccc48e39859faf3ae1ccb0b571ff798bbedae800c73c6df90", size = 477951, upload-time = "2025-10-10T12:16:44.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/72/3751feae343a5ad07959df713907b5c3fbaed269d697a14b0c449080cf2e/mcp-1.17.0-py3-none-any.whl", hash = "sha256:0660ef275cada7a545af154db3082f176cf1d2681d5e35ae63e014faf0a35d40", size = 167737, upload-time = "2025-10-10T12:16:42.863Z" }, +] + +[[package]] +name = "moto" +version = "5.1.14" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "cryptography" }, + { name = "jinja2" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "responses" }, + { name = "werkzeug" }, + { name = "xmltodict" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/d9/ec94955a1b14ef45ccbda81f2256b30bf1f21ae5c5739fca14130bb1f048/moto-5.1.14.tar.gz", hash = "sha256:450690abb0b152fea7f93e497ac2172f15d8a838b15f22b514db801a6b857ae4", size = 7264025, upload-time = "2025-10-05T13:32:38.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/a0/4c5955187853536c7d337709074a5f3ef391654a32a3379096b2d16bfd9b/moto-5.1.14-py3-none-any.whl", hash = "sha256:b9767848953beaf6650f1fd91615a3bcef84d93bd00603fa64dae38c656548e8", size = 5384022, upload-time = "2025-10-05T13:32:35.763Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/04/05040d7ce33a907a2a02257e601992f0cdf11c73b33f13c4492bf6c3d6d5/opentelemetry_api-1.37.0.tar.gz", hash = "sha256:540735b120355bd5112738ea53621f8d5edb35ebcd6fe21ada3ab1c61d1cd9a7", size = 64923, upload-time = "2025-09-11T10:29:01.662Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/48/28ed9e55dcf2f453128df738210a980e09f4e468a456fa3c763dbc8be70a/opentelemetry_api-1.37.0-py3-none-any.whl", hash = "sha256:accf2024d3e89faec14302213bc39550ec0f4095d1cf5ca688e1bfb1c8612f47", size = 65732, upload-time = "2025-09-11T10:28:41.826Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.58b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f6/36/7c307d9be8ce4ee7beb86d7f1d31027f2a6a89228240405a858d6e4d64f9/opentelemetry_instrumentation-0.58b0.tar.gz", hash = "sha256:df640f3ac715a3e05af145c18f527f4422c6ab6c467e40bd24d2ad75a00cb705", size = 31549, upload-time = "2025-09-11T11:42:14.084Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d4/db/5ff1cd6c5ca1d12ecf1b73be16fbb2a8af2114ee46d4b0e6d4b23f4f4db7/opentelemetry_instrumentation-0.58b0-py3-none-any.whl", hash = "sha256:50f97ac03100676c9f7fc28197f8240c7290ca1baa12da8bfbb9a1de4f34cc45", size = 33019, upload-time = "2025-09-11T11:41:00.624Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-threading" +version = "0.58b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/a9/3888cb0470e6eb48ea17b6802275ae71df411edd6382b9a8e8f391936fda/opentelemetry_instrumentation_threading-0.58b0.tar.gz", hash = "sha256:f68c61f77841f9ff6270176f4d496c10addbceacd782af434d705f83e4504862", size = 8770, upload-time = "2025-09-11T11:42:56.308Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/54/add1076cb37980e617723a96e29c84006983e8ad6fc589dde7f69ddc57d4/opentelemetry_instrumentation_threading-0.58b0-py3-none-any.whl", hash = "sha256:eacc072881006aceb5b9b6831bcdce718c67ef6f31ac0b32bd6a23a94d979b4a", size = 9312, upload-time = "2025-09-11T11:41:58.603Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/62/2e0ca80d7fe94f0b193135375da92c640d15fe81f636658d2acf373086bc/opentelemetry_sdk-1.37.0.tar.gz", hash = "sha256:cc8e089c10953ded765b5ab5669b198bbe0af1b3f89f1007d19acd32dc46dda5", size = 170404, upload-time = "2025-09-11T10:29:11.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/62/9f4ad6a54126fb00f7ed4bb5034964c6e4f00fcd5a905e115bd22707e20d/opentelemetry_sdk-1.37.0-py3-none-any.whl", hash = "sha256:8f3c3c22063e52475c5dbced7209495c2c16723d016d39287dfc215d1771257c", size = 131941, upload-time = "2025-09-11T10:28:57.83Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.58b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/1b/90701d91e6300d9f2fb352153fb1721ed99ed1f6ea14fa992c756016e63a/opentelemetry_semantic_conventions-0.58b0.tar.gz", hash = "sha256:6bd46f51264279c433755767bb44ad00f1c9e2367e1b42af563372c5a6fa0c25", size = 129867, upload-time = "2025-09-11T10:29:12.597Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/90/68152b7465f50285d3ce2481b3aec2f82822e3f52e5152eeeaf516bab841/opentelemetry_semantic_conventions-0.58b0-py3-none-any.whl", hash = "sha256:5564905ab1458b96684db1340232729fce3b5375a06e140e8904c78e4f815b28", size = 207954, upload-time = "2025-09-11T10:28:59.218Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/61/33/9611380c2bdb1225fdef633e2a9610622310fed35ab11dac9620972ee088/platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312", size = 21632, upload-time = "2025-10-08T17:44:48.791Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/cb/ac7874b3e5d58441674fb70742e6c374b28b0c7cb988d37d991cde47166c/platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3", size = 18651, upload-time = "2025-10-08T17:44:47.223Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8d/35/d319ed522433215526689bad428a94058b6dd12190ce7ddd78618ac14b28/pydantic-2.12.2.tar.gz", hash = "sha256:7b8fa15b831a4bbde9d5b84028641ac3080a4ca2cbd4a621a661687e741624fd", size = 816358, upload-time = "2025-10-14T15:02:21.842Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/98/468cb649f208a6f1279448e6e5247b37ae79cf5e4041186f1e2ef3d16345/pydantic-2.12.2-py3-none-any.whl", hash = "sha256:25ff718ee909acd82f1ff9b1a4acfd781bb23ab3739adaa7144f19a6a4e231ae", size = 460628, upload-time = "2025-10-14T15:02:19.623Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/18/d0944e8eaaa3efd0a91b0f1fc537d3be55ad35091b6a87638211ba691964/pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5", size = 457557, upload-time = "2025-10-14T10:23:47.909Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/d0/c20adabd181a029a970738dfe23710b52a31f1258f591874fcdec7359845/pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746", size = 2105688, upload-time = "2025-10-14T10:20:54.448Z" }, + { url = "https://files.pythonhosted.org/packages/00/b6/0ce5c03cec5ae94cca220dfecddc453c077d71363b98a4bbdb3c0b22c783/pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced", size = 1910807, upload-time = "2025-10-14T10:20:56.115Z" }, + { url = "https://files.pythonhosted.org/packages/68/3e/800d3d02c8beb0b5c069c870cbb83799d085debf43499c897bb4b4aaff0d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a", size = 1956669, upload-time = "2025-10-14T10:20:57.874Z" }, + { url = "https://files.pythonhosted.org/packages/60/a4/24271cc71a17f64589be49ab8bd0751f6a0a03046c690df60989f2f95c2c/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02", size = 2051629, upload-time = "2025-10-14T10:21:00.006Z" }, + { url = "https://files.pythonhosted.org/packages/68/de/45af3ca2f175d91b96bfb62e1f2d2f1f9f3b14a734afe0bfeff079f78181/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1", size = 2224049, upload-time = "2025-10-14T10:21:01.801Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/ae4e1ff84672bf869d0a77af24fd78387850e9497753c432875066b5d622/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2", size = 2342409, upload-time = "2025-10-14T10:21:03.556Z" }, + { url = "https://files.pythonhosted.org/packages/18/62/273dd70b0026a085c7b74b000394e1ef95719ea579c76ea2f0cc8893736d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84", size = 2069635, upload-time = "2025-10-14T10:21:05.385Z" }, + { url = "https://files.pythonhosted.org/packages/30/03/cf485fff699b4cdaea469bc481719d3e49f023241b4abb656f8d422189fc/pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d", size = 2194284, upload-time = "2025-10-14T10:21:07.122Z" }, + { url = "https://files.pythonhosted.org/packages/f9/7e/c8e713db32405dfd97211f2fc0a15d6bf8adb7640f3d18544c1f39526619/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d", size = 2137566, upload-time = "2025-10-14T10:21:08.981Z" }, + { url = "https://files.pythonhosted.org/packages/04/f7/db71fd4cdccc8b75990f79ccafbbd66757e19f6d5ee724a6252414483fb4/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2", size = 2316809, upload-time = "2025-10-14T10:21:10.805Z" }, + { url = "https://files.pythonhosted.org/packages/76/63/a54973ddb945f1bca56742b48b144d85c9fc22f819ddeb9f861c249d5464/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab", size = 2311119, upload-time = "2025-10-14T10:21:12.583Z" }, + { url = "https://files.pythonhosted.org/packages/f8/03/5d12891e93c19218af74843a27e32b94922195ded2386f7b55382f904d2f/pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c", size = 1981398, upload-time = "2025-10-14T10:21:14.584Z" }, + { url = "https://files.pythonhosted.org/packages/be/d8/fd0de71f39db91135b7a26996160de71c073d8635edfce8b3c3681be0d6d/pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4", size = 2030735, upload-time = "2025-10-14T10:21:16.432Z" }, + { url = "https://files.pythonhosted.org/packages/72/86/c99921c1cf6650023c08bfab6fe2d7057a5142628ef7ccfa9921f2dda1d5/pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564", size = 1973209, upload-time = "2025-10-14T10:21:18.213Z" }, + { url = "https://files.pythonhosted.org/packages/36/0d/b5706cacb70a8414396efdda3d72ae0542e050b591119e458e2490baf035/pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4", size = 1877324, upload-time = "2025-10-14T10:21:20.363Z" }, + { url = "https://files.pythonhosted.org/packages/de/2d/cba1fa02cfdea72dfb3a9babb067c83b9dff0bbcb198368e000a6b756ea7/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2", size = 1884515, upload-time = "2025-10-14T10:21:22.339Z" }, + { url = "https://files.pythonhosted.org/packages/07/ea/3df927c4384ed9b503c9cc2d076cf983b4f2adb0c754578dfb1245c51e46/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf", size = 2042819, upload-time = "2025-10-14T10:21:26.683Z" }, + { url = "https://files.pythonhosted.org/packages/6a/ee/df8e871f07074250270a3b1b82aad4cd0026b588acd5d7d3eb2fcb1471a3/pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2", size = 1995866, upload-time = "2025-10-14T10:21:28.951Z" }, + { url = "https://files.pythonhosted.org/packages/fc/de/b20f4ab954d6d399499c33ec4fafc46d9551e11dc1858fb7f5dca0748ceb/pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89", size = 1970034, upload-time = "2025-10-14T10:21:30.869Z" }, + { url = "https://files.pythonhosted.org/packages/54/28/d3325da57d413b9819365546eb9a6e8b7cbd9373d9380efd5f74326143e6/pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1", size = 2102022, upload-time = "2025-10-14T10:21:32.809Z" }, + { url = "https://files.pythonhosted.org/packages/9e/24/b58a1bc0d834bf1acc4361e61233ee217169a42efbdc15a60296e13ce438/pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac", size = 1905495, upload-time = "2025-10-14T10:21:34.812Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a4/71f759cc41b7043e8ecdaab81b985a9b6cad7cec077e0b92cff8b71ecf6b/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554", size = 1956131, upload-time = "2025-10-14T10:21:36.924Z" }, + { url = "https://files.pythonhosted.org/packages/b0/64/1e79ac7aa51f1eec7c4cda8cbe456d5d09f05fdd68b32776d72168d54275/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e", size = 2052236, upload-time = "2025-10-14T10:21:38.927Z" }, + { url = "https://files.pythonhosted.org/packages/e9/e3/a3ffc363bd4287b80f1d43dc1c28ba64831f8dfc237d6fec8f2661138d48/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616", size = 2223573, upload-time = "2025-10-14T10:21:41.574Z" }, + { url = "https://files.pythonhosted.org/packages/28/27/78814089b4d2e684a9088ede3790763c64693c3d1408ddc0a248bc789126/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af", size = 2342467, upload-time = "2025-10-14T10:21:44.018Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/4de0e2a1159cb85ad737e03306717637842c88c7fd6d97973172fb183149/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12", size = 2063754, upload-time = "2025-10-14T10:21:46.466Z" }, + { url = "https://files.pythonhosted.org/packages/0f/50/8cb90ce4b9efcf7ae78130afeb99fd1c86125ccdf9906ef64b9d42f37c25/pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d", size = 2196754, upload-time = "2025-10-14T10:21:48.486Z" }, + { url = "https://files.pythonhosted.org/packages/34/3b/ccdc77af9cd5082723574a1cc1bcae7a6acacc829d7c0a06201f7886a109/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad", size = 2137115, upload-time = "2025-10-14T10:21:50.63Z" }, + { url = "https://files.pythonhosted.org/packages/ca/ba/e7c7a02651a8f7c52dc2cff2b64a30c313e3b57c7d93703cecea76c09b71/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a", size = 2317400, upload-time = "2025-10-14T10:21:52.959Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ba/6c533a4ee8aec6b812c643c49bb3bd88d3f01e3cebe451bb85512d37f00f/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025", size = 2312070, upload-time = "2025-10-14T10:21:55.419Z" }, + { url = "https://files.pythonhosted.org/packages/22/ae/f10524fcc0ab8d7f96cf9a74c880243576fd3e72bd8ce4f81e43d22bcab7/pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e", size = 1982277, upload-time = "2025-10-14T10:21:57.474Z" }, + { url = "https://files.pythonhosted.org/packages/b4/dc/e5aa27aea1ad4638f0c3fb41132f7eb583bd7420ee63204e2d4333a3bbf9/pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894", size = 2024608, upload-time = "2025-10-14T10:21:59.557Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/51d89cc2612bd147198e120a13f150afbf0bcb4615cddb049ab10b81b79e/pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d", size = 1967614, upload-time = "2025-10-14T10:22:01.847Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c2/472f2e31b95eff099961fa050c376ab7156a81da194f9edb9f710f68787b/pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da", size = 1876904, upload-time = "2025-10-14T10:22:04.062Z" }, + { url = "https://files.pythonhosted.org/packages/4a/07/ea8eeb91173807ecdae4f4a5f4b150a520085b35454350fc219ba79e66a3/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e", size = 1882538, upload-time = "2025-10-14T10:22:06.39Z" }, + { url = "https://files.pythonhosted.org/packages/1e/29/b53a9ca6cd366bfc928823679c6a76c7a4c69f8201c0ba7903ad18ebae2f/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa", size = 2041183, upload-time = "2025-10-14T10:22:08.812Z" }, + { url = "https://files.pythonhosted.org/packages/c7/3d/f8c1a371ceebcaf94d6dd2d77c6cf4b1c078e13a5837aee83f760b4f7cfd/pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d", size = 1993542, upload-time = "2025-10-14T10:22:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ac/9fc61b4f9d079482a290afe8d206b8f490e9fd32d4fc03ed4fc698214e01/pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0", size = 1973897, upload-time = "2025-10-14T10:22:13.444Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/c5/dbbc27b814c71676593d1c3f718e6cd7d4f00652cefa24b75f7aa3efb25e/pydantic_settings-2.11.0.tar.gz", hash = "sha256:d0e87a1c7d33593beb7194adb8470fc426e95ba02af83a0f23474a04c9a08180", size = 188394, upload-time = "2025-09-24T14:19:11.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/d6/887a1ff844e64aa823fb4905978d882a633cfe295c32eacad582b78a7d8b/pydantic_settings-2.11.0-py3-none-any.whl", hash = "sha256:fe2cea3413b9530d10f3a5875adffb17ada5c1e1bab0b2885546d7310415207c", size = 48608, upload-time = "2025-09-24T14:19:10.015Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/86/9e3c5f48f7b7b638b216e4b9e645f54d199d7abbbab7a64a13b4e12ba10f/pytest_asyncio-1.2.0.tar.gz", hash = "sha256:c609a64a2a8768462d0c99811ddb8bd2583c33fd33cf7f21af1c142e824ffb57", size = 50119, upload-time = "2025-09-12T07:33:53.816Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/93/2fa34714b7a4ae72f2f8dad66ba17dd9a2c793220719e736dda28b7aec27/pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99", size = 15095, upload-time = "2025-09-12T07:33:52.639Z" }, +] + +[[package]] +name = "pytest-cov" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage" }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/f7/c933acc76f5208b3b00089573cf6a2bc26dc80a8aece8f52bb7d6b1855ca/pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1", size = 54328, upload-time = "2025-09-09T10:57:02.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, +] + +[[package]] +name = "pytokens" +version = "0.1.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/5f/e959a442435e24f6fb5a01aec6c657079ceaca1b3baf18561c3728d681da/pytokens-0.1.10.tar.gz", hash = "sha256:c9a4bfa0be1d26aebce03e6884ba454e842f186a59ea43a6d3b25af58223c044", size = 12171, upload-time = "2025-02-19T14:51:22.001Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/e5/63bed382f6a7a5ba70e7e132b8b7b8abbcf4888ffa6be4877698dcfbed7d/pytokens-0.1.10-py3-none-any.whl", hash = "sha256:db7b72284e480e69fb085d9f251f66b3d2df8b7166059261258ff35f50fb711b", size = 12046, upload-time = "2025-02-19T14:51:18.694Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "responses" +version = "0.25.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/95/89c054ad70bfef6da605338b009b2e283485835351a9935c7bfbfaca7ffc/responses-0.25.8.tar.gz", hash = "sha256:9374d047a575c8f781b94454db5cab590b6029505f488d12899ddb10a4af1cf4", size = 79320, upload-time = "2025-08-08T19:01:46.709Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/4c/cc276ce57e572c102d9542d383b2cfd551276581dc60004cb94fe8774c11/responses-0.25.8-py3-none-any.whl", hash = "sha256:0c710af92def29c8352ceadff0c3fe340ace27cf5af1bbe46fb71275bcd2831c", size = 34769, upload-time = "2025-08-08T19:01:45.018Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.27.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/dd/2c0cbe774744272b0ae725f44032c77bdcab6e8bcf544bffa3b6e70c8dba/rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8", size = 27479, upload-time = "2025-08-27T12:16:36.024Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/77/610aeee8d41e39080c7e14afa5387138e3c9fa9756ab893d09d99e7d8e98/rpds_py-0.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e4b9fcfbc021633863a37e92571d6f91851fa656f0180246e84cbd8b3f6b329b", size = 361741, upload-time = "2025-08-27T12:13:31.039Z" }, + { url = "https://files.pythonhosted.org/packages/3a/fc/c43765f201c6a1c60be2043cbdb664013def52460a4c7adace89d6682bf4/rpds_py-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1441811a96eadca93c517d08df75de45e5ffe68aa3089924f963c782c4b898cf", size = 345574, upload-time = "2025-08-27T12:13:32.902Z" }, + { url = "https://files.pythonhosted.org/packages/20/42/ee2b2ca114294cd9847d0ef9c26d2b0851b2e7e00bf14cc4c0b581df0fc3/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55266dafa22e672f5a4f65019015f90336ed31c6383bd53f5e7826d21a0e0b83", size = 385051, upload-time = "2025-08-27T12:13:34.228Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e8/1e430fe311e4799e02e2d1af7c765f024e95e17d651612425b226705f910/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78827d7ac08627ea2c8e02c9e5b41180ea5ea1f747e9db0915e3adf36b62dcf", size = 398395, upload-time = "2025-08-27T12:13:36.132Z" }, + { url = "https://files.pythonhosted.org/packages/82/95/9dc227d441ff2670651c27a739acb2535ccaf8b351a88d78c088965e5996/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae92443798a40a92dc5f0b01d8a7c93adde0c4dc965310a29ae7c64d72b9fad2", size = 524334, upload-time = "2025-08-27T12:13:37.562Z" }, + { url = "https://files.pythonhosted.org/packages/87/01/a670c232f401d9ad461d9a332aa4080cd3cb1d1df18213dbd0d2a6a7ab51/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c46c9dd2403b66a2a3b9720ec4b74d4ab49d4fabf9f03dfdce2d42af913fe8d0", size = 407691, upload-time = "2025-08-27T12:13:38.94Z" }, + { url = "https://files.pythonhosted.org/packages/03/36/0a14aebbaa26fe7fab4780c76f2239e76cc95a0090bdb25e31d95c492fcd/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efe4eb1d01b7f5f1939f4ef30ecea6c6b3521eec451fb93191bf84b2a522418", size = 386868, upload-time = "2025-08-27T12:13:40.192Z" }, + { url = "https://files.pythonhosted.org/packages/3b/03/8c897fb8b5347ff6c1cc31239b9611c5bf79d78c984430887a353e1409a1/rpds_py-0.27.1-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:15d3b4d83582d10c601f481eca29c3f138d44c92187d197aff663a269197c02d", size = 405469, upload-time = "2025-08-27T12:13:41.496Z" }, + { url = "https://files.pythonhosted.org/packages/da/07/88c60edc2df74850d496d78a1fdcdc7b54360a7f610a4d50008309d41b94/rpds_py-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4ed2e16abbc982a169d30d1a420274a709949e2cbdef119fe2ec9d870b42f274", size = 422125, upload-time = "2025-08-27T12:13:42.802Z" }, + { url = "https://files.pythonhosted.org/packages/6b/86/5f4c707603e41b05f191a749984f390dabcbc467cf833769b47bf14ba04f/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a75f305c9b013289121ec0f1181931975df78738cdf650093e6b86d74aa7d8dd", size = 562341, upload-time = "2025-08-27T12:13:44.472Z" }, + { url = "https://files.pythonhosted.org/packages/b2/92/3c0cb2492094e3cd9baf9e49bbb7befeceb584ea0c1a8b5939dca4da12e5/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67ce7620704745881a3d4b0ada80ab4d99df390838839921f99e63c474f82cf2", size = 592511, upload-time = "2025-08-27T12:13:45.898Z" }, + { url = "https://files.pythonhosted.org/packages/10/bb/82e64fbb0047c46a168faa28d0d45a7851cd0582f850b966811d30f67ad8/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d992ac10eb86d9b6f369647b6a3f412fc0075cfd5d799530e84d335e440a002", size = 557736, upload-time = "2025-08-27T12:13:47.408Z" }, + { url = "https://files.pythonhosted.org/packages/00/95/3c863973d409210da7fb41958172c6b7dbe7fc34e04d3cc1f10bb85e979f/rpds_py-0.27.1-cp313-cp313-win32.whl", hash = "sha256:4f75e4bd8ab8db624e02c8e2fc4063021b58becdbe6df793a8111d9343aec1e3", size = 221462, upload-time = "2025-08-27T12:13:48.742Z" }, + { url = "https://files.pythonhosted.org/packages/ce/2c/5867b14a81dc217b56d95a9f2a40fdbc56a1ab0181b80132beeecbd4b2d6/rpds_py-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9025faafc62ed0b75a53e541895ca272815bec18abe2249ff6501c8f2e12b83", size = 232034, upload-time = "2025-08-27T12:13:50.11Z" }, + { url = "https://files.pythonhosted.org/packages/c7/78/3958f3f018c01923823f1e47f1cc338e398814b92d83cd278364446fac66/rpds_py-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:ed10dc32829e7d222b7d3b93136d25a406ba9788f6a7ebf6809092da1f4d279d", size = 222392, upload-time = "2025-08-27T12:13:52.587Z" }, + { url = "https://files.pythonhosted.org/packages/01/76/1cdf1f91aed5c3a7bf2eba1f1c4e4d6f57832d73003919a20118870ea659/rpds_py-0.27.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:92022bbbad0d4426e616815b16bc4127f83c9a74940e1ccf3cfe0b387aba0228", size = 358355, upload-time = "2025-08-27T12:13:54.012Z" }, + { url = "https://files.pythonhosted.org/packages/c3/6f/bf142541229374287604caf3bb2a4ae17f0a580798fd72d3b009b532db4e/rpds_py-0.27.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47162fdab9407ec3f160805ac3e154df042e577dd53341745fc7fb3f625e6d92", size = 342138, upload-time = "2025-08-27T12:13:55.791Z" }, + { url = "https://files.pythonhosted.org/packages/1a/77/355b1c041d6be40886c44ff5e798b4e2769e497b790f0f7fd1e78d17e9a8/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb89bec23fddc489e5d78b550a7b773557c9ab58b7946154a10a6f7a214a48b2", size = 380247, upload-time = "2025-08-27T12:13:57.683Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a4/d9cef5c3946ea271ce2243c51481971cd6e34f21925af2783dd17b26e815/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e48af21883ded2b3e9eb48cb7880ad8598b31ab752ff3be6457001d78f416723", size = 390699, upload-time = "2025-08-27T12:13:59.137Z" }, + { url = "https://files.pythonhosted.org/packages/3a/06/005106a7b8c6c1a7e91b73169e49870f4af5256119d34a361ae5240a0c1d/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f5b7bd8e219ed50299e58551a410b64daafb5017d54bbe822e003856f06a802", size = 521852, upload-time = "2025-08-27T12:14:00.583Z" }, + { url = "https://files.pythonhosted.org/packages/e5/3e/50fb1dac0948e17a02eb05c24510a8fe12d5ce8561c6b7b7d1339ab7ab9c/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08f1e20bccf73b08d12d804d6e1c22ca5530e71659e6673bce31a6bb71c1e73f", size = 402582, upload-time = "2025-08-27T12:14:02.034Z" }, + { url = "https://files.pythonhosted.org/packages/cb/b0/f4e224090dc5b0ec15f31a02d746ab24101dd430847c4d99123798661bfc/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dc5dceeaefcc96dc192e3a80bbe1d6c410c469e97bdd47494a7d930987f18b2", size = 384126, upload-time = "2025-08-27T12:14:03.437Z" }, + { url = "https://files.pythonhosted.org/packages/54/77/ac339d5f82b6afff1df8f0fe0d2145cc827992cb5f8eeb90fc9f31ef7a63/rpds_py-0.27.1-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d76f9cc8665acdc0c9177043746775aa7babbf479b5520b78ae4002d889f5c21", size = 399486, upload-time = "2025-08-27T12:14:05.443Z" }, + { url = "https://files.pythonhosted.org/packages/d6/29/3e1c255eee6ac358c056a57d6d6869baa00a62fa32eea5ee0632039c50a3/rpds_py-0.27.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134fae0e36022edad8290a6661edf40c023562964efea0cc0ec7f5d392d2aaef", size = 414832, upload-time = "2025-08-27T12:14:06.902Z" }, + { url = "https://files.pythonhosted.org/packages/3f/db/6d498b844342deb3fa1d030598db93937a9964fcf5cb4da4feb5f17be34b/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb11a4f1b2b63337cfd3b4d110af778a59aae51c81d195768e353d8b52f88081", size = 557249, upload-time = "2025-08-27T12:14:08.37Z" }, + { url = "https://files.pythonhosted.org/packages/60/f3/690dd38e2310b6f68858a331399b4d6dbb9132c3e8ef8b4333b96caf403d/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:13e608ac9f50a0ed4faec0e90ece76ae33b34c0e8656e3dceb9a7db994c692cd", size = 587356, upload-time = "2025-08-27T12:14:10.034Z" }, + { url = "https://files.pythonhosted.org/packages/86/e3/84507781cccd0145f35b1dc32c72675200c5ce8d5b30f813e49424ef68fc/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dd2135527aa40f061350c3f8f89da2644de26cd73e4de458e79606384f4f68e7", size = 555300, upload-time = "2025-08-27T12:14:11.783Z" }, + { url = "https://files.pythonhosted.org/packages/e5/ee/375469849e6b429b3516206b4580a79e9ef3eb12920ddbd4492b56eaacbe/rpds_py-0.27.1-cp313-cp313t-win32.whl", hash = "sha256:3020724ade63fe320a972e2ffd93b5623227e684315adce194941167fee02688", size = 216714, upload-time = "2025-08-27T12:14:13.629Z" }, + { url = "https://files.pythonhosted.org/packages/21/87/3fc94e47c9bd0742660e84706c311a860dcae4374cf4a03c477e23ce605a/rpds_py-0.27.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8ee50c3e41739886606388ba3ab3ee2aae9f35fb23f833091833255a31740797", size = 228943, upload-time = "2025-08-27T12:14:14.937Z" }, + { url = "https://files.pythonhosted.org/packages/70/36/b6e6066520a07cf029d385de869729a895917b411e777ab1cde878100a1d/rpds_py-0.27.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:acb9aafccaae278f449d9c713b64a9e68662e7799dbd5859e2c6b3c67b56d334", size = 362472, upload-time = "2025-08-27T12:14:16.333Z" }, + { url = "https://files.pythonhosted.org/packages/af/07/b4646032e0dcec0df9c73a3bd52f63bc6c5f9cda992f06bd0e73fe3fbebd/rpds_py-0.27.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b7fb801aa7f845ddf601c49630deeeccde7ce10065561d92729bfe81bd21fb33", size = 345676, upload-time = "2025-08-27T12:14:17.764Z" }, + { url = "https://files.pythonhosted.org/packages/b0/16/2f1003ee5d0af4bcb13c0cf894957984c32a6751ed7206db2aee7379a55e/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0dd05afb46597b9a2e11c351e5e4283c741237e7f617ffb3252780cca9336a", size = 385313, upload-time = "2025-08-27T12:14:19.829Z" }, + { url = "https://files.pythonhosted.org/packages/05/cd/7eb6dd7b232e7f2654d03fa07f1414d7dfc980e82ba71e40a7c46fd95484/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6dfb0e058adb12d8b1d1b25f686e94ffa65d9995a5157afe99743bf7369d62b", size = 399080, upload-time = "2025-08-27T12:14:21.531Z" }, + { url = "https://files.pythonhosted.org/packages/20/51/5829afd5000ec1cb60f304711f02572d619040aa3ec033d8226817d1e571/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed090ccd235f6fa8bb5861684567f0a83e04f52dfc2e5c05f2e4b1309fcf85e7", size = 523868, upload-time = "2025-08-27T12:14:23.485Z" }, + { url = "https://files.pythonhosted.org/packages/05/2c/30eebca20d5db95720ab4d2faec1b5e4c1025c473f703738c371241476a2/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf876e79763eecf3e7356f157540d6a093cef395b65514f17a356f62af6cc136", size = 408750, upload-time = "2025-08-27T12:14:24.924Z" }, + { url = "https://files.pythonhosted.org/packages/90/1a/cdb5083f043597c4d4276eae4e4c70c55ab5accec078da8611f24575a367/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12ed005216a51b1d6e2b02a7bd31885fe317e45897de81d86dcce7d74618ffff", size = 387688, upload-time = "2025-08-27T12:14:27.537Z" }, + { url = "https://files.pythonhosted.org/packages/7c/92/cf786a15320e173f945d205ab31585cc43969743bb1a48b6888f7a2b0a2d/rpds_py-0.27.1-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:ee4308f409a40e50593c7e3bb8cbe0b4d4c66d1674a316324f0c2f5383b486f9", size = 407225, upload-time = "2025-08-27T12:14:28.981Z" }, + { url = "https://files.pythonhosted.org/packages/33/5c/85ee16df5b65063ef26017bef33096557a4c83fbe56218ac7cd8c235f16d/rpds_py-0.27.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b08d152555acf1f455154d498ca855618c1378ec810646fcd7c76416ac6dc60", size = 423361, upload-time = "2025-08-27T12:14:30.469Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8e/1c2741307fcabd1a334ecf008e92c4f47bb6f848712cf15c923becfe82bb/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:dce51c828941973a5684d458214d3a36fcd28da3e1875d659388f4f9f12cc33e", size = 562493, upload-time = "2025-08-27T12:14:31.987Z" }, + { url = "https://files.pythonhosted.org/packages/04/03/5159321baae9b2222442a70c1f988cbbd66b9be0675dd3936461269be360/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c1476d6f29eb81aa4151c9a31219b03f1f798dc43d8af1250a870735516a1212", size = 592623, upload-time = "2025-08-27T12:14:33.543Z" }, + { url = "https://files.pythonhosted.org/packages/ff/39/c09fd1ad28b85bc1d4554a8710233c9f4cefd03d7717a1b8fbfd171d1167/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3ce0cac322b0d69b63c9cdb895ee1b65805ec9ffad37639f291dd79467bee675", size = 558800, upload-time = "2025-08-27T12:14:35.436Z" }, + { url = "https://files.pythonhosted.org/packages/c5/d6/99228e6bbcf4baa764b18258f519a9035131d91b538d4e0e294313462a98/rpds_py-0.27.1-cp314-cp314-win32.whl", hash = "sha256:dfbfac137d2a3d0725758cd141f878bf4329ba25e34979797c89474a89a8a3a3", size = 221943, upload-time = "2025-08-27T12:14:36.898Z" }, + { url = "https://files.pythonhosted.org/packages/be/07/c802bc6b8e95be83b79bdf23d1aa61d68324cb1006e245d6c58e959e314d/rpds_py-0.27.1-cp314-cp314-win_amd64.whl", hash = "sha256:a6e57b0abfe7cc513450fcf529eb486b6e4d3f8aee83e92eb5f1ef848218d456", size = 233739, upload-time = "2025-08-27T12:14:38.386Z" }, + { url = "https://files.pythonhosted.org/packages/c8/89/3e1b1c16d4c2d547c5717377a8df99aee8099ff050f87c45cb4d5fa70891/rpds_py-0.27.1-cp314-cp314-win_arm64.whl", hash = "sha256:faf8d146f3d476abfee026c4ae3bdd9ca14236ae4e4c310cbd1cf75ba33d24a3", size = 223120, upload-time = "2025-08-27T12:14:39.82Z" }, + { url = "https://files.pythonhosted.org/packages/62/7e/dc7931dc2fa4a6e46b2a4fa744a9fe5c548efd70e0ba74f40b39fa4a8c10/rpds_py-0.27.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:ba81d2b56b6d4911ce735aad0a1d4495e808b8ee4dc58715998741a26874e7c2", size = 358944, upload-time = "2025-08-27T12:14:41.199Z" }, + { url = "https://files.pythonhosted.org/packages/e6/22/4af76ac4e9f336bfb1a5f240d18a33c6b2fcaadb7472ac7680576512b49a/rpds_py-0.27.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:84f7d509870098de0e864cad0102711c1e24e9b1a50ee713b65928adb22269e4", size = 342283, upload-time = "2025-08-27T12:14:42.699Z" }, + { url = "https://files.pythonhosted.org/packages/1c/15/2a7c619b3c2272ea9feb9ade67a45c40b3eeb500d503ad4c28c395dc51b4/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e960fc78fecd1100539f14132425e1d5fe44ecb9239f8f27f079962021523e", size = 380320, upload-time = "2025-08-27T12:14:44.157Z" }, + { url = "https://files.pythonhosted.org/packages/a2/7d/4c6d243ba4a3057e994bb5bedd01b5c963c12fe38dde707a52acdb3849e7/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:62f85b665cedab1a503747617393573995dac4600ff51869d69ad2f39eb5e817", size = 391760, upload-time = "2025-08-27T12:14:45.845Z" }, + { url = "https://files.pythonhosted.org/packages/b4/71/b19401a909b83bcd67f90221330bc1ef11bc486fe4e04c24388d28a618ae/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fed467af29776f6556250c9ed85ea5a4dd121ab56a5f8b206e3e7a4c551e48ec", size = 522476, upload-time = "2025-08-27T12:14:47.364Z" }, + { url = "https://files.pythonhosted.org/packages/e4/44/1a3b9715c0455d2e2f0f6df5ee6d6f5afdc423d0773a8a682ed2b43c566c/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2729615f9d430af0ae6b36cf042cb55c0936408d543fb691e1a9e36648fd35a", size = 403418, upload-time = "2025-08-27T12:14:49.991Z" }, + { url = "https://files.pythonhosted.org/packages/1c/4b/fb6c4f14984eb56673bc868a66536f53417ddb13ed44b391998100a06a96/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b207d881a9aef7ba753d69c123a35d96ca7cb808056998f6b9e8747321f03b8", size = 384771, upload-time = "2025-08-27T12:14:52.159Z" }, + { url = "https://files.pythonhosted.org/packages/c0/56/d5265d2d28b7420d7b4d4d85cad8ef891760f5135102e60d5c970b976e41/rpds_py-0.27.1-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:639fd5efec029f99b79ae47e5d7e00ad8a773da899b6309f6786ecaf22948c48", size = 400022, upload-time = "2025-08-27T12:14:53.859Z" }, + { url = "https://files.pythonhosted.org/packages/8f/e9/9f5fc70164a569bdd6ed9046486c3568d6926e3a49bdefeeccfb18655875/rpds_py-0.27.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fecc80cb2a90e28af8a9b366edacf33d7a91cbfe4c2c4544ea1246e949cfebeb", size = 416787, upload-time = "2025-08-27T12:14:55.673Z" }, + { url = "https://files.pythonhosted.org/packages/d4/64/56dd03430ba491db943a81dcdef115a985aac5f44f565cd39a00c766d45c/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42a89282d711711d0a62d6f57d81aa43a1368686c45bc1c46b7f079d55692734", size = 557538, upload-time = "2025-08-27T12:14:57.245Z" }, + { url = "https://files.pythonhosted.org/packages/3f/36/92cc885a3129993b1d963a2a42ecf64e6a8e129d2c7cc980dbeba84e55fb/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:cf9931f14223de59551ab9d38ed18d92f14f055a5f78c1d8ad6493f735021bbb", size = 588512, upload-time = "2025-08-27T12:14:58.728Z" }, + { url = "https://files.pythonhosted.org/packages/dd/10/6b283707780a81919f71625351182b4f98932ac89a09023cb61865136244/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f39f58a27cc6e59f432b568ed8429c7e1641324fbe38131de852cd77b2d534b0", size = 555813, upload-time = "2025-08-27T12:15:00.334Z" }, + { url = "https://files.pythonhosted.org/packages/04/2e/30b5ea18c01379da6272a92825dd7e53dc9d15c88a19e97932d35d430ef7/rpds_py-0.27.1-cp314-cp314t-win32.whl", hash = "sha256:d5fa0ee122dc09e23607a28e6d7b150da16c662e66409bbe85230e4c85bb528a", size = 217385, upload-time = "2025-08-27T12:15:01.937Z" }, + { url = "https://files.pythonhosted.org/packages/32/7d/97119da51cb1dd3f2f3c0805f155a3aa4a95fa44fe7d78ae15e69edf4f34/rpds_py-0.27.1-cp314-cp314t-win_amd64.whl", hash = "sha256:6567d2bb951e21232c2f660c24cf3470bb96de56cdcb3f071a83feeaff8a2772", size = 230097, upload-time = "2025-08-27T12:15:03.961Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/74/8d69dcb7a9efe8baa2046891735e5dfe433ad558ae23d9e3c14c633d1d58/s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125", size = 151547, upload-time = "2025-09-09T19:23:31.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/f0/ae7ca09223a81a1d890b2557186ea015f6e0502e9b8cb8e1813f1d8cfa4e/s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456", size = 85712, upload-time = "2025-09-09T19:23:30.041Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "sse-starlette" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/6f/22ed6e33f8a9e76ca0a412405f31abb844b779d52c5f96660766edcd737c/sse_starlette-3.0.2.tar.gz", hash = "sha256:ccd60b5765ebb3584d0de2d7a6e4f745672581de4f5005ab31c3a25d10b52b3a", size = 20985, upload-time = "2025-07-27T09:07:44.565Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/10/c78f463b4ef22eef8491f218f692be838282cd65480f6e423d7730dfd1fb/sse_starlette-3.0.2-py3-none-any.whl", hash = "sha256:16b7cbfddbcd4eaca11f7b586f3b8a080f1afe952c15813455b162edea619e5a", size = 11297, upload-time = "2025-07-27T09:07:43.268Z" }, +] + +[[package]] +name = "starlette" +version = "0.48.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/a5/d6f429d43394057b67a6b5bbe6eae2f77a6bf7459d961fdb224bf206eee6/starlette-0.48.0.tar.gz", hash = "sha256:7e8cee469a8ab2352911528110ce9088fdc6a37d9876926e73da7ce4aa4c7a46", size = 2652949, upload-time = "2025-09-13T08:41:05.699Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/72/2db2f49247d0a18b4f1bb9a5a39a0162869acf235f3a96418363947b3d46/starlette-0.48.0-py3-none-any.whl", hash = "sha256:0764ca97b097582558ecb498132ed0c7d942f233f365b86ba37770e026510659", size = 73736, upload-time = "2025-09-13T08:41:03.869Z" }, +] + +[[package]] +name = "strands-agents" +version = "1.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "docstring-parser" }, + { name = "mcp" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation-threading" }, + { name = "opentelemetry-sdk" }, + { name = "pydantic" }, + { name = "typing-extensions" }, + { name = "watchdog" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/56/3d3cb9bf62d45f97befe82fbb73ad65b46e9a6efd21151c38c466cd87c11/strands_agents-1.12.0.tar.gz", hash = "sha256:8f17e775971505ab7841a3139cde9879632a26cdd9cd55be74de83f0e7f804c0", size = 418141, upload-time = "2025-10-10T15:16:45.753Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/05/2f0fbce4a3acd7b9c042368bbe8038409a7b30d65138bd3b37a06d1a4cc4/strands_agents-1.12.0-py3-none-any.whl", hash = "sha256:af0f9c8a175666009863d0fb4438e71000ea3a2f0cbda3dc308c35dd4f9a1eb0", size = 216043, upload-time = "2025-10-10T15:16:44.043Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/57/1616c8274c3442d802621abf5deb230771c7a0fec9414cb6763900eb3868/uvicorn-0.37.0.tar.gz", hash = "sha256:4115c8add6d3fd536c8ee77f0e14a7fd2ebba939fed9b02583a97f80648f9e13", size = 80367, upload-time = "2025-09-23T13:33:47.486Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/cd/584a2ceb5532af99dd09e50919e3615ba99aa127e9850eafe5f31ddfdb9a/uvicorn-0.37.0-py3-none-any.whl", hash = "sha256:913b2b88672343739927ce381ff9e2ad62541f9f8289664fa1d1d3803fa2ce6c", size = 67976, upload-time = "2025-09-23T13:33:45.842Z" }, +] + +[[package]] +name = "watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, + { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925, upload-time = "2024-11-08T15:52:18.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, +] + +[[package]] +name = "wrapt" +version = "1.17.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/8f/aeb76c5b46e273670962298c23e7ddde79916cb74db802131d49a85e4b7d/wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0", size = 55547, upload-time = "2025-08-12T05:53:21.714Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/f6/759ece88472157acb55fc195e5b116e06730f1b651b5b314c66291729193/wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0", size = 54003, upload-time = "2025-08-12T05:51:48.627Z" }, + { url = "https://files.pythonhosted.org/packages/4f/a9/49940b9dc6d47027dc850c116d79b4155f15c08547d04db0f07121499347/wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77", size = 39025, upload-time = "2025-08-12T05:51:37.156Z" }, + { url = "https://files.pythonhosted.org/packages/45/35/6a08de0f2c96dcdd7fe464d7420ddb9a7655a6561150e5fc4da9356aeaab/wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7", size = 39108, upload-time = "2025-08-12T05:51:58.425Z" }, + { url = "https://files.pythonhosted.org/packages/0c/37/6faf15cfa41bf1f3dba80cd3f5ccc6622dfccb660ab26ed79f0178c7497f/wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277", size = 88072, upload-time = "2025-08-12T05:52:37.53Z" }, + { url = "https://files.pythonhosted.org/packages/78/f2/efe19ada4a38e4e15b6dff39c3e3f3f73f5decf901f66e6f72fe79623a06/wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d", size = 88214, upload-time = "2025-08-12T05:52:15.886Z" }, + { url = "https://files.pythonhosted.org/packages/40/90/ca86701e9de1622b16e09689fc24b76f69b06bb0150990f6f4e8b0eeb576/wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa", size = 87105, upload-time = "2025-08-12T05:52:17.914Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e0/d10bd257c9a3e15cbf5523025252cc14d77468e8ed644aafb2d6f54cb95d/wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050", size = 87766, upload-time = "2025-08-12T05:52:39.243Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cf/7d848740203c7b4b27eb55dbfede11aca974a51c3d894f6cc4b865f42f58/wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8", size = 36711, upload-time = "2025-08-12T05:53:10.074Z" }, + { url = "https://files.pythonhosted.org/packages/57/54/35a84d0a4d23ea675994104e667ceff49227ce473ba6a59ba2c84f250b74/wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb", size = 38885, upload-time = "2025-08-12T05:53:08.695Z" }, + { url = "https://files.pythonhosted.org/packages/01/77/66e54407c59d7b02a3c4e0af3783168fff8e5d61def52cda8728439d86bc/wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16", size = 36896, upload-time = "2025-08-12T05:52:55.34Z" }, + { url = "https://files.pythonhosted.org/packages/02/a2/cd864b2a14f20d14f4c496fab97802001560f9f41554eef6df201cd7f76c/wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39", size = 54132, upload-time = "2025-08-12T05:51:49.864Z" }, + { url = "https://files.pythonhosted.org/packages/d5/46/d011725b0c89e853dc44cceb738a307cde5d240d023d6d40a82d1b4e1182/wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235", size = 39091, upload-time = "2025-08-12T05:51:38.935Z" }, + { url = "https://files.pythonhosted.org/packages/2e/9e/3ad852d77c35aae7ddebdbc3b6d35ec8013af7d7dddad0ad911f3d891dae/wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c", size = 39172, upload-time = "2025-08-12T05:51:59.365Z" }, + { url = "https://files.pythonhosted.org/packages/c3/f7/c983d2762bcce2326c317c26a6a1e7016f7eb039c27cdf5c4e30f4160f31/wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b", size = 87163, upload-time = "2025-08-12T05:52:40.965Z" }, + { url = "https://files.pythonhosted.org/packages/e4/0f/f673f75d489c7f22d17fe0193e84b41540d962f75fce579cf6873167c29b/wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa", size = 87963, upload-time = "2025-08-12T05:52:20.326Z" }, + { url = "https://files.pythonhosted.org/packages/df/61/515ad6caca68995da2fac7a6af97faab8f78ebe3bf4f761e1b77efbc47b5/wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7", size = 86945, upload-time = "2025-08-12T05:52:21.581Z" }, + { url = "https://files.pythonhosted.org/packages/d3/bd/4e70162ce398462a467bc09e768bee112f1412e563620adc353de9055d33/wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4", size = 86857, upload-time = "2025-08-12T05:52:43.043Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b8/da8560695e9284810b8d3df8a19396a6e40e7518059584a1a394a2b35e0a/wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10", size = 37178, upload-time = "2025-08-12T05:53:12.605Z" }, + { url = "https://files.pythonhosted.org/packages/db/c8/b71eeb192c440d67a5a0449aaee2310a1a1e8eca41676046f99ed2487e9f/wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6", size = 39310, upload-time = "2025-08-12T05:53:11.106Z" }, + { url = "https://files.pythonhosted.org/packages/45/20/2cda20fd4865fa40f86f6c46ed37a2a8356a7a2fde0773269311f2af56c7/wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58", size = 37266, upload-time = "2025-08-12T05:52:56.531Z" }, + { url = "https://files.pythonhosted.org/packages/77/ed/dd5cf21aec36c80443c6f900449260b80e2a65cf963668eaef3b9accce36/wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a", size = 56544, upload-time = "2025-08-12T05:51:51.109Z" }, + { url = "https://files.pythonhosted.org/packages/8d/96/450c651cc753877ad100c7949ab4d2e2ecc4d97157e00fa8f45df682456a/wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067", size = 40283, upload-time = "2025-08-12T05:51:39.912Z" }, + { url = "https://files.pythonhosted.org/packages/d1/86/2fcad95994d9b572db57632acb6f900695a648c3e063f2cd344b3f5c5a37/wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454", size = 40366, upload-time = "2025-08-12T05:52:00.693Z" }, + { url = "https://files.pythonhosted.org/packages/64/0e/f4472f2fdde2d4617975144311f8800ef73677a159be7fe61fa50997d6c0/wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e", size = 108571, upload-time = "2025-08-12T05:52:44.521Z" }, + { url = "https://files.pythonhosted.org/packages/cc/01/9b85a99996b0a97c8a17484684f206cbb6ba73c1ce6890ac668bcf3838fb/wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f", size = 113094, upload-time = "2025-08-12T05:52:22.618Z" }, + { url = "https://files.pythonhosted.org/packages/25/02/78926c1efddcc7b3aa0bc3d6b33a822f7d898059f7cd9ace8c8318e559ef/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056", size = 110659, upload-time = "2025-08-12T05:52:24.057Z" }, + { url = "https://files.pythonhosted.org/packages/dc/ee/c414501ad518ac3e6fe184753632fe5e5ecacdcf0effc23f31c1e4f7bfcf/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804", size = 106946, upload-time = "2025-08-12T05:52:45.976Z" }, + { url = "https://files.pythonhosted.org/packages/be/44/a1bd64b723d13bb151d6cc91b986146a1952385e0392a78567e12149c7b4/wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977", size = 38717, upload-time = "2025-08-12T05:53:15.214Z" }, + { url = "https://files.pythonhosted.org/packages/79/d9/7cfd5a312760ac4dd8bf0184a6ee9e43c33e47f3dadc303032ce012b8fa3/wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116", size = 41334, upload-time = "2025-08-12T05:53:14.178Z" }, + { url = "https://files.pythonhosted.org/packages/46/78/10ad9781128ed2f99dbc474f43283b13fea8ba58723e98844367531c18e9/wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6", size = 38471, upload-time = "2025-08-12T05:52:57.784Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, +] + +[[package]] +name = "xmltodict" +version = "1.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/aa/917ceeed4dbb80d2f04dbd0c784b7ee7bba8ae5a54837ef0e5e062cd3cfb/xmltodict-1.0.2.tar.gz", hash = "sha256:54306780b7c2175a3967cad1db92f218207e5bc1aba697d887807c0fb68b7649", size = 25725, upload-time = "2025-09-17T21:59:26.459Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/20/69a0e6058bc5ea74892d089d64dfc3a62ba78917ec5e2cfa70f7c92ba3a5/xmltodict-1.0.2-py3-none-any.whl", hash = "sha256:62d0fddb0dcbc9f642745d8bbf4d81fd17d6dfaec5a15b5c1876300aad92af0d", size = 13893, upload-time = "2025-09-17T21:59:24.859Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] diff --git a/deployment/ecr/gaab-strands-workflow-agent/.dockerignore b/deployment/ecr/gaab-strands-workflow-agent/.dockerignore new file mode 100644 index 00000000..d39378e1 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/.dockerignore @@ -0,0 +1,23 @@ +# Ignore virtual environments +.venv +venv +__pycache__ +*.pyc +*.pyo +*.pyd + +# Ignore test artifacts +.pytest_cache +.coverage +htmlcov + +# Ignore IDE files +.vscode +.idea +*.swp + +# Ignore gaab-strands-common's venv and test artifacts +gaab-strands-common/.venv +gaab-strands-common/.pytest_cache +gaab-strands-common/htmlcov +gaab-strands-common/__pycache__ diff --git a/deployment/ecr/gaab-strands-workflow-agent/Dockerfile b/deployment/ecr/gaab-strands-workflow-agent/Dockerfile new file mode 100644 index 00000000..f38bf71c --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/Dockerfile @@ -0,0 +1,66 @@ +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/python:3.13-slim + +# Install system dependencies with security updates +RUN apt-get update && apt-get upgrade -y \ + && apt-get install -y --no-install-recommends \ + curl \ + openssl \ + && rm -rf /var/lib/apt/lists/* \ + && apt-get clean + +WORKDIR /app + +# Install UV for dependency management +RUN pip install --no-cache-dir --upgrade "pip>=25.3" \ + && pip install --no-cache-dir "uv>=0.5.0" + +# Debug: Show build context structure +RUN echo "=== Build context contents ===" && ls -la . 2>/dev/null || true + +# Copy shared library - must be present in build context +COPY gaab-strands-common /tmp/gaab-strands-common + +# Debug: Verify shared library was copied +RUN echo "=== Verifying gaab-strands-common ===" && ls -la /tmp/gaab-strands-common + +# Copy project files for dependency resolution from current directory +COPY pyproject.toml uv.lock README.md ./ + +# Install gaab-strands-common as editable dependency +RUN uv pip install --system -e /tmp/gaab-strands-common + +# Install dependencies from pyproject.toml (excluding gaab-strands-common which is already installed) +# Extract non-local dependencies and install them +RUN python3 -c "import tomllib; \ + data = tomllib.load(open('pyproject.toml', 'rb')); \ + deps = [d for d in data['project']['dependencies'] if 'gaab-strands-common' not in d]; \ + print('\n'.join(deps))" > /tmp/deps.txt && \ + uv pip install --system -r /tmp/deps.txt + +# Debug: Show current directory before copying src +RUN echo "=== Current directory contents ===" && ls -la . + +# Copy source code from current directory +COPY src/ ./src/ + +# Install the package itself (no dependencies, they're already installed) +RUN uv pip install --system --no-deps -e . + +# Set Python path +ENV PYTHONPATH=/app/src + +# Create non-root user for security +RUN groupadd -r appuser && useradd -r -g appuser appuser \ + && chown -R appuser:appuser /app + +USER appuser + +# Expose port for AgentCore Runtime +EXPOSE 8080 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ + CMD python -c "import sys; sys.exit(0)" || exit 1 + +# Run the application with OpenTelemetry instrumentation +CMD ["opentelemetry-instrument", "python", "src/main.py"] diff --git a/deployment/ecr/gaab-strands-workflow-agent/README.md b/deployment/ecr/gaab-strands-workflow-agent/README.md new file mode 100644 index 00000000..ee67c252 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/README.md @@ -0,0 +1,75 @@ +# GAAB Strands Workflow Agent + +This is the workflow orchestration agent for the Generative AI Application Builder (GAAB) on AWS. It implements the "Agents as Tools" pattern using the Strands SDK to orchestrate multiple specialized agents. + +## Overview + +The workflow agent treats specialized agents as tools for a client agent, enabling complex multi-step workflows where the client agent can delegate tasks to specialized agents based on user requests. + +## Architecture + +- **main.py**: Entry point using BedrockAgentCoreApp +- **workflow_agent.py**: Workflow-specific agent creation and orchestration +- **agents_loader.py**: Loading and instantiation of specialized agents + +## Dependencies + +This agent depends on the `gaab-strands-common` shared library which provides: +- Runtime streaming logic +- DynamoDB helper utilities +- Data models +- Tool wrapper for event emission +- Base agent class + +## Configuration + +The workflow agent expects the following environment variables: +- `USE_CASE_TABLE_NAME`: DynamoDB table name for configurations +- `USE_CASE_CONFIG_KEY`: Configuration key to load +- `AWS_REGION`: AWS region for Bedrock and DynamoDB + +## Prerequisites + +- Python 3.13+ +- UV package manager (install via `pip install uv>=0.5.0`) + +## Development Setup + +```bash +# Install UV if not already installed +pip install uv>=0.5.0 + +# Sync dependencies (creates virtual environment and installs all dependencies) +uv sync + +# Activate the virtual environment +source .venv/bin/activate +``` + +## Building + +```bash +# From the deployment/ecr directory +docker build -f gaab-strands-workflow-agent/Dockerfile -t gaab-strands-workflow-agent . +``` + +## Testing + +```bash +# Run tests with UV (recommended) +uv run pytest test/ + +# Or use the test script +./scripts/run_unit_tests.sh + +# Run tests with coverage +uv run pytest test/ --cov=src --cov-report=term --cov-report=html +``` + +## Message Format + +The workflow agent maintains the same message format as the existing tool-based agent: +- Content chunks: `{type: "content", text: string, agent_name: string, model_id: string}` +- Tool usage chunks: `{type: "tool_use", toolUsage: {...}}` +- Completion chunks: `{type: "completion", agent_name: string, model_id: string}` +- Error chunks: `{type: "error", error: string, message: string, agent_name: string, model_id: string}` diff --git a/deployment/ecr/gaab-strands-workflow-agent/pyproject.toml b/deployment/ecr/gaab-strands-workflow-agent/pyproject.toml new file mode 100644 index 00000000..a1aa0d43 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/pyproject.toml @@ -0,0 +1,78 @@ +[project] +name = "gaab-strands-workflow-agent" +version = "1.0.0" +description = "GAAB Strands Workflow Agent Runtime for Amazon Bedrock AgentCore" +readme = "README.md" +requires-python = ">=3.13" +license = { text = "Apache-2.0" } +authors = [{ name = "Amazon Web Services" }] +classifiers = [ + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.13", +] + +dependencies = [ + "setuptools>=70.0.0", + "pip>=25.0", + "wheel>=0.42.0", + + # AWS SDK + "boto3>=1.35.0", + + # Strands SDK with OpenTelemetry support + "strands-agents[otel]>=1.10.0", + "strands-agents-tools>=0.2.9", + + # bedrock-agentcore + "bedrock-agentcore>=0.1.5", + + # OpenTelemetry for observability + "aws-opentelemetry-distro>=0.12.1", + + # Pydantic for data validation and parsing + "pydantic>=2.0.0", + + # Local shared library (managed via tool.uv.sources) + "gaab-strands-common", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src"] + +[tool.uv] +dev-dependencies = [ + "pytest>=8.0.0", + "pytest-cov>=5.0.0", + "black>=24.0.0", + "isort>=5.12.0", + "mypy>=1.8.0", + "moto>=5.0.0", + "pytest-mock>=3.12.0", +] + +[tool.uv.sources] +gaab-strands-common = { path = "../gaab-strands-common", editable = true } + +[tool.black] +line-length = 100 +target-version = ['py313'] + +[tool.isort] +profile = "black" +line_length = 100 + +[tool.mypy] +python_version = "3.13" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true + +[tool.pytest.ini_options] +testpaths = ["test"] +python_files = ["test_*.py", "*_test.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] diff --git a/deployment/ecr/gaab-strands-workflow-agent/scripts/build-container.sh b/deployment/ecr/gaab-strands-workflow-agent/scripts/build-container.sh new file mode 100755 index 00000000..eae7f707 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/scripts/build-container.sh @@ -0,0 +1,290 @@ +#!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -e # Exit on any error +set -u # Exit on undefined variables + +# Enable debug mode if DEBUG environment variable is set +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi + +echo "=== Building Workflow Agent Container ===" + +# Navigate to agent directory (parent of scripts/) +cd "$(dirname "$0")/.." + +# Logging functions for consistent output +log_info() { + echo "ℹ️ $1" +} + +log_success() { + echo "✅ $1" +} + +log_error() { + echo "❌ $1" >&2 +} + +log_warning() { + echo "⚠️ $1" +} + +# Enhanced configuration with environment variable support +IMAGE_NAME="${IMAGE_NAME:-gaab-strands-workflow-agent}" +TAG="${TAG:-latest}" + +# Build options configuration +BUILD_ARGS="${BUILD_ARGS:-}" +NO_CACHE="${NO_CACHE:-false}" +PLATFORM="${PLATFORM:-}" + +# UV configuration - UV is required for this build +REQUIRE_UV="${REQUIRE_UV:-true}" + +# Validation functions +validate_docker() { + log_info "Validating Docker environment..." + + # Check if Docker is available + if ! command -v docker &> /dev/null; then + log_error "Docker is not installed or not in PATH" + log_error "Please install Docker and ensure it's running" + exit 1 + fi + + # Check if Docker daemon is running + if ! docker info &> /dev/null; then + log_error "Docker daemon is not running" + log_error "Please start Docker and try again" + exit 1 + fi + + # Check Docker version for compatibility + local docker_version + docker_version=$(docker version --format '{{.Server.Version}}' 2>/dev/null || echo "unknown") + log_info "Docker version: $docker_version" + + log_success "Docker environment validated" +} + +validate_build_context() { + log_info "Validating build context..." + + # Check if Dockerfile exists + if [ ! -f "Dockerfile" ]; then + log_error "Dockerfile not found in current directory" + log_error "Current directory: $(pwd)" + log_error "Please ensure you're in the correct directory" + exit 1 + fi + + # Check if pyproject.toml exists for UV workflow + if [ ! -f "pyproject.toml" ]; then + log_warning "pyproject.toml not found - UV workflow may not be available" + fi + + # Check if required source files exist + if [ ! -d "src" ]; then + log_warning "Source directory 'src' not found - this may be expected" + fi + + # Verify gaab-strands-common package exists (required dependency) + if [ ! -d "../gaab-strands-common" ]; then + log_error "gaab-strands-common package not found at ../gaab-strands-common" + log_error "The shared library is required for building this agent" + log_error "Expected structure:" + log_error " deployment/ecr/" + log_error " ├── gaab-strands-workflow-agent/ (current)" + log_error " └── gaab-strands-common/ (required)" + exit 1 + fi + + log_success "Build context validated (including gaab-strands-common)" +} + +# UV detection - check if UV is available in the environment +check_uv_available() { + log_info "Checking for UV availability..." + + if ! command -v uv &> /dev/null; then + log_error "UV is not installed or not in PATH" + log_error "" + log_error "Please install UV manually using one of these methods:" + log_error "" + log_error " 1. Using pip (recommended for corporate environments):" + log_error " pip install uv>=0.5.0" + log_error "" + log_error " 2. Using pipx (isolated installation):" + log_error " pipx install uv>=0.5.0" + log_error "" + log_error " 3. Using your system package manager:" + log_error " - macOS: brew install uv" + log_error " - Linux: Check your distribution's package manager" + log_error "" + log_error " 4. For more installation options, visit:" + log_error " https://docs.astral.sh/uv/getting-started/installation/" + log_error "" + log_error "After installation, ensure UV is in your PATH and try again." + return 1 + fi + + # Verify UV is functional + local uv_version + uv_version=$(uv --version 2>/dev/null | cut -d' ' -f2 || echo "unknown") + + if [ "$uv_version" = "unknown" ]; then + log_error "UV found but version could not be determined" + log_error "UV may not be properly installed or configured" + return 1 + fi + + log_success "UV detected (version: $uv_version)" + return 0 +} + + + +# Enhanced build function with better error handling +build_docker_image() { + log_info "Starting Docker image build..." + + log_info "Configuration:" + echo " 📦 Image Name: $IMAGE_NAME" + echo " 🏷️ Tag: $TAG" + echo " 📁 Build Context: $(pwd)" + echo " 🔧 Package Manager: UV" + echo " 📚 Shared Library: gaab-strands-common (../gaab-strands-common)" + + if [ -n "$PLATFORM" ]; then + echo " 🏗️ Platform: $PLATFORM" + fi + + if [ "$NO_CACHE" = "true" ]; then + echo " 🚫 Cache: Disabled" + fi + + if [ -n "$BUILD_ARGS" ]; then + echo " ⚙️ Build Args: $BUILD_ARGS" + fi + + echo "" + + # Construct build command + local build_cmd="docker build" + + # Add no-cache flag if requested + if [ "$NO_CACHE" = "true" ]; then + build_cmd="$build_cmd --no-cache" + fi + + # Add platform if specified + if [ -n "$PLATFORM" ]; then + build_cmd="$build_cmd --platform $PLATFORM" + fi + + # Add build args if specified + if [ -n "$BUILD_ARGS" ]; then + build_cmd="$build_cmd $BUILD_ARGS" + fi + + # Add tag and context (build from current directory) + build_cmd="$build_cmd -t $IMAGE_NAME:$TAG ." + + log_info "Build command: $build_cmd" + log_info "Tests will run during build process..." + echo "" + + # Execute build with error handling + if eval "$build_cmd"; then + log_success "Docker image built successfully!" + log_success "Tests passed during build!" + + # Display image information + echo "" + log_info "Image Details:" + docker images --format "table {{.Repository}}:{{.Tag}}\t{{.Size}}\t{{.CreatedAt}}" | head -1 + docker images --format "table {{.Repository}}:{{.Tag}}\t{{.Size}}\t{{.CreatedAt}}" | grep "^$IMAGE_NAME:$TAG" + + # Get image ID and size + local image_id + local image_size + image_id=$(docker images --format "{{.ID}}" "$IMAGE_NAME:$TAG" | head -1) + image_size=$(docker images --format "{{.Size}}" "$IMAGE_NAME:$TAG" | head -1) + + echo "" + log_info "Build Summary:" + echo " 🆔 Image ID: $image_id" + echo " 📏 Image Size: $image_size" + echo " 🏷️ Full Tag: $IMAGE_NAME:$TAG" + + else + log_error "Docker build failed!" + log_error "" + log_error "Common causes of build failures:" + log_error " - Missing dependencies in Dockerfile" + log_error " - Test failures during build" + log_error " - Network connectivity issues" + log_error " - Insufficient disk space" + log_error " - Invalid Dockerfile syntax" + log_error " - UV/pip dependency resolution conflicts" + log_error "" + log_error "Troubleshooting steps:" + log_error " 1. Check Docker logs above for specific errors" + log_error " 2. Verify Dockerfile syntax" + log_error " 3. Ensure all required files are present" + log_error " 4. Check available disk space: df -h" + log_error " 5. Try building with --no-cache: NO_CACHE=true ./scripts/build-container.sh" + log_error " 6. Try fallback mode: USE_UV=false ./scripts/build-container.sh" + exit 1 + fi +} + +# Display usage information +display_usage() { + echo "" + log_info "Environment Variables:" + echo " IMAGE_NAME - Docker image name (default: gaab-strands-workflow-agent)" + echo " TAG - Docker image tag (default: latest)" + echo " BUILD_ARGS - Additional build arguments" + echo " NO_CACHE - Disable build cache (true/false, default: false)" + echo " PLATFORM - Target platform (e.g., linux/amd64, linux/arm64)" + echo " DEBUG - Enable debug output (true/false, default: false)" + echo " REQUIRE_UV - Require UV to be installed (true/false, default: true)" + echo "" + log_info "Prerequisites:" + echo " - gaab-strands-common package must exist at ../gaab-strands-common" + echo " - UV must be installed (pip install uv>=0.5.0)" + echo "" + log_info "Examples:" + echo " # Basic UV-based build (recommended)" + echo " ./scripts/build-container.sh" + echo "" + echo " # Build with custom tag" + echo " TAG=v1.0.0 ./scripts/build-container.sh" + echo "" + echo " # Build without cache" + echo " NO_CACHE=true ./scripts/build-container.sh" + echo "" + echo " # Build for specific platform (AgentCore ARM64)" + echo " PLATFORM=linux/arm64 ./scripts/build-container.sh" + echo "" + echo " # Build without requiring UV" + echo " REQUIRE_UV=false ./scripts/build-container.sh" + echo "" + echo " # Debug mode with verbose output" + echo " DEBUG=true ./scripts/build-container.sh" + echo "" +} + +# Main execution +main() { + validate_docker + validate_build_context + build_docker_image +} + +# Run main function +main diff --git a/deployment/ecr/gaab-strands-workflow-agent/scripts/deploy-ecr.sh b/deployment/ecr/gaab-strands-workflow-agent/scripts/deploy-ecr.sh new file mode 100755 index 00000000..0ac2b496 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/scripts/deploy-ecr.sh @@ -0,0 +1,388 @@ +#!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -e # Exit on any error +set -u # Exit on undefined variables + +# Enable debug mode if DEBUG environment variable is set +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi + +echo "=== Deploying Workflow Agent to ECR ===" + +# Navigate to agent directory (parent of scripts/) +cd "$(dirname "$0")/.." + +# Verify we're in the correct directory +if [ ! -f "Dockerfile" ]; then + log_error "Dockerfile not found. Current directory: $(pwd)" + log_error "This script must be run from deployment/ecr/gaab-strands-workflow-agent/scripts/" + exit 1 +fi + +# Logging function for consistent output +log_info() { + echo "ℹ️ $1" +} + +log_success() { + echo "✅ $1" +} + +log_error() { + echo "❌ $1" >&2 +} + +log_warning() { + echo "⚠️ $1" +} + +# Validation function for required environment variables +validate_required_vars() { + local missing_vars=() + + # Check for required variables based on context + if [ -z "${AWS_REGION:-}" ] && [ -z "${AWS_DEFAULT_REGION:-}" ]; then + missing_vars+=("AWS_REGION or AWS_DEFAULT_REGION") + fi + + if [ ${#missing_vars[@]} -gt 0 ]; then + log_error "Missing required environment variables:" + for var in "${missing_vars[@]}"; do + log_error " - $var" + done + log_error "Please set the required variables and try again." + exit 1 + fi +} + +# Enhanced configuration with environment variable support +# Core AWS configuration +AWS_REGION="${AWS_REGION:-${AWS_DEFAULT_REGION:-us-east-1}}" + +# ECR repository configuration with enhanced customization +ECR_REPOSITORY="${ECR_REPOSITORY:-gaab-strands-workflow-agent}" +IMAGE_NAME="${IMAGE_NAME:-gaab-strands-workflow-agent}" + +# Image tag resolution with CI/CD support +if [ -n "${VERSION:-}" ]; then + # Use VERSION environment variable (CI/CD context) + # Remove double 'v' prefix if present (e.g., vv4.0.0 -> v4.0.0) + RESOLVED_VERSION=$(echo "$VERSION" | sed 's/^vv/v/') + IMAGE_TAG="${IMAGE_TAG:-$RESOLVED_VERSION}" +elif [ -n "${PUBLIC_ECR_TAG:-}" ]; then + # Use PUBLIC_ECR_TAG for CI/CD pipeline overrides + IMAGE_TAG="${IMAGE_TAG:-$PUBLIC_ECR_TAG}" +else + # Default to latest for local development + IMAGE_TAG="${IMAGE_TAG:-latest}" +fi + +# Registry configuration with CI/CD override support +if [ -n "${PUBLIC_ECR_REGISTRY:-}" ]; then + # CI/CD context with custom registry + ECR_REGISTRY_URL="$PUBLIC_ECR_REGISTRY" + log_info "Using custom ECR registry from PUBLIC_ECR_REGISTRY: $ECR_REGISTRY_URL" +else + # Local development or standard AWS ECR + ECR_REGISTRY_URL="" # Will be constructed with AWS account ID +fi + +# Validate required variables +validate_required_vars + +log_info "Starting ECR deployment process..." +log_info "Configuration validation passed" + +# Enhanced AWS Account ID resolution with better error handling +resolve_aws_account_id() { + if [ -n "${AWS_ACCOUNT_ID:-}" ]; then + log_info "Using provided AWS Account ID: $AWS_ACCOUNT_ID" + return 0 + fi + + log_info "AWS_ACCOUNT_ID not provided, auto-detecting from current AWS credentials..." + + # Try to get account ID with timeout and better error handling + if ! AWS_ACCOUNT_ID=$(timeout 30 aws sts get-caller-identity --query Account --output text 2>/dev/null); then + log_error "Failed to auto-detect AWS Account ID" + log_error "This could be due to:" + log_error " - AWS credentials not configured" + log_error " - Network connectivity issues" + log_error " - Insufficient permissions" + log_error "" + log_error "Solutions:" + log_error " 1. Configure AWS credentials: aws configure" + log_error " 2. Set AWS_ACCOUNT_ID manually: export AWS_ACCOUNT_ID=123456789012" + log_error " 3. Check network connectivity to AWS" + exit 1 + fi + + if [ -z "$AWS_ACCOUNT_ID" ]; then + log_error "AWS Account ID is empty after auto-detection" + exit 1 + fi + + log_success "Auto-detected AWS Account ID: $AWS_ACCOUNT_ID" +} + +# Enhanced ECR URI construction with registry override support +construct_ecr_uri() { + if [ -n "${ECR_REGISTRY_URL:-}" ]; then + # Custom registry (CI/CD context) + ECR_URI="$ECR_REGISTRY_URL/$ECR_REPOSITORY" + else + # Standard AWS ECR - use ECR_REPOSITORY as the full repository name + ECR_URI="$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ECR_REPOSITORY" + fi +} + +# Resolve AWS Account ID +resolve_aws_account_id + +# Construct ECR URI +construct_ecr_uri + +# Enhanced configuration display +display_configuration() { + log_info "Deployment Configuration:" + echo " AWS Region: $AWS_REGION" + echo " AWS Account: $AWS_ACCOUNT_ID" + echo " ECR Repository: $ECR_REPOSITORY" + echo " Image Name: $IMAGE_NAME" + echo " Image Tag: $IMAGE_TAG" + echo " ECR URI: $ECR_URI:$IMAGE_TAG" + + if [ -n "${ECR_REGISTRY_URL:-}" ]; then + echo " Custom Registry: $ECR_REGISTRY_URL" + fi + + if [ -n "${VERSION:-}" ]; then + echo " Version Source: VERSION environment variable" + elif [ -n "${PUBLIC_ECR_TAG:-}" ]; then + echo " Version Source: PUBLIC_ECR_TAG environment variable" + else + echo " Version Source: Default (latest)" + fi + + echo "" +} + +# Enhanced Docker image validation +validate_docker_image() { + log_info "Validating local Docker image..." + + # Check if Docker is available + if ! command -v docker &> /dev/null; then + log_error "Docker is not installed or not in PATH" + log_error "Please install Docker and ensure it's running" + exit 1 + fi + + # Check if Docker daemon is running + if ! docker info &> /dev/null; then + log_error "Docker daemon is not running" + log_error "Please start Docker and try again" + exit 1 + fi + + # Check if the specific image exists + if ! docker images --format "table {{.Repository}}:{{.Tag}}" | grep -q "^$IMAGE_NAME:latest$"; then + log_error "Local Docker image '$IMAGE_NAME:latest' not found" + log_error "Available images:" + docker images --format "table {{.Repository}}:{{.Tag}}" | head -10 + log_error "" + log_error "Please run './scripts/build-container.sh' first to build the image" + exit 1 + fi + + log_success "Docker image '$IMAGE_NAME:latest' found locally" +} + +# Display configuration +display_configuration + +# Validate Docker image +validate_docker_image + +# Enhanced ECR login with better error handling +ecr_login() { + echo "" + log_info "Step 1: Logging into ECR..." + + local login_registry + if [ -n "${ECR_REGISTRY_URL:-}" ]; then + # Custom registry login + login_registry="$ECR_REGISTRY_URL" + log_info "Logging into custom registry: $login_registry" + else + # Standard AWS ECR login + login_registry="$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com" + log_info "Logging into AWS ECR: $login_registry" + fi + + # Attempt ECR login with timeout and better error handling + if ! timeout 60 aws ecr get-login-password --region "$AWS_REGION" | docker login --username AWS --password-stdin "$login_registry" 2>/dev/null; then + log_error "ECR login failed" + log_error "This could be due to:" + log_error " - Invalid AWS credentials" + log_error " - Insufficient ECR permissions" + log_error " - Network connectivity issues" + log_error " - Invalid region: $AWS_REGION" + log_error "" + log_error "Required permissions:" + log_error " - ecr:GetAuthorizationToken" + log_error " - ecr:BatchCheckLayerAvailability" + log_error " - ecr:GetDownloadUrlForLayer" + log_error " - ecr:BatchGetImage" + exit 1 + fi + + log_success "Successfully logged into ECR" +} + +# Perform ECR login +ecr_login + +# Enhanced ECR repository management +manage_ecr_repository() { + echo "" + log_info "Step 2: Managing ECR repository..." + + local full_repo_name="$ECR_REPOSITORY" + + # Skip repository creation for custom registries (CI/CD context) + if [ -n "${ECR_REGISTRY_URL:-}" ]; then + log_info "Using custom registry - skipping repository creation" + log_info "Repository: $full_repo_name" + return 0 + fi + + log_info "Checking repository: $full_repo_name" + + # Check if repository exists with better error handling + if aws ecr describe-repositories --repository-names "$full_repo_name" --region "$AWS_REGION" &>/dev/null; then + log_success "ECR repository exists: $full_repo_name" + else + log_info "Repository does not exist, creating: $full_repo_name" + + # Create repository with enhanced error handling + if ! aws ecr create-repository \ + --repository-name "$full_repo_name" \ + --region "$AWS_REGION" \ + --image-scanning-configuration scanOnPush=true \ + --encryption-configuration encryptionType=AES256 \ + &>/dev/null; then + + log_error "Failed to create ECR repository: $full_repo_name" + log_error "This could be due to:" + log_error " - Insufficient permissions (ecr:CreateRepository)" + log_error " - Repository name conflicts" + log_error " - Region-specific issues" + exit 1 + fi + + log_success "ECR repository created: $full_repo_name" + log_info "Repository features enabled:" + log_info " - Image scanning on push" + log_info " - AES256 encryption" + fi +} + +# Manage ECR repository +manage_ecr_repository + +# Enhanced image tagging with validation +tag_docker_image() { + echo "" + log_info "Step 3: Tagging Docker image..." + + local source_image="$IMAGE_NAME:latest" + local target_image="$ECR_URI:$IMAGE_TAG" + + log_info "Tagging: $source_image -> $target_image" + + if ! docker tag "$source_image" "$target_image"; then + log_error "Failed to tag Docker image" + log_error "Source: $source_image" + log_error "Target: $target_image" + exit 1 + fi + + log_success "Docker image tagged successfully" +} + +# Enhanced image pushing +push_docker_image() { + echo "" + log_info "Step 4: Pushing image to ECR..." + + local target_image="$ECR_URI:$IMAGE_TAG" + + log_info "Pushing: $target_image" + + # Standard docker push (image already built and tagged) + if ! docker push "$target_image"; then + log_error "Failed to push image to ECR" + log_error "Target: $target_image" + log_error "" + log_error "This could be due to:" + log_error " - Network connectivity issues" + log_error " - ECR repository permissions" + log_error " - Image size limits" + log_error " - Image not properly tagged" + exit 1 + fi + + log_success "Image pushed successfully to ECR" +} + +# Tag and push the image +tag_docker_image +push_docker_image + +# Enhanced deployment summary with usage instructions +deployment_summary() { + echo "" + echo "🎉 ==================================" + log_success "ECR Deployment Completed Successfully!" + echo "🎉 ==================================" + echo "" + + log_info "Deployment Summary:" + echo " 📦 Image URI: $ECR_URI:$IMAGE_TAG" + echo " 🏷️ Image Tag: $IMAGE_TAG" + echo " 🌍 AWS Region: $AWS_REGION" + echo " 🏢 AWS Account: $AWS_ACCOUNT_ID" + echo " 📁 Repository: $ECR_REPOSITORY/$IMAGE_NAME" + + if [ -n "${VERSION:-}" ]; then + echo " 🔖 Version Source: VERSION environment variable ($VERSION)" + elif [ -n "${PUBLIC_ECR_TAG:-}" ]; then + echo " 🔖 Version Source: PUBLIC_ECR_TAG environment variable ($PUBLIC_ECR_TAG)" + fi + + echo "" + log_info "Usage Instructions:" + echo " 🔧 In CDK/CloudFormation:" + echo " Use image URI: $ECR_URI:$IMAGE_TAG" + echo "" + echo " 🚀 In AgentCore Runtime:" + echo " Set container image to: $ECR_URI:$IMAGE_TAG" + echo "" + echo " 📋 For custom deployments:" + echo " export WORKFLOW_AGENT_IMAGE_URI=\"$ECR_URI:$IMAGE_TAG\"" + echo "" + + log_info "Next Steps:" + echo " 1. Update your CDK stack parameters with the new image URI" + echo " 2. Deploy your infrastructure: cdk deploy" + echo " 3. Verify the deployment in AWS Console" + echo " 4. Test the workflow agent with multiple specialized agents" + echo "" +} + +# Display deployment summary +deployment_summary diff --git a/deployment/ecr/gaab-strands-workflow-agent/scripts/run_unit_tests.sh b/deployment/ecr/gaab-strands-workflow-agent/scripts/run_unit_tests.sh new file mode 100755 index 00000000..81d756c4 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/scripts/run_unit_tests.sh @@ -0,0 +1,380 @@ +#!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Script to run unit tests for the Workflow Agent + +set -e # Exit on any error +set -u # Exit on undefined variables + +# Enable debug mode if DEBUG environment variable is set +if [ "${DEBUG:-}" = "true" ]; then + set -x +fi + +# Change to the project root directory (one level up from scripts/) +cd "$(dirname "$0")/.." + +# Logging functions for consistent output +log_info() { + echo "ℹ️ $1" +} + +log_success() { + echo "✅ $1" +} + +log_error() { + echo "❌ $1" >&2 +} + +log_warning() { + echo "⚠️ $1" +} + +# Configuration +PYTHON_CMD="${PYTHON_CMD:-python3}" +COVERAGE="${COVERAGE:-true}" +VENV_DIR="${VENV_DIR:-.venv}" +SKIP_VENV="${SKIP_VENV:-false}" + +# Validation functions +validate_python() { + log_info "Validating Python environment..." + + if ! command -v "$PYTHON_CMD" &> /dev/null; then + log_error "Python command '$PYTHON_CMD' not found" + log_error "Please install Python 3 or set PYTHON_CMD environment variable" + exit 1 + fi + + local python_version + python_version=$($PYTHON_CMD --version 2>&1) + log_info "Using: $python_version" + + # Check Python version (require 3.8+) + local version_check + version_check=$($PYTHON_CMD -c "import sys; print(sys.version_info >= (3, 8))") + if [ "$version_check" != "True" ]; then + log_error "Python 3.8 or higher is required" + exit 1 + fi + + log_success "Python environment validated" +} + +# UV detection - check if UV is available in the environment +check_uv_available() { + if ! command -v uv &> /dev/null; then + return 1 + fi + + # Verify UV is functional + local uv_version + uv_version=$(uv --version 2>/dev/null | cut -d' ' -f2 || echo "unknown") + + if [ "$uv_version" = "unknown" ]; then + log_warning "UV found but version could not be determined" + return 1 + fi + + log_info "UV detected (version: $uv_version)" + return 0 +} + +show_uv_install_instructions() { + log_error "UV is not installed. This project requires UV for dependency management." + log_info "" + log_info " 1. Using pip (recommended for corporate environments):" + log_info " pip install uv>=0.5.0" + log_info "" + log_info " 2. Using pipx (isolated installation):" + log_info " pipx install uv>=0.5.0" + log_info "" + log_info " 3. Using your system package manager:" + log_info " - macOS: brew install uv" + log_info " - Linux: Check your distribution's package manager" + log_info "" + log_info " 4. For more installation options, visit:" + log_info " https://docs.astral.sh/uv/getting-started/installation/" + log_info "" + exit 1 +} + +# Virtual environment management with UV +setup_virtual_environment() { + if [ "$SKIP_VENV" = "true" ]; then + log_info "Skipping virtual environment setup (SKIP_VENV=true)" + return 0 + fi + + log_info "Setting up virtual environment..." + + # Check if UV is available + if ! check_uv_available; then + show_uv_install_instructions + fi + + log_info "Using UV for virtual environment management" + setup_uv_environment +} + +setup_uv_environment() { + log_info "Setting up UV-managed virtual environment..." + + # Check if pyproject.toml exists for UV workflow + if [ ! -f "pyproject.toml" ]; then + log_warning "pyproject.toml not found, falling back to pip workflow" + setup_pip_environment + return + fi + + # Create virtual environment using UV + if [ ! -d "$VENV_DIR" ]; then + log_info "Creating UV virtual environment at $VENV_DIR..." + uv venv "$VENV_DIR" + log_success "UV virtual environment created" + else + log_info "Using existing virtual environment at $VENV_DIR" + fi + + # Activate virtual environment + log_info "Activating virtual environment..." + # shellcheck source=/dev/null + source "$VENV_DIR/bin/activate" + + # Install gaab-strands-common first (local dependency) + local common_lib_path="../gaab-strands-common" + if [ -d "$common_lib_path" ]; then + log_info "Installing gaab-strands-common from local directory..." + uv pip install -e "$common_lib_path" + log_success "gaab-strands-common installed" + else + log_warning "gaab-strands-common directory not found at $common_lib_path" + fi + + # Install dependencies using UV sync (installs both dependencies and dev-dependencies) + log_info "Installing dependencies using UV sync..." + uv sync + + log_success "UV virtual environment setup completed" +} + + + +cleanup_virtual_environment() { + if [ "$SKIP_VENV" = "true" ]; then + return 0 + fi + + # Only show deactivation message if we actually have an active virtual environment + if [ -n "${VIRTUAL_ENV:-}" ]; then + log_info "Deactivating virtual environment..." + + # Try to deactivate and capture the result + if command -v deactivate >/dev/null 2>&1; then + deactivate 2>/dev/null || { + log_warning "Virtual environment deactivation failed, but continuing..." + } + else + # If deactivate function isn't available, just unset the environment variable + unset VIRTUAL_ENV + log_info "Virtual environment variables cleared" + fi + + # Verify deactivation + if [ -z "${VIRTUAL_ENV:-}" ]; then + log_success "Virtual environment deactivated successfully" + fi + fi +} + +validate_test_environment() { + log_info "Validating test environment..." + + if [ ! -d "test" ]; then + log_error "Test directory not found" + exit 1 + fi + + log_success "Test environment validated" +} + +# Enhanced test execution with better error handling +run_tests() { + log_info "Starting unit test execution..." + log_info "Configuration:" + echo " 🐍 Python Command: $PYTHON_CMD" + echo " 🧪 Test Framework: pytest" + echo " 📁 Working Directory: $(pwd)" + echo " 🔧 Virtual Environment: $VENV_DIR" + echo " 📦 Dependency Manager: UV" + + if [ "$COVERAGE" = "true" ]; then + echo " 📊 Coverage: Enabled" + fi + + if [ "$SKIP_VENV" = "true" ]; then + echo " ⚠️ Virtual Environment: Skipped" + fi + + echo "" + + # Determine which Python command to use + local python_exec + if [ "$SKIP_VENV" = "true" ]; then + python_exec="$PYTHON_CMD" + else + python_exec="python" # Use the activated venv python + fi + + # Run pytest with enhanced error handling + if [ "$COVERAGE" = "true" ]; then + log_info "Running tests with coverage..." + if ! $python_exec -m pytest test/ --cov --cov-report=term-missing --cov-report=xml -v; then + log_error "Unit tests failed!" + exit 1 + fi + + log_info "Coverage report generated" + + # Generate HTML coverage report if requested + if [ "${COVERAGE_HTML:-false}" = "true" ]; then + log_info "HTML coverage report available in htmlcov/" + fi + + else + log_info "Running tests..." + if ! $python_exec -m pytest test/ -v; then + log_error "Unit tests failed!" + log_error "" + log_error "Troubleshooting steps:" + log_error " 1. Check test output above for specific failures" + log_error " 2. Verify all dependencies are installed" + log_error " 3. Check Python version compatibility" + log_error " 4. Run with DEBUG=true for more verbose output" + log_error " 5. Try recreating virtual environment: rm -rf $VENV_DIR" + exit 1 + fi + fi + + log_success "All unit tests passed!" +} + +# Display usage information +display_usage() { + echo "Usage: $0 [options]" + log_info "Environment Variables:" + echo " PYTHON_CMD - Python command to use (default: python3)" + echo " COVERAGE - Enable coverage reporting (true/false, default: true)" + echo " COVERAGE_HTML - Generate HTML coverage report (true/false, default: false)" + echo " DEBUG - Enable debug output (true/false, default: false)" + echo " VENV_DIR - Virtual environment directory (default: .venv)" + echo " SKIP_VENV - Skip virtual environment setup (true/false, default: false)" + echo "" + log_info "Command Line Options:" + echo " -h, --help Show this help message" + echo " -c, --coverage Enable coverage reporting (default: enabled)" + echo " --no-coverage Disable coverage reporting" + echo " --coverage-html Enable coverage reporting with HTML output" + echo "" + log_info "Examples:" + echo " # Basic test run with coverage (default behavior)" + echo " ./scripts/run_unit_tests.sh" + echo "" + echo " # Run without coverage report" + echo " ./scripts/run_unit_tests.sh --no-coverage" + echo "" + echo " # Run with coverage and HTML report" + echo " ./scripts/run_unit_tests.sh --coverage-html" + echo "" + echo " # Use environment variables to disable coverage" + echo " COVERAGE=false ./scripts/run_unit_tests.sh" + echo "" + echo " # Use specific Python version" + echo " PYTHON_CMD=python3.13 ./scripts/run_unit_tests.sh" + echo "" + echo " # Skip virtual environment (use system Python with UV)" + echo " SKIP_VENV=true ./scripts/run_unit_tests.sh" + echo "" + echo " # Use custom virtual environment directory" + echo " VENV_DIR=test-env ./scripts/run_unit_tests.sh" + echo "" + echo " # Clean virtual environment and recreate" + echo " rm -rf .venv && ./scripts/run_unit_tests.sh" + echo "" + echo " # Run tests directly with UV (no script)" + echo " uv run pytest test/" + echo "" +} + +# Cleanup function for trap +cleanup_on_exit() { + local exit_code=$? + + # Only cleanup if we're not skipping venv and there's an active virtual environment + if [ "$SKIP_VENV" != "true" ] && [ -n "${VIRTUAL_ENV:-}" ]; then + cleanup_virtual_environment + fi + + if [ $exit_code -ne 0 ]; then + log_error "Script exited with error code $exit_code" + fi + + exit $exit_code +} + +# Set trap for cleanup +trap cleanup_on_exit EXIT INT TERM + +# Parse command line arguments +parse_arguments() { + while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + display_usage + exit 0 + ;; + -c|--coverage) + COVERAGE="true" + log_info "Coverage reporting enabled via command line" + ;; + --no-coverage) + COVERAGE="false" + log_info "Coverage reporting disabled via command line" + ;; + --coverage-html) + COVERAGE="true" + COVERAGE_HTML="true" + log_info "Coverage reporting with HTML output enabled via command line" + ;; + *) + log_error "Unknown option: $1" + display_usage + exit 1 + ;; + esac + shift + done +} + +# Main execution +main() { + echo "🧪 Running unit tests for Workflow Agent..." + echo "================================================" + validate_python + validate_test_environment + setup_virtual_environment + run_tests +} + +echo "================================================" + +# Parse arguments first +parse_arguments "$@" + +# Run main function +main + +echo "================================================" +log_success "Unit test execution completed!" diff --git a/deployment/ecr/gaab-strands-workflow-agent/src/agents_loader.py b/deployment/ecr/gaab-strands-workflow-agent/src/agents_loader.py new file mode 100644 index 00000000..2a9832ab --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/src/agents_loader.py @@ -0,0 +1,286 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +AgentsLoader - Loads and instantiates specialized agents for workflow orchestration + +This module handles loading specialized agents from configuration, including: +- Fetching agent configurations from DynamoDB +- Loading MCP server tools for each agent +- Creating agent models with appropriate fallbacks +- Graceful error handling for partial agent loading failures +""" + +import logging +from typing import Any, Dict, List, Optional + +from gaab_strands_common import BaseAgent, DynamoDBHelper, ToolsManager +from gaab_strands_common.models import ( + AgentBuilderParams, + AgentReference, + CustomToolReference, + LlmParams, + MCPServerReference, + StrandsToolReference, + UseCaseConfig, +) +from strands import Agent +from strands.models import BedrockModel + +logger = logging.getLogger(__name__) + + +class AgentsLoader(BaseAgent): + """ + Loads specialized agents from configuration. + + This class handles the complex process of loading specialized agents for + workflow orchestration, including: + - Fetching full agent configurations from DynamoDB when agent IDs are provided + - Loading MCP server tools for each agent + - Creating appropriate Bedrock models for each agent + - Graceful error handling that allows partial agent loading + """ + + def __init__(self, ddb_helper: DynamoDBHelper, region: str): + """ + Initialize AgentsLoader. + + Args: + ddb_helper: DynamoDB helper for fetching configurations + region: AWS region for Bedrock and other services + """ + super().__init__(region) + self.ddb_helper = ddb_helper + logger.info(f"Initialized AgentsLoader for region: {region}") + + def load_agents(self, agent_references: List[AgentReference]) -> List[Agent]: + """ + Load all specialized agents with graceful error handling. + + This method attempts to load all specified agents but continues even if + some agents fail to load. It only raises an error if ALL agents fail. + + Args: + agent_references: List of agent references with embedded AgentParams + + Returns: + List of successfully loaded Agent instances + + Raises: + RuntimeError: If all specialized agents failed to load + + Note: + Partial failures are logged as warnings but don't stop processing. + The workflow can continue with whatever agents successfully loaded. + """ + if not agent_references: + logger.warning("No specialized agents configured") + return [] + + logger.info( + f"Loading {len(agent_references)} specialized agent(s) from embedded AgentParams " + "(no DynamoDB lookup required)" + ) + + agents = [] + failed_agents = [] + + for idx, agent_ref in enumerate(agent_references): + try: + agent = self._load_single_agent(agent_ref, idx) + if agent: + agents.append(agent) + logger.info(f"Successfully loaded agent #{idx + 1}") + except Exception as e: + logger.error(f"Failed to load agent #{idx + 1}: {e}", exc_info=True) + failed_agents.append(f"agent-{idx + 1}") + + # Only fail if ALL agents failed to load + if not agents and failed_agents: + raise RuntimeError(f"All specialized agents failed to load: {failed_agents}") + + if failed_agents: + logger.warning( + f"Loaded {len(agents)} agent(s), {len(failed_agents)} failed: {failed_agents}" + ) + else: + logger.info(f"Successfully loaded all {len(agents)} specialized agent(s)") + + return agents + + def _load_single_agent(self, agent_ref: AgentReference, idx: int) -> Agent: + """ + Load a single specialized agent from AgentReference. + + This method uses the full agent configuration from the workflow, + including AgentBuilderParams and optional LlmParams. + + Args: + agent_ref: Agent reference containing full agent configuration + idx: Agent index for naming purposes + + Returns: + Instantiated Agent object + + Raises: + ValueError: If agent configuration is invalid + Exception: If agent loading fails for any reason + """ + agent_builder_params = agent_ref.agent_builder_params + agent_name = agent_ref.use_case_name or f"SpecializedAgent-{idx + 1}" + agent_description = agent_ref.use_case_description or f"Specialized agent: {agent_name}" + + logger.info(f"Loading agent: {agent_name} (type: {agent_ref.use_case_type})") + logger.debug(f"System prompt: {agent_builder_params.system_prompt[:50]}...") + + # Load tools from Strands tools, MCP servers, and required custom tools + strands_tools = agent_builder_params.tools + mcp_servers = agent_builder_params.mcp_servers + custom_tools = agent_builder_params.custom_tools + + logger.debug(f"Strands tools for {agent_name}: {[t.tool_id for t in strands_tools]}") + logger.debug(f"MCP servers for {agent_name}: {[s.use_case_id for s in mcp_servers]}") + logger.debug(f"Custom tools for {agent_name}: {[t.tool_id for t in custom_tools]}") + + agent_config = self._create_agent_use_case_config(agent_ref) + tools = self._load_agent_tools(strands_tools, mcp_servers, custom_tools, agent_config) + + # Create model from LlmParams if available, otherwise use default + if agent_ref.llm_params: + logger.info(f"🔧 Agent '{agent_name}': Using custom LlmParams from AgentReference") + llm_params = agent_ref.llm_params + bedrock_params = llm_params.bedrock_llm_params + + logger.info( + f"📊 Agent '{agent_name}' Model Configuration:\n" + f" - Model Provider: {llm_params.model_provider}\n" + f" - Inference Type: {bedrock_params.bedrock_inference_type}\n" + f" - Model Identifier: {bedrock_params.model_identifier}\n" + f" - Temperature: {llm_params.temperature}\n" + f" - Streaming: {llm_params.streaming}\n" + f" - Verbose: {llm_params.verbose}" + ) + + model = self._create_model(agent_ref.llm_params) + logger.info( + f"✅ Agent '{agent_name}': Model created successfully with {bedrock_params.model_identifier}" + ) + else: + logger.info(f"⚠️ Agent '{agent_name}': No LlmParams provided, using default model") + model = self._create_default_model() + logger.info(f"✅ Agent '{agent_name}': Default model created") + + logger.info(f"🚀 Creating agent '{agent_name}' with {len(tools)} tool(s)") + + return Agent( + name=agent_name, + description=agent_description, + system_prompt=agent_builder_params.system_prompt, + tools=tools, + model=model, + ) + + def _load_agent_tools( + self, + strands_tools: List[StrandsToolReference], + mcp_servers: List[MCPServerReference], + custom_tools: List[CustomToolReference], + agent_config: UseCaseConfig, + ) -> List[Any]: + """ + Load tools for a specialized agent from Strands tools, MCP servers, and custom tools. + + Args: + strands_tools: List of Strands tool references (e.g., current_time, calculator) + mcp_servers: List of MCP server references + custom_tools: List of custom tool references for this specific agent + agent_config: UseCaseConfig for this specific agent + + Returns: + List of tool objects for the agent + + Note: + Returns empty list if no tools are configured. + Specialized agents within workflows can use Strands tools, custom tools and MCPs. + """ + if not strands_tools and not mcp_servers and not custom_tools: + logger.debug("No tools configured for specialized agent") + return [] + + # Convert MCPServerReference objects to dict format for ToolsManager + mcp_servers_data = [ + {"use_case_id": server.use_case_id, "url": server.url, "type": server.type} + for server in mcp_servers + ] + logger.debug( + f"Loading tools from {len(mcp_servers_data)} MCP server(s): " + f"{[s['use_case_id'] for s in mcp_servers_data]}" + ) + strands_tool_ids = [tool.tool_id for tool in strands_tools] + custom_tool_ids = [tool.tool_id for tool in custom_tools] + + logger.debug(f"Loading {len(strands_tool_ids)} Strands tool(s): {strands_tool_ids}") + logger.debug(f"Loading {len(custom_tool_ids)} custom tool(s): {custom_tool_ids}") + + try: + tools_manager = ToolsManager(self.region, agent_config) + + tools = tools_manager.load_all_tools( + mcp_servers=mcp_servers_data, + strands_tool_ids=strands_tool_ids, + custom_tool_ids=custom_tool_ids, + ) + logger.info(f"✅ Successfully loaded {len(tools)} tool(s)") + return tools + except Exception as e: + logger.error(f"❌ Error loading tools: {e}", exc_info=True) + # Return empty list to allow agent to be created without tools + # Some agents may not need tools + return [] + + def _create_agent_use_case_config(self, agent_ref: AgentReference) -> UseCaseConfig: + """ + Create a UseCaseConfig for an individual agent to enable proper auto-attachment. + + This creates a minimal UseCaseConfig that contains the agent's LlmParams + so that auto-attachment conditions (like multimodal) work correctly. + + Args: + agent_ref: Agent reference containing the agent's configuration + + Returns: + UseCaseConfig for the individual agent + """ + return UseCaseConfig( + UseCaseName=agent_ref.use_case_name, + UseCaseType=agent_ref.use_case_type, + AgentBuilderParams=agent_ref.agent_builder_params, + LlmParams=agent_ref.llm_params, + ) + + def _create_default_model(self) -> BedrockModel: + """ + Create default Bedrock model when no configuration is available. + + This fallback ensures agents can be created even without full + configuration, using sensible defaults. + + Returns: + BedrockModel with default configuration + """ + default_model_id = "amazon.nova-lite-v1:0" + default_temperature = 0.7 + default_streaming = True + + logger.info( + f"Creating default model: {default_model_id} " + f"(temperature: {default_temperature}, streaming: {default_streaming})" + ) + + return BedrockModel( + model_id=default_model_id, + region_name=self.region, + temperature=default_temperature, + streaming=default_streaming, + ) diff --git a/deployment/ecr/gaab-strands-workflow-agent/src/main.py b/deployment/ecr/gaab-strands-workflow-agent/src/main.py new file mode 100644 index 00000000..c64502c4 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/src/main.py @@ -0,0 +1,290 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#!/usr/bin/env python3 +""" +Workflow Agent - Main Entry Point +AgentCore Runtime Integration + +This module provides the entry point for the workflow agent that orchestrates +multiple specialized agents using the agents-as-tools pattern. It integrates +with the AgentCore Runtime and maintains compatibility with the existing +message format and streaming behavior. +""" + +import logging +import os +import sys +from typing import Any, Dict, Optional + +from gaab_strands_common import ( + ENV_AWS_REGION, + ENV_USE_CASE_CONFIG_KEY, + ENV_USE_CASE_TABLE_NAME, + ENV_MEMORY_ID, + ENV_MEMORY_STRATEGY_ID, + RuntimeStreaming, +) +from gaab_strands_common.multimodal.multimodal_processor import MultimodalRequestProcessor +from gaab_strands_common.utils.helpers import extract_user_message + +from bedrock_agentcore.runtime import BedrockAgentCoreApp +from bedrock_agentcore.memory.integrations.strands.session_manager import AgentCoreMemorySessionManager +from bedrock_agentcore.memory.integrations.strands.config import AgentCoreMemoryConfig, RetrievalConfig +from workflow_agent import WorkflowAgent + + +# Suppress OpenTelemetry context warnings +logging.getLogger("opentelemetry.context").setLevel(logging.ERROR) + +logger = logging.getLogger(__name__) + +# Initialize the AgentCore app +app = BedrockAgentCoreApp() + +# Module-level private agent instance (singleton pattern) +_workflow_agent: Optional[WorkflowAgent] = None + + +def validate_environment() -> tuple[str, str, str, str, str]: + """ + Validate required environment variables and return them. + + This function checks that all required environment variables are set + and returns them for use in agent initialization. + + Returns: + Tuple of (table_name, config_key, region, memory_id, strategy_id) + + Raises: + ValueError: If any required environment variable is missing + """ + required_vars = { + ENV_USE_CASE_TABLE_NAME: os.getenv(ENV_USE_CASE_TABLE_NAME), + ENV_USE_CASE_CONFIG_KEY: os.getenv(ENV_USE_CASE_CONFIG_KEY), + ENV_AWS_REGION: os.getenv(ENV_AWS_REGION), + ENV_MEMORY_ID: os.getenv(ENV_MEMORY_ID), + } + + missing_vars = [name for name, value in required_vars.items() if not value] + if missing_vars: + raise ValueError(f"Missing required environment variables: {', '.join(missing_vars)}") + + strategy_id = os.getenv(ENV_MEMORY_STRATEGY_ID, "") + + table_name, config_key, region, memory_id = required_vars.values() + logger.info(f"Environment validated - Table: {table_name}, Key: {config_key}, Region: {region}, Memory ID: {memory_id}") + return table_name, config_key, region, memory_id, strategy_id + + +def get_agent_instance(session_id: str = None, actor_id = None) -> WorkflowAgent: + """ + Get or create the singleton workflow agent instance. + + This function implements the singleton pattern to ensure only one + workflow agent instance is created per Lambda container. This improves + performance by: + - Avoiding redundant DynamoDB calls + - Reusing loaded specialized agents + - Reducing initialization overhead + + Args: + session_id: Session ID for memory context + actor_id: Actor ID for memory context + + Returns: + WorkflowAgent: The singleton workflow agent instance + + Raises: + ValueError: If environment validation fails + RuntimeError: If agent initialization fails + """ + global _workflow_agent + + if _workflow_agent is None: + logger.info("Initializing Workflow Agent") + + # Validate environment variables first + table_name, config_key, region, memory_id, strategy_id = validate_environment() + + # Create session manager only if strategy_id exists + session_manager = None + if strategy_id: + logger.info("Creating session manager for memory support") + agentcore_memory_config = AgentCoreMemoryConfig( + memory_id=memory_id, + session_id=session_id, + actor_id=actor_id, + retrieval_config={ + "/strategies/{memoryStrategyId}/actors/{actorId}": + RetrievalConfig( + top_k=5, + relevance_score=0.7, + strategy_id=strategy_id + ) + } + ) + session_manager = AgentCoreMemorySessionManager( + agentcore_memory_config=agentcore_memory_config, + region_name=region + ) + + # Create workflow agent with validated parameters + _workflow_agent = WorkflowAgent( + table_name=table_name, + config_key=config_key, + region=region, + session_manager=session_manager + ) + logger.info("Workflow agent initialized successfully") + + return _workflow_agent + + +@app.entrypoint +def invoke(payload: Dict[str, Any]): + """ + AgentCore Runtime entrypoint function. + + This function is called by the AgentCore Runtime for each invocation. + It handles both streaming and non-streaming responses based on the + workflow configuration. + + The function: + 1. Gets the workflow agent instance (singleton) + 2. Extracts the user message from the payload + 3. Invokes the client agent with the message + 4. Streams or returns the response based on configuration + + Args: + payload: Request payload from AgentCore Runtime containing user input + + Returns: + Generator yielding response chunks (streaming mode) or + Dict containing the complete response (non-streaming mode) + + Error Response Format: + { + "error": "Request processing failed", + "message": "Error details" + } + """ + # Get workflow agent instance - errors here should return dict, not stream + try: + # Extract session ID and actor ID for memory context + session_id = payload.get("conversationId") + actor_id = payload.get("userId") + logger.info(f"Session ID: {session_id}") + logger.info(f"Actor ID: {actor_id}") + + workflow_agent = get_agent_instance(session_id=session_id, actor_id=actor_id) + except ValueError as e: + # Configuration or validation errors during initialization + logger.error(f"Validation error during initialization: {e}") + return {"type": "error", "error": "Invalid configuration or request", "message": str(e)} + except RuntimeError as e: + # Agent initialization errors + logger.error(f"Runtime error during initialization: {e}") + return {"type": "error", "error": "Agent execution failed", "message": str(e)} + except Exception as e: + # Unexpected initialization errors + logger.error(f"Unexpected error during initialization: {e}", exc_info=True) + return {"type": "error", "error": "Request processing failed", "message": str(e)} + + try: + strands_agent = workflow_agent.get_agent() + config = workflow_agent.get_config() + + region = os.getenv(ENV_AWS_REGION) + multimodal_processor = MultimodalRequestProcessor(region) + has_files = multimodal_processor.has_files(payload) + multimodal_enabled = multimodal_processor.is_multimodal_enabled(config) + logger.debug(f"Multimodal enabled: {multimodal_enabled}") + logger.debug(f"Has files: {has_files}") + + # Determine processing mode and handle accordingly + if has_files and multimodal_enabled: + logger.debug("Multimodal request detected - processing files") + user_message = multimodal_processor.process_multimodal_request(payload) + elif has_files and not multimodal_enabled: + logger.warning( + "FILES IGNORED: User sent files but multimodal is disabled. Enable multimodal in configuration to process files. " + ) + user_message = extract_user_message(payload) + else: + # No files present - process as text-only regardless of multimodal setting + if multimodal_enabled: + logger.debug("Text-only request (multimodal enabled but no files provided)") + else: + logger.debug("Text-only request (multimodal disabled)") + user_message = extract_user_message(payload) + + logger.debug(f"User message: {user_message[:100]}...") + + if config.llm_params.streaming: + logger.debug("Using streaming mode") + return RuntimeStreaming.stream_response(strands_agent, user_message, config) + + logger.info("Using non-streaming mode") + + # Non-streaming response + response = strands_agent(user_message) + + return { + "result": str(response), + "agent_name": config.use_case_name, + "model_id": config.llm_params.bedrock_llm_params.model_id, + } + + except ValueError as e: + # Configuration or validation errors + logger.error(f"Validation error: {e}") + return {"type": "error", "error": "Invalid configuration or request", "message": str(e)} + + except RuntimeError as e: + # Agent execution errors + logger.error(f"Runtime error: {e}") + return {"type": "error", "error": "Agent execution failed", "message": str(e)} + + except Exception as e: + # Unexpected errors + logger.error(f"Unexpected error processing request: {e}", exc_info=True) + return {"type": "error", "error": "Request processing failed", "message": str(e)} + + +def main(): + """ + Main entry point for the application. + + This function is called when the Lambda container starts. It: + 1. Initializes the workflow agent (validates environment and loads config) + 2. Starts the AgentCore Runtime application + 3. Handles startup errors gracefully + + Exits: + 1: If initialization fails + """ + logger.info("Starting the Workflow Agent") + + try: + # Start the AgentCore Runtime application + app.run() + + except ValueError as e: + # Environment or configuration errors + logger.error(f"Configuration error: {e}") + sys.exit(1) + + except RuntimeError as e: + # Agent initialization errors + logger.error(f"Initialization error: {e}") + sys.exit(1) + + except Exception as e: + # Unexpected startup errors + logger.error(f"Failed to start workflow agent: {e}", exc_info=True) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/deployment/ecr/gaab-strands-workflow-agent/src/workflow_agent.py b/deployment/ecr/gaab-strands-workflow-agent/src/workflow_agent.py new file mode 100644 index 00000000..aab2644b --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/src/workflow_agent.py @@ -0,0 +1,459 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +WorkflowAgent - Orchestrates specialized agents using agents-as-tools pattern + +This module implements the workflow agent that coordinates multiple specialized +agents by treating them as tools for a client agent. This enables complex +multi-step workflows where the client agent can delegate tasks to specialized +agents based on the user's request. +""" + +import logging +from typing import List, Optional + +from agents_loader import AgentsLoader +from gaab_strands_common import BaseAgent, DynamoDBHelper, ToolsManager, wrap_tool_with_events +from gaab_strands_common.models import UseCaseConfig, WorkflowConfig +from strands import Agent +from strands.tools import tool +from strands.session import SessionManager + +logger = logging.getLogger(__name__) + + +class WorkflowAgent(BaseAgent): + """ + Workflow agent that orchestrates specialized agents. + + This class implements the agents-as-tools orchestration pattern where: + 1. Multiple specialized agents are loaded from configuration + 2. Each specialized agent is wrapped as a tool + 3. A client agent is created with these specialized agents as its tools + 4. The client agent coordinates workflow execution by delegating to specialists + + The workflow agent maintains compatibility with the existing message format + and streaming behavior expected by the AgentCore Runtime and frontend UI. + """ + + def __init__(self, table_name: str, config_key: str, region: str, session_manager: SessionManager = None): + """ + Initialize workflow agent. + + This constructor: + 1. Sets up DynamoDB helper for configuration loading + 2. Initializes base agent with region + 3. Loads workflow configuration from DynamoDB + 4. Validates configuration (use case type, orchestration pattern) + 5. Loads specialized agents + 6. Creates client agent with specialized agents as tools + + Args: + table_name: DynamoDB table name for configurations + config_key: Configuration key to load from DynamoDB + region: AWS region for Bedrock and DynamoDB + session_manager: Session manager for memory support + + Raises: + ValueError: If configuration is invalid or missing required fields + RuntimeError: If all specialized agents fail to load + """ + super().__init__(region) + + self.table_name = table_name + self.config_key = config_key + self.ddb_helper = DynamoDBHelper(table_name, region) + self.session_manager = session_manager + + # Workflow-specific attributes + self.workflow_config: Optional[WorkflowConfig] = None + self.client_agent: Optional[Agent] = None + self.specialized_agents: List[Agent] = [] + + logger.info(f"Initializing workflow agent: {config_key}") + self._initialize() + + def _initialize(self): + """ + Initialize workflow agent by loading configuration and creating agents. + + This method orchestrates the complete initialization process: + 1. Load workflow configuration from DynamoDB + 2. Validate use case type is "AgentBuilder" + 3. Validate workflow type is "AgentsAsTools" + 4. Load specialized agents from configuration + 5. Create client agent with specialized agents as tools + + Raises: + ValueError: If configuration validation fails + RuntimeError: If all specialized agents fail to load + """ + logger.info("Starting workflow agent initialization") + + # Get UseCaseConfig (LlmParams, WorkflowParams, etc.) and WorkflowConfig (WorkflowType and WorkflowParams ) + self.config, self.workflow_config = self._load_workflow_config() + + # Validate orchestration pattern + workflow_type = self.workflow_config.workflow_type + if workflow_type != "agents-as-tools": + raise ValueError( + f"Unsupported workflow type: {workflow_type}. " + f"Only 'agents-as-tools' is currently supported." + ) + + logger.info(f"Workflow type validated: {workflow_type}") + + # Load specialized agents + self.specialized_agents = self._load_specialized_agents() + + # Create client agent + self.client_agent = self._create_client_agent() + + logger.info( + f"Workflow agent initialized successfully: {self.config.use_case_name} " + f"with {len(self.specialized_agents)} specialized agent(s)" + ) + + def _load_workflow_config(self) -> tuple[UseCaseConfig, WorkflowConfig]: + """ + Load workflow configuration from DynamoDB with validation. + + This method: + 1. Fetches configuration from DynamoDB using the config key + 2. Validates that UseCaseType is "WorkflowBuilder" + 3. Parses the configuration into UseCaseConfig and WorkflowConfig models + 4. Validates that workflow parameters are present + + Returns: + Tuple of (UseCaseConfig, WorkflowConfig) + + Raises: + ValueError: If configuration is invalid or missing required fields + """ + logger.info(f"Loading workflow configuration for key: {self.config_key}") + + try: + # Fetch configuration from DynamoDB + config_dict = self.ddb_helper.get_config(self.config_key) + + # Validate use case type - Workflow is the expected type + self._validate_use_case_type(config_dict, "Workflow") + + # Parse use case configuration + use_case_config = UseCaseConfig.from_ddb_config(config_dict) + + # Extract and validate workflow parameters + if not use_case_config.workflow_params: + raise ValueError( + f"No WorkflowParams found in configuration for key: {self.config_key}" + ) + + if not use_case_config.workflow_params.orchestration_pattern: + raise ValueError( + f"No OrchestrationPattern found in WorkflowParams for key: {self.config_key}" + ) + + # Create WorkflowConfig from the structure + workflow_config = WorkflowConfig( + WorkflowType=use_case_config.workflow_params.orchestration_pattern, + WorkflowParams=use_case_config.workflow_params, + ) + + logger.info( + f"Successfully loaded workflow configuration: {use_case_config.use_case_name}" + ) + logger.debug(f"Workflow type: {workflow_config.workflow_type}") + + return use_case_config, workflow_config + + except ValueError: + # Re-raise validation errors + raise + except Exception as e: + logger.error(f"Error loading workflow configuration: {e}", exc_info=True) + raise ValueError(f"Failed to load workflow configuration: {e}") + + def _load_specialized_agents(self) -> List[Agent]: + """ + Load and instantiate specialized agents using AgentsLoader. + + This method: + 1. Extracts selected agents from workflow configuration + 2. Uses AgentsLoader to load each agent with its tools + 3. Handles partial failures gracefully (continues if some agents load) + 4. Logs detailed information about loaded agents + + Returns: + List of successfully loaded Agent instances + + Raises: + RuntimeError: If all specialized agents fail to load + ValueError: If no agents are configured + """ + logger.info("=" * 80) + logger.info("Loading specialized agents for workflow") + logger.info("=" * 80) + + # Extract agent references from workflow configuration + agents_as_tools_params = self.workflow_config.workflow_params.agents_as_tools_params + + if not agents_as_tools_params or not agents_as_tools_params.agents: + raise ValueError("No agents configured in AgentsAsToolsParams") + + agent_references = agents_as_tools_params.agents + logger.info(f"Found {len(agent_references)} agent(s) in workflow configuration") + + # Log summary of each agent + for idx, agent_ref in enumerate(agent_references, 1): + has_llm_params = agent_ref.llm_params is not None + llm_info = "" + if has_llm_params: + llm_info = ( + f" with custom LLM ({agent_ref.llm_params.bedrock_llm_params.model_identifier})" + ) + logger.info(f" {idx}. {agent_ref.use_case_name}{llm_info}") + + # Use AgentsLoader to load all agents + agents_loader = AgentsLoader(self.ddb_helper, self.region) + + try: + agents = agents_loader.load_agents(agent_references) + + logger.info("=" * 80) + logger.info(f"Successfully loaded {len(agents)} specialized agent(s)") + for idx, agent in enumerate(agents, 1): + logger.info(f" {idx}. {agent.name}") + logger.info("=" * 80) + + return agents + + except RuntimeError as e: + # All agents failed to load + logger.error(f"Failed to load specialized agents: {e}") + raise + + def _create_agent_tool(self, agent: Agent): + """ + Convert a specialized agent into a tool function. + + This creates a @tool decorated function that wraps the agent, + making it callable as a tool by the client agent. + + Args: + agent: The specialized Agent to convert to a tool + + Returns: + A tool function that invokes the agent + """ + agent_name = agent.name + agent_description = agent.description + # Create a valid tool name by replacing spaces and hyphens with underscores + tool_name = "specialized_agent__" + agent_name.replace(" ", "_").replace("-", "_") + + @tool(name=tool_name, description=agent_description) + def agent_tool_func(query: str) -> str: + """ + Delegate a query to this specialized agent. + + Args: + query: The query or task to delegate to this specialized agent + + Returns: + The agent's response + """ + try: + logger.info(f"Invoking specialized agent: {agent_name}") + response = agent(query) + response_str = str(response) + logger.info(f"Agent {agent_name} returned response ({len(response_str)} chars)") + return response_str + except Exception as e: + logger.error(f"Error in specialized agent {agent_name}: {e}", exc_info=True) + error_msg = f"Error in {agent_name}: {str(e)}" + return error_msg + + return agent_tool_func + + def _create_client_agent(self) -> Agent: + """ + Create client agent with specialized agents as tools. + + This method: + 1. Creates Bedrock model from LLM parameters + 2. Wraps specialized agents with event emission for UI tracking + 3. Creates client agent with: + - System prompt from agent configuration + - Specialized agents as tools + - Configured LLM model + 4. Logs client agent configuration + + Returns: + Configured Agent instance that orchestrates specialized agents + + Raises: + ValueError: If client agent creation fails + """ + logger.info("Creating client agent with specialized agents as tools") + + try: + # Create model from LLM parameters + model = self._create_model(self.config.llm_params) + + logger.info( + f"Client agent model: {self.config.llm_params.bedrock_llm_params.model_identifier}, " + f"temperature: {self.config.llm_params.temperature}, " + f"streaming: {self.config.llm_params.streaming}" + ) + + # Convert specialized agents to tool functions + logger.info("Converting specialized agents to tool functions") + agent_tools = [] + for agent in self.specialized_agents: + try: + # Create a tool function for this agent + agent_tool = self._create_agent_tool(agent) + # Wrap with event emission for UI tracking + wrapped_agent_tool = wrap_tool_with_events(agent_tool) + agent_tools.append(wrapped_agent_tool) + logger.debug(f"Created and wrapped tool function for agent: {agent.name}") + except Exception as e: + logger.error(f"Failed to create tool for agent {agent.name}: {e}") + # Skip this agent if tool creation fails + continue + + # Load custom tools for the workflow + custom_tools = self._load_workflow_custom_tools() + all_tools = agent_tools + custom_tools + + # Get system prompt from workflow configuration + system_prompt = self.config.workflow_params.system_prompt + + logger.info( + f"Creating client agent '{self.config.use_case_name}' with " + f"{len(agent_tools)} specialized agent(s) as tools and {len(custom_tools)} custom tool(s)" + ) + logger.debug(f"System prompt length: {len(system_prompt)} characters") + + # Check if memory is enabled + additional_params = {} + if ( + self.config.workflow_params.memory_config + and self.config.workflow_params.memory_config.long_term_enabled + and self.session_manager + ): + logger.info("Long-term memory enabled - adding session manager to agent") + additional_params["session_manager"] = self.session_manager + else: + logger.info("Long-term memory disabled or session manager not available") + logger.info(f"Session manager exists: {self.session_manager is not None}") + + # Create client agent + client_agent = Agent( + name=self.config.use_case_name, + system_prompt=system_prompt, + tools=all_tools, + model=model, + **additional_params + ) + + logger.info(f"Client agent created successfully: {client_agent.name}") + + return client_agent + + except Exception as e: + logger.error(f"Error creating client agent: {e}", exc_info=True) + raise ValueError(f"Failed to create client agent: {e}") + + def _load_workflow_custom_tools(self) -> List: + """ + Load custom tools for the workflow + + Custom tools at the workflow level are available to the workflow agents that orchestrate + the specialized agents. These are different from agent-level custom tools. + + This method follows the same pattern as configurable_agent - it always creates a + ToolsManager and calls load_all_tools, which handles auto-attachment internally. + + Returns: + List of custom tool objects for the workflow agent + """ + # Extract workflow-level custom tools (if any) + custom_tools = [] + if ( + hasattr(self.config, "workflow_params") + and self.config.workflow_params + and self.config.workflow_params.custom_tools + ): + custom_tools = self.config.workflow_params.custom_tools + + custom_tool_ids = [tool.tool_id for tool in custom_tools] + + logger.info( + f"Loading workflow-level tools: {len(custom_tool_ids)} custom tool(s) configured" + ) + + try: + # Create a ToolsManager instance - this handles auto-attachment internally + tools_manager = ToolsManager(self.region, self.config) + + # Always call load_all_tools - ToolsManager handles auto-attachment based on config + tools = tools_manager.load_all_tools( + mcp_servers=[], + strands_tool_ids=[], + custom_tool_ids=custom_tool_ids, + ) + logger.info(f"Successfully loaded {len(tools)} workflow-level tool(s)") + return tools + except Exception as e: + logger.error(f"Error loading workflow-level tools: {e}", exc_info=True) + logger.warning("Workflow will continue without custom tools") + return [] + + def get_agent(self) -> Agent: + """ + Get the client agent for workflow execution. + + Returns: + The client agent that orchestrates specialized agents + + Raises: + ValueError: If client agent not initialized + """ + if not self.client_agent: + raise ValueError("Client agent not initialized") + return self.client_agent + + def get_workflow_config(self) -> WorkflowConfig: + """ + Get the workflow configuration. + + Returns: + WorkflowConfig instance + + Raises: + ValueError: If workflow configuration not loaded + """ + if not self.workflow_config: + raise ValueError("Workflow configuration not loaded") + return self.workflow_config + + def get_specialized_agents(self) -> List[Agent]: + """ + Get the list of specialized agents. + + This is useful for debugging and monitoring. + + Returns: + List of specialized Agent instances + """ + return self.specialized_agents.copy() + + def get_agent_count(self) -> int: + """ + Get the number of specialized agents loaded. + + Returns: + Count of specialized agents + """ + return len(self.specialized_agents) diff --git a/deployment/ecr/gaab-strands-workflow-agent/test/conftest.py b/deployment/ecr/gaab-strands-workflow-agent/test/conftest.py new file mode 100644 index 00000000..8224531e --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/test/conftest.py @@ -0,0 +1,189 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Shared test fixtures for workflow agent tests +""" + +import os +from unittest.mock import Mock, patch + +import pytest + + +@pytest.fixture(autouse=True) +def mock_environment(): + """Mock environment variables for all tests""" + with patch.dict( + os.environ, + { + "AWS_REGION": "us-east-1", + "AWS_SDK_USER_AGENT": '{"user_agent_extra": "test-agent"}', + }, + clear=False, + ): + yield + + +@pytest.fixture +def sample_workflow_config_dict(): + """Sample workflow configuration dictionary""" + return { + "UseCaseName": "Test Workflow", + "UseCaseType": "Workflow", + "WorkflowParams": { + "SystemPrompt": "You are a workflow coordinator", + "OrchestrationPattern": "agents-as-tools", + "AgentsAsToolsParams": { + "Agents": [ + { + "UseCaseId": "test-agent-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "SpecializedAgent1", + "UseCaseDescription": "A specialized agent for testing", + "AgentBuilderParams": { + "SystemPrompt": "You are a specialized agent", + "Tools": [], + "MCPServers": [], + "CustomTools": [{"ToolId": "web_search"}], + "MemoryConfig": {"LongTermEnabled": False}, + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.5, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "anthropic.claude-3-5-sonnet-20240620-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + }, + } + ] + }, + "CustomTools": [], + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + }, + } + + +@pytest.fixture +def sample_agent_full_config_dict(): + """Sample full agent configuration dictionary""" + return { + "UseCaseName": "FullAgent", + "UseCaseId": "test-agent-id", + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "SystemPrompt": "You are a full agent", + "Tools": [], + "MCPServers": [{"McpId": "mcp-1"}], + "MemoryConfig": {"LongTermEnabled": False}, + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.8, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + }, + } + + +@pytest.fixture +def multimodal_custom_tools_config(): + """Comprehensive workflow configuration with multimodal, custom tools, and multiple agents""" + return { + "UseCaseName": "Comprehensive Workflow", + "UseCaseType": "Workflow", + "WorkflowParams": { + "SystemPrompt": "You are a comprehensive workflow coordinator", + "OrchestrationPattern": "agents-as-tools", + "AgentsAsToolsParams": { + "Agents": [ + { + "UseCaseId": "agent-1-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "DataAnalysisAgent", + "UseCaseDescription": "Specialized in data analysis", + "AgentBuilderParams": { + "SystemPrompt": "You are a data analysis specialist", + "Tools": [{"ToolId": "current_time"}], + "MCPServers": [], + "CustomTools": [{"ToolId": "data_processor"}], + "MemoryConfig": {"LongTermEnabled": False}, + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.3, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "anthropic.claude-3-5-sonnet-20240620-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + "MultimodalParams": { + "MultimodalEnabled": False + }, # Agent level disabled + }, + }, + { + "UseCaseId": "agent-2-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "ReportGeneratorAgent", + "UseCaseDescription": "Specialized in report generation", + "AgentBuilderParams": { + "SystemPrompt": "You are a report generation specialist", + "Tools": [{"ToolId": "environment"}], + "MCPServers": [], + "CustomTools": [{"ToolId": "report_formatter"}], + "MemoryConfig": {"LongTermEnabled": False}, + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": True, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + "MultimodalParams": { + "MultimodalEnabled": False + }, # Agent level disabled + }, + }, + ] + }, + "CustomTools": [{"ToolId": "workflow_orchestrator"}], # 1 workflow-level custom tool + }, + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.5, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + "MultimodalParams": {"MultimodalEnabled": True}, # Workflow level multimodal enabled + }, + } diff --git a/deployment/ecr/gaab-strands-workflow-agent/test/test_agentcore_integration.py b/deployment/ecr/gaab-strands-workflow-agent/test/test_agentcore_integration.py new file mode 100644 index 00000000..a84c2c21 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/test/test_agentcore_integration.py @@ -0,0 +1,829 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Integration tests for workflow agent with AgentCore Runtime. + +These tests verify that the workflow agent properly integrates with the +AgentCore Runtime, including: +- @app.entrypoint decorator usage +- Payload structure compatibility +- Streaming response format +- Environment variable handling +""" + +import logging +import os +import sys +from unittest.mock import Mock, patch + +import pytest + +# Add src to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +import main +from gaab_strands_common.utils.helpers import extract_user_message +from main import get_agent_instance, invoke, validate_environment + + +class TestTelemetryConfiguration: + """Test OpenTelemetry configuration and logging setup""" + + def test_opentelemetry_logging_suppression(self): + """Test that OpenTelemetry context warnings are suppressed""" + # Get the OpenTelemetry context logger + otel_logger = logging.getLogger("opentelemetry.context") + + # Verify the logger level is set to ERROR (suppressing warnings) + assert otel_logger.level == logging.ERROR + + +class TestEnvironmentValidation: + """Test environment variable validation""" + + def test_validate_environment_success(self): + """Test successful environment validation""" + with patch.dict( + os.environ, + { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id", + }, + ): + table_name, config_key, region, memory_id, strategy_id = validate_environment() + + assert table_name == "test-table" + assert config_key == "test-key" + assert region == "us-east-1" + + def test_validate_environment_missing_table_name(self): + """Test validation fails when USE_CASE_TABLE_NAME is missing""" + with patch.dict( + os.environ, + {"USE_CASE_CONFIG_KEY": "test-key", "AWS_REGION": "us-east-1"}, + clear=True, + ): + with pytest.raises(ValueError, match="USE_CASE_TABLE_NAME"): + validate_environment() + + def test_validate_environment_missing_config_key(self): + """Test validation fails when USE_CASE_CONFIG_KEY is missing""" + with patch.dict( + os.environ, + {"USE_CASE_TABLE_NAME": "test-table", "AWS_REGION": "us-east-1"}, + clear=True, + ): + with pytest.raises(ValueError, match="USE_CASE_CONFIG_KEY"): + validate_environment() + + def test_validate_environment_missing_region(self): + """Test validation fails when AWS_REGION is missing""" + with patch.dict( + os.environ, + {"USE_CASE_TABLE_NAME": "test-table", "USE_CASE_CONFIG_KEY": "test-key"}, + clear=True, + ): + with pytest.raises(ValueError, match="AWS_REGION"): + validate_environment() + + +class TestPayloadExtraction: + """Test payload structure and message extraction""" + + def test_extract_user_message_success(self): + """Test successful message extraction from payload""" + payload = {"input": "Hello, how can you help me?"} + + message = extract_user_message(payload) + + assert message == "Hello, how can you help me?" + + def test_extract_user_message_with_whitespace(self): + """Test message extraction strips whitespace""" + payload = {"input": " Hello, world! "} + + message = extract_user_message(payload) + + assert message == "Hello, world!" + + def test_extract_user_message_missing_input_field(self): + """Test extraction when input field is missing""" + payload = {"other_field": "value"} + + message = extract_user_message(payload) + + assert "Please provide your message" in message + + def test_extract_user_message_empty_input(self): + """Test extraction when input is empty""" + payload = {"input": ""} + + message = extract_user_message(payload) + + assert "Please provide your message" in message + + def test_extract_user_message_none_input(self): + """Test extraction when input is None""" + payload = {"input": None} + + message = extract_user_message(payload) + + assert "Please provide your message" in message + + def test_extract_user_message_invalid_payload_type(self): + """Test extraction fails with invalid payload type""" + payload = "not a dictionary" + + with pytest.raises(ValueError, match="Payload must be a dictionary"): + extract_user_message(payload) + + +class TestAgentCoreIntegration: + """Test AgentCore Runtime integration""" + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + def test_invoke_streaming_mode(self, mock_validate_env, mock_workflow_agent_class): + """Test invoke function in streaming mode""" + # Setup mocks + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + # Mock workflow agent instance + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + # Configure streaming mode + mock_config.llm_params.streaming = True + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + # Mock RuntimeStreaming + with patch("main.RuntimeStreaming") as mock_streaming: + # Create mock generator + def mock_stream_response(*args, **kwargs): + yield {"type": "content", "text": "Hello"} + yield {"type": "content", "text": " world"} + yield {"type": "completion"} + + mock_streaming.stream_response.return_value = mock_stream_response() + + # Test payload + payload = {"input": "Test message"} + + # Invoke + result = invoke(payload) + + # Verify it returns a generator + assert hasattr(result, "__iter__") + + # Consume generator and verify chunks + chunks = list(result) + assert len(chunks) == 3 + assert chunks[0]["type"] == "content" + assert chunks[0]["text"] == "Hello" + assert chunks[1]["type"] == "content" + assert chunks[1]["text"] == " world" + assert chunks[2]["type"] == "completion" + + # Verify RuntimeStreaming was called correctly + mock_streaming.stream_response.assert_called_once() + call_args = mock_streaming.stream_response.call_args + assert call_args[0][0] == mock_client_agent + assert call_args[0][1] == "Test message" + assert call_args[0][2] == mock_config + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + def test_invoke_non_streaming_mode(self, mock_validate_env, mock_workflow_agent_class): + """Test invoke function in non-streaming mode""" + # Reset singleton + main._workflow_agent = None + + # Setup mocks + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + # Mock workflow agent instance + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + # Configure non-streaming mode + mock_config.llm_params.streaming = False + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + # Mock agent response + mock_client_agent.return_value = "This is the complete response" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + # Test payload + payload = {"input": "Test message"} + + # Invoke + result = invoke(payload) + + # Verify response structure + assert isinstance(result, dict) + assert result["result"] == "This is the complete response" + assert result["agent_name"] == "Test Workflow" + assert result["model_id"] == "amazon.nova-pro-v1:0" + + # Verify agent was called + mock_client_agent.assert_called_once_with("Test message") + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + def test_invoke_error_handling(self, mock_validate_env, mock_workflow_agent_class): + """Test invoke function error handling""" + # Reset singleton + main._workflow_agent = None + + # Setup mocks + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + # Mock workflow agent to raise error + mock_workflow_agent_class.side_effect = RuntimeError("Agent initialization failed") + + # Test payload + payload = {"input": "Test message"} + + # Invoke + result = invoke(payload) + + # Verify error response + assert isinstance(result, dict) + assert "error" in result + assert "message" in result + assert "Agent execution failed" in result["error"] + assert "Agent initialization failed" in result["message"] + + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + def test_invoke_validation_error(self, mock_validate_env, mock_workflow_agent_class): + """Test invoke function with validation error""" + # Reset singleton + main._workflow_agent = None + + # Setup mocks + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + # Mock workflow agent to raise ValueError + mock_workflow_agent_class.side_effect = ValueError("Invalid configuration") + + # Test payload + payload = {"input": "Test message"} + + # Invoke + result = invoke(payload) + + # Verify error response + assert isinstance(result, dict) + assert "error" in result + assert "message" in result + assert "Invalid configuration" in result["error"] + + +class TestStreamingResponseFormat: + """Test streaming response format compatibility""" + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + def test_streaming_content_chunk_format(self, mock_validate_env, mock_workflow_agent_class): + """Test content chunk format matches AgentCore expectations""" + # Setup mocks + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + mock_config.llm_params.streaming = True + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + with patch("main.RuntimeStreaming") as mock_streaming: + + def mock_stream_response(*args, **kwargs): + yield { + "type": "content", + "text": "Test response", + "agent_name": "Test Workflow", + "model_id": "amazon.nova-pro-v1:0", + } + + mock_streaming.stream_response.return_value = mock_stream_response() + + payload = {"input": "Test message"} + result = invoke(payload) + chunks = list(result) + + # Verify chunk structure + assert len(chunks) == 1 + chunk = chunks[0] + assert chunk["type"] == "content" + assert chunk["text"] == "Test response" + assert chunk["agent_name"] == "Test Workflow" + assert chunk["model_id"] == "amazon.nova-pro-v1:0" + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + def test_streaming_tool_use_chunk_format(self, mock_validate_env, mock_workflow_agent_class): + """Test tool usage chunk format matches AgentCore expectations""" + # Setup mocks + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + mock_config.llm_params.streaming = True + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + with patch("main.RuntimeStreaming") as mock_streaming: + + def mock_stream_response(*args, **kwargs): + yield { + "type": "tool_use", + "toolUsage": { + "toolName": "SpecializedAgent", + "status": "started", + "startTime": "2024-01-01T00:00:00Z", + }, + } + yield { + "type": "tool_use", + "toolUsage": { + "toolName": "SpecializedAgent", + "status": "completed", + "startTime": "2024-01-01T00:00:00Z", + "endTime": "2024-01-01T00:00:01Z", + "toolOutput": "Result", + }, + } + + mock_streaming.stream_response.return_value = mock_stream_response() + + payload = {"input": "Test message"} + result = invoke(payload) + chunks = list(result) + + # Verify tool usage chunks + assert len(chunks) == 2 + + # Started chunk + assert chunks[0]["type"] == "tool_use" + assert "toolUsage" in chunks[0] + assert chunks[0]["toolUsage"]["toolName"] == "SpecializedAgent" + assert chunks[0]["toolUsage"]["status"] == "started" + + # Completed chunk + assert chunks[1]["type"] == "tool_use" + assert chunks[1]["toolUsage"]["status"] == "completed" + assert "toolOutput" in chunks[1]["toolUsage"] + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + def test_streaming_completion_chunk_format(self, mock_validate_env, mock_workflow_agent_class): + """Test completion chunk format matches AgentCore expectations""" + # Setup mocks + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + mock_config.llm_params.streaming = True + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + with patch("main.RuntimeStreaming") as mock_streaming: + + def mock_stream_response(*args, **kwargs): + yield { + "type": "completion", + "agent_name": "Test Workflow", + "model_id": "amazon.nova-pro-v1:0", + } + + mock_streaming.stream_response.return_value = mock_stream_response() + + payload = {"input": "Test message"} + result = invoke(payload) + chunks = list(result) + + # Verify completion chunk + assert len(chunks) == 1 + chunk = chunks[0] + assert chunk["type"] == "completion" + assert chunk["agent_name"] == "Test Workflow" + assert chunk["model_id"] == "amazon.nova-pro-v1:0" + + +class TestSingletonPattern: + """Test singleton pattern for agent instance""" + + @patch.dict(os.environ, { + "USE_CASE_TABLE_NAME": "test-table", + "USE_CASE_CONFIG_KEY": "test-key", + "AWS_REGION": "us-east-1", + "MEMORY_ID": "test-memory-id" + }) + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + def test_agent_instance_singleton(self, mock_validate_env, mock_workflow_agent_class): + """Test that get_agent_instance returns the same instance""" + # Setup mocks + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + # Reset singleton + main._workflow_agent = None + + # Get instance twice + instance1 = get_agent_instance() + instance2 = get_agent_instance() + + # Verify same instance + assert instance1 is instance2 + + # Verify WorkflowAgent was only instantiated once + assert mock_workflow_agent_class.call_count == 1 + + +class TestMultimodalIntegration: + """Test multimodal functionality integration with AgentCore Runtime""" + + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + @patch("main.MultimodalRequestProcessor") + def test_invoke_with_multimodal_enabled_and_files( + self, mock_multimodal_processor_class, mock_validate_env, mock_workflow_agent_class + ): + """Test invoke function with multimodal enabled and files present""" + # Reset singleton + main._workflow_agent = None + + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + mock_config.llm_params.streaming = False + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_client_agent.return_value = "Analyzed the image successfully" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + # Multimodal processor setup - return content blocks instead of string + mock_processor = Mock() + mock_processor.has_files.return_value = True + mock_processor.is_multimodal_enabled.return_value = True + content_blocks = [ + {"text": "Analyze this image"}, + { + "text": "File available for reading: test.jpg with S3 key 'usecase/user/conv/msg/file1'" + }, + ] + mock_processor.process_multimodal_request.return_value = content_blocks + mock_multimodal_processor_class.return_value = mock_processor + + payload = { + "input": "Analyze this image", + "files": [{"name": "test.jpg", "content": "base64content"}], + } + + result = invoke(payload) + + mock_processor.has_files.assert_called_once_with(payload) + mock_processor.is_multimodal_enabled.assert_called_once_with(mock_config) + mock_processor.process_multimodal_request.assert_called_once_with(payload) + + # Verify agent was called with content blocks, not string + mock_client_agent.assert_called_once_with(content_blocks) + + assert isinstance(result, dict) + assert result["result"] == "Analyzed the image successfully" + assert result["agent_name"] == "Test Workflow" + assert result["model_id"] == "amazon.nova-pro-v1:0" + + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + @patch("main.MultimodalRequestProcessor") + @patch("main.extract_user_message") + def test_invoke_with_multimodal_disabled_and_files( + self, + mock_extract_user_message, + mock_multimodal_processor_class, + mock_validate_env, + mock_workflow_agent_class, + ): + """Test invoke function with multimodal disabled but files present""" + # Reset singleton + main._workflow_agent = None + + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + mock_config.llm_params.streaming = False + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_client_agent.return_value = "Text-only response" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + mock_processor = Mock() + mock_processor.has_files.return_value = True + mock_processor.is_multimodal_enabled.return_value = False + mock_multimodal_processor_class.return_value = mock_processor + + mock_extract_user_message.return_value = "Analyze this image" + + payload = { + "input": "Analyze this image", + "files": [{"name": "test.jpg", "content": "base64content"}], + } + + result = invoke(payload) + + mock_processor.has_files.assert_called_once_with(payload) + mock_processor.is_multimodal_enabled.assert_called_once_with(mock_config) + mock_processor.process_multimodal_request.assert_not_called() + + mock_extract_user_message.assert_called_once_with(payload) + mock_client_agent.assert_called_once_with("Analyze this image") + + assert isinstance(result, dict) + assert result["result"] == "Text-only response" + + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + @patch("main.MultimodalRequestProcessor") + @patch("main.extract_user_message") + def test_invoke_with_multimodal_enabled_no_files( + self, + mock_extract_user_message, + mock_multimodal_processor_class, + mock_validate_env, + mock_workflow_agent_class, + ): + """Test invoke function with multimodal enabled but no files present""" + # Reset singleton + main._workflow_agent = None + + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + mock_config.llm_params.streaming = False + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_client_agent.return_value = "Text-only response" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + mock_processor = Mock() + mock_processor.has_files.return_value = False + mock_processor.is_multimodal_enabled.return_value = True + mock_multimodal_processor_class.return_value = mock_processor + + mock_extract_user_message.return_value = "Hello, how can you help?" + + payload = {"input": "Hello, how can you help?"} + + result = invoke(payload) + + # Verify multimodal processing was checked but not used (no files) + mock_processor.has_files.assert_called_once_with(payload) + mock_processor.process_multimodal_request.assert_not_called() + + mock_extract_user_message.assert_called_once_with(payload) + mock_client_agent.assert_called_once_with("Hello, how can you help?") + + assert isinstance(result, dict) + assert result["result"] == "Text-only response" + + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + @patch("main.MultimodalRequestProcessor") + def test_invoke_multimodal_streaming_mode( + self, mock_multimodal_processor_class, mock_validate_env, mock_workflow_agent_class + ): + """Test invoke function with multimodal in streaming mode""" + # Reset singleton + main._workflow_agent = None + + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + mock_config.llm_params.streaming = True + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + mock_processor = Mock() + mock_processor.has_files.return_value = True + mock_processor.is_multimodal_enabled.return_value = True + content_blocks = [ + {"text": "Analyze this image"}, + { + "text": "File available for reading: test.jpg with S3 key 'usecase/user/conv/msg/file1'" + }, + ] + mock_processor.process_multimodal_request.return_value = content_blocks + mock_multimodal_processor_class.return_value = mock_processor + + # Mock RuntimeStreaming + with patch("main.RuntimeStreaming") as mock_streaming: + + def mock_stream_response(*args, **kwargs): + yield {"type": "content", "text": "Analyzing image..."} + yield {"type": "completion"} + + mock_streaming.stream_response.return_value = mock_stream_response() + + payload = { + "input": "Analyze this image", + "files": [{"name": "test.jpg", "content": "base64content"}], + } + + result = invoke(payload) + + mock_processor.has_files.assert_called_once_with(payload) + mock_processor.is_multimodal_enabled.assert_called_once_with(mock_config) + mock_processor.process_multimodal_request.assert_called_once_with(payload) + + # Verify RuntimeStreaming was called with content blocks + mock_streaming.stream_response.assert_called_once() + call_args = mock_streaming.stream_response.call_args + assert call_args[0][0] == mock_client_agent + assert call_args[0][1] == content_blocks + assert call_args[0][2] == mock_config + + assert hasattr(result, "__iter__") + chunks = list(result) + assert len(chunks) == 2 + assert chunks[0]["type"] == "content" + assert chunks[1]["type"] == "completion" + + +class TestFileContentBlockHandling: + """Test file and content block handling in AgentCore integration""" + + @patch("main.WorkflowAgent") + @patch("main.validate_environment") + @patch("main.MultimodalRequestProcessor") + def test_invoke_with_file_content_blocks( + self, mock_multimodal_processor_class, mock_validate_env, mock_workflow_agent_class + ): + """Test invoke function processes file content blocks correctly""" + # Reset singleton + main._workflow_agent = None + + mock_validate_env.return_value = ("test-table", "test-key", "us-east-1", "test-memory-id", "") + mock_agent_instance = Mock() + mock_client_agent = Mock() + mock_config = Mock() + + # Configure non-streaming mode with multimodal enabled + mock_config.llm_params.streaming = False + mock_config.use_case_name = "Test Workflow" + mock_config.llm_params.bedrock_llm_params.model_id = "amazon.nova-pro-v1:0" + + mock_client_agent.return_value = "Successfully processed files and generated response" + + mock_agent_instance.get_agent.return_value = mock_client_agent + mock_agent_instance.get_config.return_value = mock_config + mock_workflow_agent_class.return_value = mock_agent_instance + + mock_processor = Mock() + mock_processor.has_files.return_value = True + mock_processor.is_multimodal_enabled.return_value = True + + content_blocks = [ + {"text": "Analyze these files"}, + { + "text": "File available for reading: document.pdf with S3 key 'usecase/user/conv/msg/file1'" + }, + { + "text": "File available for reading: data.csv with S3 key 'usecase/user/conv/msg/file2'" + }, + ] + mock_processor.process_multimodal_request.return_value = content_blocks + mock_multimodal_processor_class.return_value = mock_processor + + payload = { + "input": "Analyze these files", + "files": [ + {"fileReference": "file-ref-1", "fileName": "document.pdf"}, + {"fileReference": "file-ref-2", "fileName": "data.csv"}, + ], + "conversationId": "conv-123", + "messageId": "msg-456", + "userId": "user-789", + } + + result = invoke(payload) + + mock_processor.has_files.assert_called_once_with(payload) + mock_processor.is_multimodal_enabled.assert_called_once_with(mock_config) + mock_processor.process_multimodal_request.assert_called_once_with(payload) + + mock_client_agent.assert_called_once_with(content_blocks) + + call_args = mock_client_agent.call_args[0][0] # First positional argument + assert call_args == [ + {"text": "Analyze these files"}, # User query + { + "text": "File available for reading: document.pdf with S3 key 'usecase/user/conv/msg/file1'" + }, + { + "text": "File available for reading: data.csv with S3 key 'usecase/user/conv/msg/file2'" + }, + ] + + assert result == { + "agent_name": "Test Workflow", + "model_id": "amazon.nova-pro-v1:0", + "result": "Successfully processed files and generated response", + } diff --git a/deployment/ecr/gaab-strands-workflow-agent/test/test_agents_loader.py b/deployment/ecr/gaab-strands-workflow-agent/test/test_agents_loader.py new file mode 100644 index 00000000..7497bf40 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/test/test_agents_loader.py @@ -0,0 +1,542 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Unit tests for AgentsLoader class. + +Tests cover: +- Loading agents with various configurations +- Graceful handling of partial agent loading failures +- Optional field handling in SelectedAgent +- Tool loading from MCP servers +- Model creation with fallbacks +""" + +import os +import sys +from unittest.mock import Mock, patch + +import pytest + +# Add src to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +from agents_loader import AgentsLoader +from gaab_strands_common.models import ( + AgentBuilderParams, + AgentReference, + BedrockLlmParams, + CustomToolReference, + LlmParams, + MCPServerReference, + StrandsToolReference, + UseCaseConfig, +) + + +class TestAgentsLoaderInitialization: + """Test AgentsLoader initialization""" + + def test_initialization(self): + """Test AgentsLoader initializes correctly""" + mock_ddb_helper = Mock() + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + assert loader.ddb_helper == mock_ddb_helper + assert loader.region == "us-east-1" + + +class TestAgentsLoaderLoadAgents: + """Test load_agents method""" + + @patch("agents_loader.Agent") + @patch("agents_loader.BedrockModel") + @patch("agents_loader.ToolsManager") + def test_load_agents_success( + self, mock_tools_manager_class, mock_bedrock_model, mock_agent_class + ): + """Test successful loading of all agents""" + mock_ddb_helper = Mock() + mock_tools_manager = Mock() + mock_tools_manager_class.return_value = mock_tools_manager + mock_tools_manager.load_all_tools.return_value = [] + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + agent_references = [ + AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="Agent1", + AgentBuilderParams=AgentBuilderParams( + SystemPrompt="Agent 1 prompt", + Tools=[], + MCPServers=[], + ), + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", + BedrockInferenceType="QUICK_START", + ), + ModelParams={}, + RAGEnabled=False, + ), + ), + AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="Agent2", + AgentBuilderParams=AgentBuilderParams( + SystemPrompt="Agent 2 prompt", + Tools=[], + MCPServers=[], + ), + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", + BedrockInferenceType="QUICK_START", + ), + ModelParams={}, + RAGEnabled=False, + ), + ), + ] + + mock_agent_1 = Mock() + mock_agent_1.name = "Agent1" + mock_agent_2 = Mock() + mock_agent_2.name = "Agent2" + mock_agent_class.side_effect = [mock_agent_1, mock_agent_2] + + # Mock BedrockModel for default model creation + mock_model = Mock() + mock_bedrock_model.return_value = mock_model + + agents = loader.load_agents(agent_references) + + assert len(agents) == 2 + assert agents[0] == mock_agent_1 + assert agents[1] == mock_agent_2 + + @patch("agents_loader.Agent") + @patch("agents_loader.BedrockModel") + @patch("agents_loader.ToolsManager") + def test_load_agents_partial_failure( + self, mock_tools_manager_class, mock_bedrock_model, mock_agent_class + ): + """Test loading continues when some agents fail""" + mock_ddb_helper = Mock() + mock_tools_manager = Mock() + mock_tools_manager_class.return_value = mock_tools_manager + mock_tools_manager.load_all_tools.return_value = [] + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + agent_references = [ + AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="Agent1", + AgentBuilderParams=AgentBuilderParams( + SystemPrompt="Agent 1 prompt", + Tools=[], + MCPServers=[], + ), + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", + BedrockInferenceType="QUICK_START", + ), + ModelParams={}, + RAGEnabled=False, + ), + ), + AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="Agent2", + AgentBuilderParams=AgentBuilderParams( + SystemPrompt="Agent 2 prompt", + Tools=[], + MCPServers=[], + ), + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", + BedrockInferenceType="QUICK_START", + ), + ModelParams={}, + RAGEnabled=False, + ), + ), + AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="Agent3", + AgentBuilderParams=AgentBuilderParams( + SystemPrompt="Agent 3 prompt", + Tools=[], + MCPServers=[], + ), + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", + BedrockInferenceType="QUICK_START", + ), + ModelParams={}, + RAGEnabled=False, + ), + ), + ] + + mock_agent_1 = Mock() + mock_agent_1.name = "Agent1" + mock_agent_3 = Mock() + mock_agent_3.name = "Agent3" + + # Agent 2 will fail during Agent construction + agent_call_count = [0] + + def agent_side_effect(*args, **kwargs): + agent_call_count[0] += 1 + if agent_call_count[0] == 1: + return mock_agent_1 + elif agent_call_count[0] == 2: + # Agent 2 fails during construction + raise RuntimeError("Failed to create agent 2") + else: + return mock_agent_3 + + mock_agent_class.side_effect = agent_side_effect + + # Mock BedrockModel for default model creation + mock_model = Mock() + mock_bedrock_model.return_value = mock_model + + agents = loader.load_agents(agent_references) + + assert len(agents) == 2 + assert agents[0] == mock_agent_1 + assert agents[1] == mock_agent_3 + + @patch("agents_loader.ToolsManager") + def test_load_agents_all_fail(self, mock_tools_manager_class): + """Test loading fails when all agents fail""" + mock_ddb_helper = Mock() + mock_tools_manager = Mock() + mock_tools_manager_class.return_value = mock_tools_manager + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + agent_references = [ + AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="Agent1", + AgentBuilderParams=AgentBuilderParams( + SystemPrompt="Agent 1 prompt", + Tools=[], + MCPServers=[], + ), + ), + AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="Agent2", + AgentBuilderParams=AgentBuilderParams( + SystemPrompt="Agent 2 prompt", + Tools=[], + MCPServers=[], + ), + ), + ] + + with patch("agents_loader.Agent") as mock_agent_class: + mock_agent_class.side_effect = RuntimeError("Agent loading failed") + + with pytest.raises(RuntimeError, match="All specialized agents failed to load"): + loader.load_agents(agent_references) + + def test_load_agents_empty_list(self): + """Test loading with empty agent list""" + mock_ddb_helper = Mock() + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + agents = loader.load_agents([]) + + assert len(agents) == 0 + + +class TestAgentsLoaderToolLoading: + """Test tool loading from MCP servers""" + + @patch("agents_loader.ToolsManager") + def test_load_agent_tools_with_mcp_servers(self, mock_tools_manager_class): + """Test loading tools from MCP servers, Strands tools, and agent-level custom tools""" + mock_ddb_helper = Mock() + mock_tools_manager = Mock() + mock_tools_manager_class.return_value = mock_tools_manager + + mock_tool_1 = Mock() + mock_tool_2 = Mock() + mock_tool_3 = Mock() + mock_tools_manager.load_all_tools.return_value = [mock_tool_1, mock_tool_2, mock_tool_3] + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + strands_tools = [StrandsToolReference(ToolId="current_time")] + mcp_servers = [ + MCPServerReference(UseCaseId="mcp-1", Url="https://example.com/mcp1", Type="gateway"), + MCPServerReference(UseCaseId="mcp-2", Url="https://example.com/mcp2", Type="runtime"), + ] + custom_tools = [CustomToolReference(ToolId="agent_custom_tool")] + + mock_agent_config = UseCaseConfig( + UseCaseName="TestAgent", + UseCaseType="AgentBuilder", + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", BedrockInferenceType="QUICK_START" + ), + ModelParams={}, + RAGEnabled=False, + ), + ) + + tools = loader._load_agent_tools( + strands_tools, mcp_servers, custom_tools, mock_agent_config + ) + + assert len(tools) == 3 + assert tools[0] == mock_tool_1 + assert tools[1] == mock_tool_2 + assert tools[2] == mock_tool_3 + + mock_tools_manager_class.assert_called_once_with("us-east-1", mock_agent_config) + + # Verify load_all_tools was called with the new format + mock_tools_manager.load_all_tools.assert_called_once() + call_args = mock_tools_manager.load_all_tools.call_args + assert call_args[1]["strands_tool_ids"] == ["current_time"] + # Check mcp_servers parameter contains the correct structure + mcp_servers_arg = call_args[1]["mcp_servers"] + assert len(mcp_servers_arg) == 2 + assert mcp_servers_arg[0]["use_case_id"] == "mcp-1" + assert mcp_servers_arg[1]["use_case_id"] == "mcp-2" + + @patch("agents_loader.ToolsManager") + def test_load_agent_tools_with_custom_tools_only(self, mock_tools_manager_class): + """Test loading only agent-level custom tools""" + mock_ddb_helper = Mock() + mock_tools_manager = Mock() + mock_tools_manager_class.return_value = mock_tools_manager + + mock_custom_tool = Mock() + mock_tools_manager.load_all_tools.return_value = [mock_custom_tool] + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + strands_tools = [] + mcp_servers = [] + custom_tools = [ + CustomToolReference(ToolId="agent_calculator"), + CustomToolReference(ToolId="agent_formatter"), + ] + + mock_agent_config = UseCaseConfig( + UseCaseName="TestAgent", + UseCaseType="AgentBuilder", + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", BedrockInferenceType="QUICK_START" + ), + ModelParams={}, + RAGEnabled=False, + ), + ) + + tools = loader._load_agent_tools( + strands_tools, mcp_servers, custom_tools, mock_agent_config + ) + + assert len(tools) == 1 + assert tools[0] == mock_custom_tool + + mock_tools_manager_class.assert_called_once_with("us-east-1", mock_agent_config) + mock_tools_manager.load_all_tools.assert_called_once() + call_args = mock_tools_manager.load_all_tools.call_args + assert call_args[1]["strands_tool_ids"] == [] + assert call_args[1]["custom_tool_ids"] == ["agent_calculator", "agent_formatter"] + assert call_args[1]["mcp_servers"] == [] + + @patch("agents_loader.ToolsManager") + def test_load_agent_tools_empty_list(self, mock_tools_manager_class): + """Test loading tools with empty lists""" + mock_ddb_helper = Mock() + mock_tools_manager = Mock() + mock_tools_manager_class.return_value = mock_tools_manager + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + mock_agent_config = UseCaseConfig( + UseCaseName="TestAgent", + UseCaseType="AgentBuilder", + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", BedrockInferenceType="QUICK_START" + ), + ModelParams={}, + RAGEnabled=False, + ), + ) + + tools = loader._load_agent_tools([], [], [], mock_agent_config) + + assert len(tools) == 0 + mock_tools_manager.load_all_tools.assert_not_called() + + @patch("agents_loader.ToolsManager") + def test_load_agent_tools_error_handling(self, mock_tools_manager_class): + """Test graceful error handling when tool loading fails""" + mock_ddb_helper = Mock() + mock_tools_manager = Mock() + mock_tools_manager_class.return_value = mock_tools_manager + + mock_tools_manager.load_all_tools.side_effect = RuntimeError("Tool loading failed") + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + mcp_servers = [ + MCPServerReference(UseCaseId="mcp-1", Url="https://example.com/mcp", Type="gateway") + ] + strands_tools = [StrandsToolReference(ToolId="current_time")] + custom_tools = [CustomToolReference(ToolId="agent_custom_tool")] + + # Create a mock agent config for testing + mock_agent_config = UseCaseConfig( + UseCaseName="TestAgent", + UseCaseType="AgentBuilder", + LlmParams=LlmParams( + ModelProvider="Bedrock", + Temperature=0.7, + Streaming=True, + Verbose=False, + BedrockLlmParams=BedrockLlmParams( + ModelId="amazon.nova-lite-v1:0", BedrockInferenceType="QUICK_START" + ), + ModelParams={}, + RAGEnabled=False, + ), + ) + + tools = loader._load_agent_tools( + strands_tools, mcp_servers, custom_tools, mock_agent_config + ) + + assert len(tools) == 0 + + +class TestAgentsLoaderModelCreation: + """Test model creation with fallbacks""" + + @patch("agents_loader.BedrockModel") + def test_create_default_model(self, mock_bedrock_model): + """Test creating default model when no config available""" + mock_ddb_helper = Mock() + + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + mock_model = Mock() + mock_bedrock_model.return_value = mock_model + + model = loader._create_default_model() + + assert model == mock_model + mock_bedrock_model.assert_called_once_with( + model_id="amazon.nova-lite-v1:0", + region_name="us-east-1", + temperature=0.7, + streaming=True, + ) + + +class TestAgentsLoaderConfigCreation: + """Test agent config creation""" + + def test_create_agent_use_case_config(self): + """Test creating UseCaseConfig from AgentReference""" + mock_ddb_helper = Mock() + loader = AgentsLoader(mock_ddb_helper, "us-east-1") + + mock_llm_params = LlmParams( + ModelProvider="Bedrock", + Temperature=0.8, + Streaming=False, + Verbose=True, + BedrockLlmParams=BedrockLlmParams( + ModelId="anthropic.claude-3-5-sonnet-20240620-v1:0", + BedrockInferenceType="QUICK_START", + ), + ModelParams={}, + RAGEnabled=False, + ) + + mock_agent_builder_params = AgentBuilderParams( + SystemPrompt="Test agent prompt", + Tools=[], + MCPServers=[], + ) + + agent_ref = AgentReference( + UseCaseId="test-agent-id", + UseCaseType="AgentBuilder", + UseCaseName="TestAgent", + AgentBuilderParams=mock_agent_builder_params, + LlmParams=mock_llm_params, + ) + + result_config = loader._create_agent_use_case_config(agent_ref) + + assert isinstance(result_config, UseCaseConfig) + assert result_config.use_case_name == "TestAgent" + assert result_config.use_case_type == "AgentBuilder" + assert result_config.agent_builder_params == mock_agent_builder_params + assert result_config.llm_params == mock_llm_params + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/deployment/ecr/gaab-strands-workflow-agent/test/test_workflow_agent.py b/deployment/ecr/gaab-strands-workflow-agent/test/test_workflow_agent.py new file mode 100644 index 00000000..c18cc416 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/test/test_workflow_agent.py @@ -0,0 +1,714 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Unit tests for WorkflowAgent class. + +Tests cover: +- Initialization and configuration loading +- Configuration validation (use case type, workflow type) +- Specialized agent loading +- Client agent creation +- Error handling +""" + +import os +import sys +from unittest.mock import Mock, patch + +import pytest + +# Add src to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +from workflow_agent import WorkflowAgent + + +class TestWorkflowAgentInitialization: + """Test WorkflowAgent initialization""" + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + def test_successful_initialization( + self, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_tools_manager, + sample_workflow_config_dict, + ): + """Test successful workflow agent initialization""" + # Setup mock DynamoDB helper + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + mock_ddb_helper.get_config.return_value = sample_workflow_config_dict + + # Setup mock agents loader + mock_agents_loader = Mock() + mock_specialized_agent = Mock() + mock_specialized_agent.name = "SpecializedAgent" + mock_agents_loader.load_agents.return_value = [mock_specialized_agent] + mock_agents_loader_class.return_value = mock_agents_loader + + # Setup mocks - BedrockModel needs model_id attribute + mock_model = Mock() + mock_model.model_id = "amazon.nova-pro-v1:0" + mock_bedrock_model.return_value = mock_model + + mock_client_agent = Mock() + mock_client_agent.name = "Test Workflow" + mock_agent.return_value = mock_client_agent + + mock_wrap.return_value = mock_specialized_agent + + # Create workflow agent + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + # Verify initialization + assert workflow_agent.config is not None + assert workflow_agent.workflow_config is not None + assert workflow_agent.client_agent is not None + assert len(workflow_agent.specialized_agents) == 1 + assert workflow_agent.config.use_case_name == "Test Workflow" + + @patch("workflow_agent.DynamoDBHelper") + def test_initialization_with_invalid_use_case_type(self, mock_ddb_helper_class): + """Test initialization fails with invalid use case type""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + + mock_config_dict = { + "UseCaseName": "Test Workflow", + "UseCaseType": "Chat", + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + }, + } + mock_ddb_helper.get_config.return_value = mock_config_dict + + with pytest.raises(ValueError, match="Expected Workflow, got Chat"): + WorkflowAgent(table_name="test-table", config_key="test-key", region="us-east-1") + + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + def test_initialization_with_unsupported_workflow_type( + self, mock_ddb_helper_class, mock_agents_loader_class, sample_workflow_config_dict + ): + """Test initialization fails with unsupported workflow type""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + + config_dict = sample_workflow_config_dict.copy() + config_dict["WorkflowParams"]["OrchestrationPattern"] = "UnsupportedType" + mock_ddb_helper.get_config.return_value = config_dict + + with pytest.raises(ValueError, match="Unsupported workflow type"): + WorkflowAgent(table_name="test-table", config_key="test-key", region="us-east-1") + + @patch("workflow_agent.DynamoDBHelper") + def test_initialization_missing_workflow_config(self, mock_ddb_helper_class): + """Test initialization fails when WorkflowParams is missing""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + + mock_config_dict = { + "UseCaseName": "Test Workflow", + "UseCaseType": "Workflow", + "LlmParams": { + "ModelProvider": "Bedrock", + "Temperature": 0.7, + "Streaming": True, + "Verbose": False, + "BedrockLlmParams": { + "ModelId": "amazon.nova-pro-v1:0", + "BedrockInferenceType": "QUICK_START", + }, + "ModelParams": {}, + }, + } + mock_ddb_helper.get_config.return_value = mock_config_dict + + with pytest.raises(ValueError, match="No WorkflowParams found"): + WorkflowAgent(table_name="test-table", config_key="test-key", region="us-east-1") + + +class TestWorkflowAgentConfigurationValidation: + """Test configuration validation""" + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + def test_validates_agents_as_tools_workflow_type( + self, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_tools_manager, + sample_workflow_config_dict, + ): + """Test that agents-as-tools workflow type is validated""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + mock_ddb_helper.get_config.return_value = sample_workflow_config_dict + + mock_agents_loader = Mock() + mock_agents_loader.load_agents.return_value = [Mock()] + mock_agents_loader_class.return_value = mock_agents_loader + + mock_wrap.return_value = Mock() + + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + assert workflow_agent.workflow_config.workflow_type == "agents-as-tools" + + +class TestWorkflowAgentGracefulFailures: + """Test graceful handling of partial failures""" + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + def test_all_agents_fail_to_load( + self, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_tools_manager, + sample_workflow_config_dict, + ): + """Test that initialization fails when all agents fail to load""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + mock_ddb_helper.get_config.return_value = sample_workflow_config_dict + + mock_agents_loader = Mock() + mock_agents_loader.load_agents.side_effect = RuntimeError( + "All specialized agents failed to load" + ) + mock_agents_loader_class.return_value = mock_agents_loader + + with pytest.raises(RuntimeError, match="All specialized agents failed to load"): + WorkflowAgent(table_name="test-table", config_key="test-key", region="us-east-1") + + +class TestWorkflowAgentGetters: + """Test getter methods""" + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + def test_get_agent( + self, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_tools_manager, + sample_workflow_config_dict, + ): + """Test get_agent returns client agent""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + mock_ddb_helper.get_config.return_value = sample_workflow_config_dict + + mock_agents_loader = Mock() + mock_agents_loader.load_agents.return_value = [Mock()] + mock_agents_loader_class.return_value = mock_agents_loader + + mock_client_agent = Mock() + mock_client_agent.name = "Test Workflow" + mock_agent.return_value = mock_client_agent + mock_wrap.return_value = Mock() + + # Mock BedrockModel + mock_model = Mock() + mock_model.model_id = "amazon.nova-pro-v1:0" + mock_bedrock_model.return_value = mock_model + + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + agent = workflow_agent.get_agent() + assert agent == mock_client_agent + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + def test_get_agent_count( + self, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_tools_manager, + sample_workflow_config_dict, + ): + """Test get_agent_count returns correct count""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + + config_dict = sample_workflow_config_dict.copy() + config_dict["WorkflowParams"]["AgentsAsToolsParams"] = { + "Agents": [ + { + "UseCaseId": "test-agent-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "Agent1", + "AgentBuilderParams": { + "SystemPrompt": "Agent 1", + "Tools": [], + "MCPServers": [], + }, + }, + { + "UseCaseId": "test-agent-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "Agent2", + "AgentBuilderParams": { + "SystemPrompt": "Agent 2", + "Tools": [], + "MCPServers": [], + }, + }, + { + "UseCaseId": "test-agent-id", + "UseCaseType": "AgentBuilder", + "UseCaseName": "Agent3", + "AgentBuilderParams": { + "SystemPrompt": "Agent 3", + "Tools": [], + "MCPServers": [], + }, + }, + ] + } + mock_ddb_helper.get_config.return_value = config_dict + + mock_agents_loader = Mock() + mock_agents_loader.load_agents.return_value = [Mock(), Mock(), Mock()] + mock_agents_loader_class.return_value = mock_agents_loader + + mock_wrap.side_effect = lambda x: x + + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + assert workflow_agent.get_agent_count() == 3 + + +class TestWorkflowAgentCustomTools: + """Test custom tools functionality at workflow level""" + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + @patch("workflow_agent.ToolsManager") + def test_load_workflow_custom_tools( + self, + mock_workflow_tools_manager_class, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_agent_tools_manager, + sample_workflow_config_dict, + ): + """Test loading custom tools at workflow level""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + + config_with_custom_tools = sample_workflow_config_dict.copy() + config_with_custom_tools["WorkflowParams"]["CustomTools"] = [ + {"ToolId": "workflow_calculator"}, + {"ToolId": "workflow_formatter"}, + ] + mock_ddb_helper.get_config.return_value = config_with_custom_tools + + mock_agents_loader = Mock() + mock_specialized_agent = Mock() + mock_specialized_agent.name = "SpecializedAgent" + mock_agents_loader.load_agents.return_value = [mock_specialized_agent] + mock_agents_loader_class.return_value = mock_agents_loader + + mock_model = Mock() + mock_model.model_id = "amazon.nova-pro-v1:0" + mock_bedrock_model.return_value = mock_model + + mock_client_agent = Mock() + mock_client_agent.name = "Test Workflow" + mock_agent.return_value = mock_client_agent + + mock_wrap.return_value = mock_specialized_agent + + mock_workflow_tools_manager = Mock() + mock_custom_tool_1 = Mock() + mock_custom_tool_2 = Mock() + mock_workflow_tools_manager.load_all_tools.return_value = [ + mock_custom_tool_1, + mock_custom_tool_2, + ] + mock_workflow_tools_manager_class.return_value = mock_workflow_tools_manager + + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + mock_workflow_tools_manager_class.assert_called() + mock_workflow_tools_manager.load_all_tools.assert_called_with( + mcp_servers=[], + strands_tool_ids=[], + custom_tool_ids=["workflow_calculator", "workflow_formatter"], + ) + + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + # Should have 1 agent tool + 2 custom tools = 3 total tools + assert len(call_args["tools"]) == 3 + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + @patch("workflow_agent.ToolsManager") + def test_load_workflow_no_custom_tools( + self, + mock_workflow_tools_manager_class, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_agent_tools_manager, + sample_workflow_config_dict, + ): + """Test workflow with no custom tools configured - ToolsManager should still be called""" + # Setup mocks + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + mock_ddb_helper.get_config.return_value = sample_workflow_config_dict + + mock_agents_loader = Mock() + mock_specialized_agent = Mock() + mock_specialized_agent.name = "SpecializedAgent" + mock_agents_loader.load_agents.return_value = [mock_specialized_agent] + mock_agents_loader_class.return_value = mock_agents_loader + + mock_model = Mock() + mock_model.model_id = "amazon.nova-pro-v1:0" + mock_bedrock_model.return_value = mock_model + + mock_client_agent = Mock() + mock_client_agent.name = "Test Workflow" + mock_agent.return_value = mock_client_agent + + mock_wrap.return_value = mock_specialized_agent + + # Setup workflow-level tools manager - should return empty list (no custom tools, no auto-attach) + mock_workflow_tools_manager = Mock() + mock_workflow_tools_manager.load_all_tools.return_value = [] + mock_workflow_tools_manager_class.return_value = mock_workflow_tools_manager + + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + mock_workflow_tools_manager_class.assert_called_with("us-east-1", workflow_agent.config) + mock_workflow_tools_manager.load_all_tools.assert_called_with( + mcp_servers=[], + strands_tool_ids=[], + custom_tool_ids=[], # Empty because no custom tools configured + ) + + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + # Should have 1 agent tool only (no custom tools were loaded) + assert len(call_args["tools"]) == 1 + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + @patch("workflow_agent.ToolsManager") + def test_multimodal_auto_attachment( + self, + mock_workflow_tools_manager_class, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_agent_tools_manager, + sample_workflow_config_dict, + ): + """Test that multimodal tools are auto-attached when multimodal is enabled""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + + config_with_multimodal = sample_workflow_config_dict.copy() + config_with_multimodal["LlmParams"]["MultimodalParams"] = {"MultimodalEnabled": True} + mock_ddb_helper.get_config.return_value = config_with_multimodal + + mock_agents_loader = Mock() + mock_specialized_agent = Mock() + mock_specialized_agent.name = "SpecializedAgent" + mock_agents_loader.load_agents.return_value = [mock_specialized_agent] + mock_agents_loader_class.return_value = mock_agents_loader + + mock_model = Mock() + mock_model.model_id = "amazon.nova-pro-v1:0" + mock_bedrock_model.return_value = mock_model + + mock_client_agent = Mock() + mock_client_agent.name = "Test Workflow" + mock_agent.return_value = mock_client_agent + + mock_wrap.return_value = mock_specialized_agent + + mock_workflow_tools_manager = Mock() + mock_auto_attached_tool = Mock() # This would be the S3FileReaderTool + mock_workflow_tools_manager.load_all_tools.return_value = [mock_auto_attached_tool] + mock_workflow_tools_manager_class.return_value = mock_workflow_tools_manager + + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + assert workflow_agent.config.llm_params.multimodal_params is not None + assert workflow_agent.config.llm_params.multimodal_params.multimodal_enabled is True + + mock_workflow_tools_manager_class.assert_called_with("us-east-1", workflow_agent.config) + mock_workflow_tools_manager.load_all_tools.assert_called_with( + mcp_servers=[], + strands_tool_ids=[], + custom_tool_ids=[], # No explicit custom tools, but auto-attachment should work + ) + + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + # Should have 1 agent tool + 1 auto-attached tool = 2 total tools + assert len(call_args["tools"]) == 2 + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + @patch("workflow_agent.ToolsManager") + def test_comprehensive_multimodal_workflow_with_custom_tools_and_multiple_agents( + self, + mock_workflow_tools_manager_class, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_agent_tools_manager, + multimodal_custom_tools_config, + ): + """Test comprehensive workflow: multimodal enabled, 1 custom tool, 2 agents""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + mock_ddb_helper.get_config.return_value = multimodal_custom_tools_config + + # Setup agents loader with 2 agents + mock_agents_loader = Mock() + mock_agent_1 = Mock() + mock_agent_1.name = "DataAnalysisAgent" + mock_agent_2 = Mock() + mock_agent_2.name = "ReportGeneratorAgent" + mock_agents_loader.load_agents.return_value = [mock_agent_1, mock_agent_2] + mock_agents_loader_class.return_value = mock_agents_loader + + mock_model = Mock() + mock_model.model_id = "amazon.nova-pro-v1:0" + mock_bedrock_model.return_value = mock_model + + mock_client_agent = Mock() + mock_client_agent.name = "Comprehensive Workflow" + mock_agent.return_value = mock_client_agent + + # Mock wrap_tool_with_events to return the agents as tools + mock_wrap.side_effect = lambda x: x + + # Setup workflow-level tools manager - should load 1 custom tool + 1 auto-attached multimodal tool + mock_workflow_tools_manager = Mock() + mock_custom_tool = Mock() # workflow_orchestrator + mock_custom_tool.name = "workflow_orchestrator" + mock_auto_attached_tool = Mock() # S3FileReaderTool (auto-attached due to multimodal) + mock_auto_attached_tool.name = "s3_file_reader" + mock_workflow_tools_manager.load_all_tools.return_value = [ + mock_custom_tool, + mock_auto_attached_tool, + ] + mock_workflow_tools_manager_class.return_value = mock_workflow_tools_manager + + # Create workflow agent + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + assert workflow_agent.config.llm_params.multimodal_params is not None + assert workflow_agent.config.llm_params.multimodal_params.multimodal_enabled is True + + # Verify 2 agents were loaded + assert workflow_agent.get_agent_count() == 2 + mock_agents_loader.load_agents.assert_called_once() + loaded_agent_refs = mock_agents_loader.load_agents.call_args[0][0] + assert len(loaded_agent_refs) == 2 + assert loaded_agent_refs[0].use_case_name == "DataAnalysisAgent" + assert loaded_agent_refs[1].use_case_name == "ReportGeneratorAgent" + + # Verify workflow-level ToolsManager was called for custom tools + auto-attachment + mock_workflow_tools_manager_class.assert_called_with("us-east-1", workflow_agent.config) + mock_workflow_tools_manager.load_all_tools.assert_called_with( + mcp_servers=[], + strands_tool_ids=[], + custom_tool_ids=["workflow_orchestrator"], # 1 workflow-level custom tool + ) + + # Verify client agent was created with correct number of tools + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + # Should have: + # - 2 agent tools (DataAnalysisAgent + ReportGeneratorAgent) + # - 2 workflow-level tools (1 custom tool + 1 auto-attached multimodal tool) + # Total: 4 tools + tools = call_args["tools"] + assert len(tools) == 4 + + assert call_args["name"] == "Comprehensive Workflow" + assert call_args["system_prompt"] == "You are a comprehensive workflow coordinator" + assert call_args["model"] == mock_model + + # Verify that the correct tool IDs were requested from ToolsManager + # This verifies the workflow-level custom tools were loaded + tools_manager_call = mock_workflow_tools_manager.load_all_tools.call_args[1] + assert tools_manager_call["custom_tool_ids"] == ["workflow_orchestrator"] + assert tools_manager_call["mcp_servers"] == [] + assert tools_manager_call["strands_tool_ids"] == [] + + +class TestWorkflowAgentFileHandling: + """Test file/content block handling in workflow agents""" + + @patch("agents_loader.ToolsManager") + @patch("workflow_agent.Agent") + @patch("gaab_strands_common.base_agent.BedrockModel") + @patch("gaab_strands_common.wrap_tool_with_events") + @patch("workflow_agent.AgentsLoader") + @patch("workflow_agent.DynamoDBHelper") + @patch("workflow_agent.ToolsManager") + def test_workflow_agent_with_multimodal_file_processing( + self, + mock_workflow_tools_manager_class, + mock_ddb_helper_class, + mock_agents_loader_class, + mock_wrap, + mock_bedrock_model, + mock_agent, + mock_agent_tools_manager, + sample_workflow_config_dict, + ): + """Test workflow agent handles multimodal requests with files and content blocks""" + mock_ddb_helper = Mock() + mock_ddb_helper_class.return_value = mock_ddb_helper + + # Enable multimodal in the workflow config + config_with_multimodal = sample_workflow_config_dict.copy() + config_with_multimodal["LlmParams"]["MultimodalParams"] = {"MultimodalEnabled": True} + mock_ddb_helper.get_config.return_value = config_with_multimodal + + # Setup agents loader + mock_agents_loader = Mock() + mock_specialized_agent = Mock() + mock_specialized_agent.name = "SpecializedAgent1" + mock_agents_loader.load_agents.return_value = [mock_specialized_agent] + mock_agents_loader_class.return_value = mock_agents_loader + + mock_model = Mock() + mock_model.model_id = "amazon.nova-pro-v1:0" + mock_bedrock_model.return_value = mock_model + + mock_client_agent = Mock() + mock_client_agent.name = "Test Workflow" + mock_agent.return_value = mock_client_agent + + mock_wrap.side_effect = lambda x: x + + # Setup workflow-level tools manager for auto-attachment + mock_workflow_tools_manager = Mock() + mock_s3_tool = Mock() + mock_s3_tool.name = "s3_file_reader" + mock_workflow_tools_manager.load_all_tools.return_value = [mock_s3_tool] + mock_workflow_tools_manager_class.return_value = mock_workflow_tools_manager + + # Create workflow agent + workflow_agent = WorkflowAgent( + table_name="test-table", config_key="test-key", region="us-east-1" + ) + + assert workflow_agent.config.llm_params.multimodal_params is not None + assert workflow_agent.config.llm_params.multimodal_params.multimodal_enabled is True + + # Verify S3FileReaderTool was auto-attached for file handling + mock_workflow_tools_manager_class.assert_called_with("us-east-1", workflow_agent.config) + mock_workflow_tools_manager.load_all_tools.assert_called_with( + mcp_servers=[], + strands_tool_ids=[], + custom_tool_ids=[], # No explicit custom tools, but auto-attachment should work + ) + + # Verify the workflow agent can handle file-based requests + # The S3FileReaderTool should be available for processing file content blocks + mock_agent.assert_called_once() + call_args = mock_agent.call_args[1] + tools = call_args["tools"] + + # Should have agent tools + auto-attached S3 tool for file handling + assert len(tools) >= 2 # At least 1 agent tool + 1 S3 tool + assert mock_s3_tool in tools # S3FileReaderTool for file processing + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/deployment/ecr/gaab-strands-workflow-agent/test/test_workflow_agent_memory.py b/deployment/ecr/gaab-strands-workflow-agent/test/test_workflow_agent_memory.py new file mode 100644 index 00000000..c7535ce5 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/test/test_workflow_agent_memory.py @@ -0,0 +1,230 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Tests for WorkflowAgent memory functionality +""" + +import os +import sys +from unittest.mock import Mock, patch + +# Add src to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src")) + +from workflow_agent import WorkflowAgent + + +class TestWorkflowAgentMemory: + """Test memory functionality in WorkflowAgent""" + + @patch('workflow_agent.DynamoDBHelper') + @patch('workflow_agent.AgentsLoader') + def test_workflow_agent_with_memory_enabled(self, mock_agents_loader, mock_ddb_helper): + """Test workflow agent initialization with memory enabled""" + + # Mock DynamoDB configuration + mock_config_dict = { + "UseCaseType": "Workflow", + "UseCaseName": "Test Workflow", + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": { + "ModelId": "anthropic.claude-3-sonnet-20240229-v1:0" + }, + "Temperature": 0.7, + "Streaming": True + }, + "WorkflowParams": { + "OrchestrationPattern": "agents-as-tools", + "SystemPrompt": "You are a helpful assistant", + "MemoryConfig": { + "LongTermEnabled": True + }, + "AgentsAsToolsParams": { + "Agents": [{ + "AgentId": "test-agent-1", + "AgentName": "TestAgent1", + "UseCaseId": "test-use-case-1", + "UseCaseType": "RAG", + "UseCaseName": "Test Agent", + "AgentBuilderParams": { + "SystemPrompt": "You are a test agent", + "Tools": [] + } + }] + } + } + } + + mock_ddb_helper_instance = Mock() + mock_ddb_helper_instance.get_config.return_value = mock_config_dict + mock_ddb_helper.return_value = mock_ddb_helper_instance + + # Mock agents loader + mock_agents_loader_instance = Mock() + mock_agents_loader_instance.load_agents.return_value = [] + mock_agents_loader.return_value = mock_agents_loader_instance + + # Mock session manager + mock_session_manager = Mock() + + # Create workflow agent with memory + with patch('workflow_agent.Agent') as mock_agent_class: + mock_agent_instance = Mock() + mock_agent_class.return_value = mock_agent_instance + + workflow_agent = WorkflowAgent( + table_name="test-table", + config_key="test-key", + region="us-east-1", + session_manager=mock_session_manager + ) + + # Verify session manager was stored + assert workflow_agent.session_manager == mock_session_manager + + # Verify agent was created with session manager + mock_agent_class.assert_called_once() + call_args = mock_agent_class.call_args + assert "session_manager" in call_args.kwargs + assert call_args.kwargs["session_manager"] == mock_session_manager + + @patch('workflow_agent.DynamoDBHelper') + @patch('workflow_agent.AgentsLoader') + def test_workflow_agent_with_memory_disabled(self, mock_agents_loader, mock_ddb_helper): + """Test workflow agent initialization with memory disabled""" + + # Mock DynamoDB configuration with memory disabled + mock_config_dict = { + "UseCaseType": "Workflow", + "UseCaseName": "Test Workflow", + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": { + "ModelId": "anthropic.claude-3-sonnet-20240229-v1:0" + }, + "Temperature": 0.7, + "Streaming": True + }, + "WorkflowParams": { + "OrchestrationPattern": "agents-as-tools", + "SystemPrompt": "You are a helpful assistant", + "MemoryConfig": { + "LongTermEnabled": False + }, + "AgentsAsToolsParams": { + "Agents": [{ + "AgentId": "test-agent-1", + "AgentName": "TestAgent1", + "UseCaseId": "test-use-case-1", + "UseCaseType": "RAG", + "UseCaseName": "Test Agent", + "AgentBuilderParams": { + "SystemPrompt": "You are a test agent", + "Tools": [] + } + }] + } + } + } + + mock_ddb_helper_instance = Mock() + mock_ddb_helper_instance.get_config.return_value = mock_config_dict + mock_ddb_helper.return_value = mock_ddb_helper_instance + + # Mock agents loader + mock_agents_loader_instance = Mock() + mock_agents_loader_instance.load_agents.return_value = [] + mock_agents_loader.return_value = mock_agents_loader_instance + + # Mock session manager + mock_session_manager = Mock() + + # Create workflow agent with memory disabled + with patch('workflow_agent.Agent') as mock_agent_class: + mock_agent_instance = Mock() + mock_agent_class.return_value = mock_agent_instance + + workflow_agent = WorkflowAgent( + table_name="test-table", + config_key="test-key", + region="us-east-1", + session_manager=mock_session_manager + ) + + # Verify session manager was stored but not used + assert workflow_agent.session_manager == mock_session_manager + + # Verify agent was created without session manager + mock_agent_class.assert_called_once() + call_args = mock_agent_class.call_args + assert "session_manager" not in call_args.kwargs + + @patch('workflow_agent.DynamoDBHelper') + @patch('workflow_agent.AgentsLoader') + def test_workflow_agent_without_session_manager(self, mock_agents_loader, mock_ddb_helper): + """Test workflow agent initialization without session manager""" + + # Mock DynamoDB configuration with memory enabled + mock_config_dict = { + "UseCaseType": "Workflow", + "UseCaseName": "Test Workflow", + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": { + "ModelId": "anthropic.claude-3-sonnet-20240229-v1:0" + }, + "Temperature": 0.7, + "Streaming": True + }, + "WorkflowParams": { + "OrchestrationPattern": "agents-as-tools", + "SystemPrompt": "You are a helpful assistant", + "MemoryConfig": { + "LongTermEnabled": True + }, + "AgentsAsToolsParams": { + "Agents": [{ + "AgentId": "test-agent-1", + "AgentName": "TestAgent1", + "UseCaseId": "test-use-case-1", + "UseCaseType": "RAG", + "UseCaseName": "Test Agent", + "AgentBuilderParams": { + "SystemPrompt": "You are a test agent", + "Tools": [] + } + }] + } + } + } + + mock_ddb_helper_instance = Mock() + mock_ddb_helper_instance.get_config.return_value = mock_config_dict + mock_ddb_helper.return_value = mock_ddb_helper_instance + + # Mock agents loader + mock_agents_loader_instance = Mock() + mock_agents_loader_instance.load_agents.return_value = [] + mock_agents_loader.return_value = mock_agents_loader_instance + + # Create workflow agent without session manager + with patch('workflow_agent.Agent') as mock_agent_class: + mock_agent_instance = Mock() + mock_agent_class.return_value = mock_agent_instance + + workflow_agent = WorkflowAgent( + table_name="test-table", + config_key="test-key", + region="us-east-1", + session_manager=None + ) + + # Verify no session manager + assert workflow_agent.session_manager is None + + # Verify agent was created without session manager + mock_agent_class.assert_called_once() + call_args = mock_agent_class.call_args + assert "session_manager" not in call_args.kwargs diff --git a/deployment/ecr/gaab-strands-workflow-agent/uv.lock b/deployment/ecr/gaab-strands-workflow-agent/uv.lock new file mode 100644 index 00000000..5b73ff94 --- /dev/null +++ b/deployment/ecr/gaab-strands-workflow-agent/uv.lock @@ -0,0 +1,2987 @@ +version = 1 +revision = 2 +requires-python = ">=3.13" + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/f1/8515650ac3121a9e55c7b217c60e7fae3e0134b5acfe65691781b5356929/aiohttp-3.13.0.tar.gz", hash = "sha256:378dbc57dd8cf341ce243f13fa1fa5394d68e2e02c15cd5f28eae35a70ec7f67", size = 7832348, upload-time = "2025-10-06T19:58:48.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/2c/ac53efdc9c10e41399acc2395af98f835b86d0141d5c3820857eb9f6a14a/aiohttp-3.13.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:00243e51f16f6ec0fb021659d4af92f675f3cf9f9b39efd142aa3ad641d8d1e6", size = 730090, upload-time = "2025-10-06T19:56:16.858Z" }, + { url = "https://files.pythonhosted.org/packages/13/18/1ac95683e1c1d48ef4503965c96f5401618a04c139edae12e200392daae8/aiohttp-3.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:059978d2fddc462e9211362cbc8446747ecd930537fa559d3d25c256f032ff54", size = 488041, upload-time = "2025-10-06T19:56:18.659Z" }, + { url = "https://files.pythonhosted.org/packages/fd/79/ef0d477c771a642d1a881b92d226314c43d3c74bc674c93e12e679397a97/aiohttp-3.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:564b36512a7da3b386143c611867e3f7cfb249300a1bf60889bd9985da67ab77", size = 486989, upload-time = "2025-10-06T19:56:20.371Z" }, + { url = "https://files.pythonhosted.org/packages/37/b4/0e440481a0e77a551d6c5dcab5d11f1ff6b2b2ddb8dedc24f54f5caad732/aiohttp-3.13.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4aa995b9156ae499393d949a456a7ab0b994a8241a96db73a3b73c7a090eff6a", size = 1718331, upload-time = "2025-10-06T19:56:22.188Z" }, + { url = "https://files.pythonhosted.org/packages/e6/59/76c421cc4a75bb1aceadb92f20ee6f05a990aa6960c64b59e8e0d340e3f5/aiohttp-3.13.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:55ca0e95a3905f62f00900255ed807c580775174252999286f283e646d675a49", size = 1686263, upload-time = "2025-10-06T19:56:24.393Z" }, + { url = "https://files.pythonhosted.org/packages/ec/ac/5095f12a79c7775f402cfc3e83651b6e0a92ade10ddf7f2c78c4fed79f71/aiohttp-3.13.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:49ce7525853a981fc35d380aa2353536a01a9ec1b30979ea4e35966316cace7e", size = 1754265, upload-time = "2025-10-06T19:56:26.365Z" }, + { url = "https://files.pythonhosted.org/packages/05/d7/a48e4989bd76cc70600c505bbdd0d90ca1ad7f9053eceeb9dbcf9345a9ec/aiohttp-3.13.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2117be9883501eaf95503bd313eb4c7a23d567edd44014ba15835a1e9ec6d852", size = 1856486, upload-time = "2025-10-06T19:56:28.438Z" }, + { url = "https://files.pythonhosted.org/packages/1e/02/45b388b49e37933f316e1fb39c0de6fb1d77384b0c8f4cf6af5f2cbe3ea6/aiohttp-3.13.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d169c47e40c911f728439da853b6fd06da83761012e6e76f11cb62cddae7282b", size = 1737545, upload-time = "2025-10-06T19:56:30.688Z" }, + { url = "https://files.pythonhosted.org/packages/6c/a7/4fde058f1605c34a219348a83a99f14724cc64e68a42480fc03cf40f9ea3/aiohttp-3.13.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:703ad3f742fc81e543638a7bebddd35acadaa0004a5e00535e795f4b6f2c25ca", size = 1552958, upload-time = "2025-10-06T19:56:32.528Z" }, + { url = "https://files.pythonhosted.org/packages/d1/12/0bac4d29231981e3aa234e88d1931f6ba38135ff4c2cf3afbb7895527630/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5bf635c3476f4119b940cc8d94ad454cbe0c377e61b4527f0192aabeac1e9370", size = 1681166, upload-time = "2025-10-06T19:56:34.81Z" }, + { url = "https://files.pythonhosted.org/packages/71/95/b829eb5f8ac1ca1d8085bb8df614c8acf3ff32e23ad5ad1173c7c9761daa/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cfe6285ef99e7ee51cef20609be2bc1dd0e8446462b71c9db8bb296ba632810a", size = 1710516, upload-time = "2025-10-06T19:56:36.787Z" }, + { url = "https://files.pythonhosted.org/packages/47/6d/15ccf4ef3c254d899f62580e0c7fc717014f4d14a3ac31771e505d2c736c/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:34d8af6391c5f2e69749d7f037b614b8c5c42093c251f336bdbfa4b03c57d6c4", size = 1731354, upload-time = "2025-10-06T19:56:38.659Z" }, + { url = "https://files.pythonhosted.org/packages/46/6a/8acf6c57e03b6fdcc8b4c06392e66abaff3213ea275e41db3edb20738d91/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:12f5d820fadc5848d4559ea838aef733cf37ed2a1103bba148ac2f5547c14c29", size = 1548040, upload-time = "2025-10-06T19:56:40.578Z" }, + { url = "https://files.pythonhosted.org/packages/75/7d/fbfd59ab2a83fe2578ce79ac3db49727b81e9f4c3376217ad09c03c6d279/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f1338b61ea66f4757a0544ed8a02ccbf60e38d9cfb3225888888dd4475ebb96", size = 1756031, upload-time = "2025-10-06T19:56:42.492Z" }, + { url = "https://files.pythonhosted.org/packages/99/e7/cc9f0fdf06cab3ca61e6b62bff9a4b978b8ca736e9d76ddf54365673ab19/aiohttp-3.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:582770f82513419512da096e8df21ca44f86a2e56e25dc93c5ab4df0fe065bf0", size = 1714933, upload-time = "2025-10-06T19:56:45.542Z" }, + { url = "https://files.pythonhosted.org/packages/db/43/7abbe1de94748a58a71881163ee280fd3217db36e8344d109f63638fe16a/aiohttp-3.13.0-cp313-cp313-win32.whl", hash = "sha256:3194b8cab8dbc882f37c13ef1262e0a3d62064fa97533d3aa124771f7bf1ecee", size = 423799, upload-time = "2025-10-06T19:56:47.779Z" }, + { url = "https://files.pythonhosted.org/packages/c9/58/afab7f2b9e7df88c995995172eb78cae8a3d5a62d5681abaade86b3f0089/aiohttp-3.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:7897298b3eedc790257fef8a6ec582ca04e9dbe568ba4a9a890913b925b8ea21", size = 450138, upload-time = "2025-10-06T19:56:49.49Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c1/93bb1e35cd0c4665bb422b1ca3d87b588f4bca2656bbe9292b963d5b76a9/aiohttp-3.13.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:c417f8c2e1137775569297c584a8a7144e5d1237789eae56af4faf1894a0b861", size = 733187, upload-time = "2025-10-06T19:56:51.385Z" }, + { url = "https://files.pythonhosted.org/packages/5e/36/2d50eba91992d3fe7a6452506ccdab45d03685ee8d8acaa5b289384a7d4c/aiohttp-3.13.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f84b53326abf8e56ebc28a35cebf4a0f396a13a76300f500ab11fe0573bf0b52", size = 488684, upload-time = "2025-10-06T19:56:53.25Z" }, + { url = "https://files.pythonhosted.org/packages/82/93/fa4b1d5ecdc7805bdf0815ef00257db4632ccf0a8bffd44f9fc4657b1677/aiohttp-3.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:990a53b9d6a30b2878789e490758e568b12b4a7fb2527d0c89deb9650b0e5813", size = 489255, upload-time = "2025-10-06T19:56:55.136Z" }, + { url = "https://files.pythonhosted.org/packages/05/0f/85241f0d158da5e24e8ac9d50c0849ed24f882cafc53dc95749ef85eef09/aiohttp-3.13.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c811612711e01b901e18964b3e5dec0d35525150f5f3f85d0aee2935f059910a", size = 1715914, upload-time = "2025-10-06T19:56:57.286Z" }, + { url = "https://files.pythonhosted.org/packages/ab/fc/c755590d6f6d2b5d1565c72d6ee658d3c30ec61acb18964d1e9bf991d9b5/aiohttp-3.13.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ee433e594d7948e760b5c2a78cc06ac219df33b0848793cf9513d486a9f90a52", size = 1665171, upload-time = "2025-10-06T19:56:59.688Z" }, + { url = "https://files.pythonhosted.org/packages/3a/de/caa61e213ff546b8815aef5e931d7eae1dbe8c840a3f11ec5aa41c5ae462/aiohttp-3.13.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:19bb08e56f57c215e9572cd65cb6f8097804412c54081d933997ddde3e5ac579", size = 1755124, upload-time = "2025-10-06T19:57:02.69Z" }, + { url = "https://files.pythonhosted.org/packages/fb/b7/40c3219dd2691aa35cf889b4fbb0c00e48a19092928707044bfe92068e01/aiohttp-3.13.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f27b7488144eb5dd9151cf839b195edd1569629d90ace4c5b6b18e4e75d1e63a", size = 1835949, upload-time = "2025-10-06T19:57:05.251Z" }, + { url = "https://files.pythonhosted.org/packages/57/e8/66e3c32841fc0e26a09539c377aa0f3bbf6deac1957ac5182cf276c5719c/aiohttp-3.13.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d812838c109757a11354a161c95708ae4199c4fd4d82b90959b20914c1d097f6", size = 1714276, upload-time = "2025-10-06T19:57:07.41Z" }, + { url = "https://files.pythonhosted.org/packages/6b/a5/c68e5b46ff0410fe3abfa508651b09372428f27036138beacf4ff6b7cb8c/aiohttp-3.13.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7c20db99da682f9180fa5195c90b80b159632fb611e8dbccdd99ba0be0970620", size = 1545929, upload-time = "2025-10-06T19:57:09.336Z" }, + { url = "https://files.pythonhosted.org/packages/7a/a6/4c97dc27f9935c0c0aa6e3e10e5b4548823ab5d056636bde374fcd297256/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cf8b0870047900eb1f17f453b4b3953b8ffbf203ef56c2f346780ff930a4d430", size = 1679988, upload-time = "2025-10-06T19:57:11.367Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1b/11f9c52fd72b786a47e796e6794883417280cdca8eb1032d8d0939928dfa/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:5b8a5557d5af3f4e3add52a58c4cf2b8e6e59fc56b261768866f5337872d596d", size = 1678031, upload-time = "2025-10-06T19:57:13.357Z" }, + { url = "https://files.pythonhosted.org/packages/ea/eb/948903d40505f3a25e53e051488d2714ded3afac1f961df135f2936680f9/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:052bcdd80c1c54b8a18a9ea0cd5e36f473dc8e38d51b804cea34841f677a9971", size = 1726184, upload-time = "2025-10-06T19:57:15.478Z" }, + { url = "https://files.pythonhosted.org/packages/44/14/c8ced38c7dfe80804dec17a671963ccf3cb282f12700ec70b1f689d8de7d/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:76484ba17b2832776581b7ab466d094e48eba74cb65a60aea20154dae485e8bd", size = 1542344, upload-time = "2025-10-06T19:57:17.611Z" }, + { url = "https://files.pythonhosted.org/packages/a4/6e/f2e6bff550a51fd7c45fdab116a1dab7cc502e5d942956f10fc5c626bb15/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:62d8a0adcdaf62ee56bfb37737153251ac8e4b27845b3ca065862fb01d99e247", size = 1740913, upload-time = "2025-10-06T19:57:19.821Z" }, + { url = "https://files.pythonhosted.org/packages/da/00/8f057300d9b598a706348abb375b3de9a253195fb615f17c0b2be2a72836/aiohttp-3.13.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5004d727499ecb95f7c9147dd0bfc5b5670f71d355f0bd26d7af2d3af8e07d2f", size = 1695535, upload-time = "2025-10-06T19:57:21.856Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ab/6919d584d8f053a14b15f0bfa3f315b3f548435c2142145459da2efa8673/aiohttp-3.13.0-cp314-cp314-win32.whl", hash = "sha256:a1c20c26af48aea984f63f96e5d7af7567c32cb527e33b60a0ef0a6313cf8b03", size = 429548, upload-time = "2025-10-06T19:57:24.285Z" }, + { url = "https://files.pythonhosted.org/packages/c5/59/5d9e78de6132079066f5077d9687bf524f764a2f8207e04d8d68790060c6/aiohttp-3.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:56f7d230ec66e799fbfd8350e9544f8a45a4353f1cf40c1fea74c1780f555b8f", size = 455548, upload-time = "2025-10-06T19:57:26.136Z" }, + { url = "https://files.pythonhosted.org/packages/7c/ea/7d98da03d1e9798bb99c3ca4963229150d45c9b7a3a16210c5b4a5f89e07/aiohttp-3.13.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:2fd35177dc483ae702f07b86c782f4f4b100a8ce4e7c5778cea016979023d9fd", size = 765319, upload-time = "2025-10-06T19:57:28.278Z" }, + { url = "https://files.pythonhosted.org/packages/5c/02/37f29beced8213bb467c52ad509a5e3b41e6e967de2f6eaf7f8db63bea54/aiohttp-3.13.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4df1984c8804ed336089e88ac81a9417b1fd0db7c6f867c50a9264488797e778", size = 502567, upload-time = "2025-10-06T19:57:30.273Z" }, + { url = "https://files.pythonhosted.org/packages/e7/22/b0afcafcfe3637bc8d7992abf08ee9452018366c0801e4e7d4efda2ed839/aiohttp-3.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e68c0076052dd911a81d3acc4ef2911cc4ef65bf7cadbfbc8ae762da24da858f", size = 507078, upload-time = "2025-10-06T19:57:32.619Z" }, + { url = "https://files.pythonhosted.org/packages/49/4c/046c847b7a1993b49f3855cc3b97872d5df193d9240de835d0dc6a97b164/aiohttp-3.13.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc95c49853cd29613e4fe4ff96d73068ff89b89d61e53988442e127e8da8e7ba", size = 1862115, upload-time = "2025-10-06T19:57:34.758Z" }, + { url = "https://files.pythonhosted.org/packages/1a/25/1449a59e3c6405da5e47b0138ee0855414dc12a8c306685d7fc3dd300e1f/aiohttp-3.13.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3b3bdc89413117b40cc39baae08fd09cbdeb839d421c4e7dce6a34f6b54b3ac1", size = 1717147, upload-time = "2025-10-06T19:57:36.938Z" }, + { url = "https://files.pythonhosted.org/packages/23/8f/50cc34ad267b38608f21c6a74327015dd08a66f1dd8e7ceac954d0953191/aiohttp-3.13.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e77a729df23be2116acc4e9de2767d8e92445fbca68886dd991dc912f473755", size = 1841443, upload-time = "2025-10-06T19:57:39.708Z" }, + { url = "https://files.pythonhosted.org/packages/df/b9/b3ab1278faa0d1b8f434c85f9cf34eeb0a25016ffe1ee6bc361d09fef0ec/aiohttp-3.13.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e88ab34826d6eeb6c67e6e92400b9ec653faf5092a35f07465f44c9f1c429f82", size = 1933652, upload-time = "2025-10-06T19:57:42.33Z" }, + { url = "https://files.pythonhosted.org/packages/88/e2/86050aaa3bd7021b115cdfc88477b754e8cf93ef0079867840eee22d3c34/aiohttp-3.13.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:019dbef24fe28ce2301419dd63a2b97250d9760ca63ee2976c2da2e3f182f82e", size = 1790682, upload-time = "2025-10-06T19:57:44.851Z" }, + { url = "https://files.pythonhosted.org/packages/78/8d/9af903324c2ba24a0c4778e9bcc738b773c98dded3a4fcf8041d5211769f/aiohttp-3.13.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:2c4aeaedd20771b7b4bcdf0ae791904445df6d856c02fc51d809d12d17cffdc7", size = 1622011, upload-time = "2025-10-06T19:57:47.025Z" }, + { url = "https://files.pythonhosted.org/packages/84/97/5174971ba4986d913554ceb248b0401eb5358cb60672ea0166f9f596cd08/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b3a8e6a2058a0240cfde542b641d0e78b594311bc1a710cbcb2e1841417d5cb3", size = 1787148, upload-time = "2025-10-06T19:57:49.149Z" }, + { url = "https://files.pythonhosted.org/packages/dd/ae/8b397e980ac613ef3ddd8e996aa7a40a1828df958257800d4bb325657db3/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:f8e38d55ca36c15f36d814ea414ecb2401d860de177c49f84a327a25b3ee752b", size = 1774816, upload-time = "2025-10-06T19:57:51.523Z" }, + { url = "https://files.pythonhosted.org/packages/c7/54/0e8e2111dd92051c787e934b6bbf30c213daaa5e7ee5f51bca8913607492/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a921edbe971aade1bf45bcbb3494e30ba6863a5c78f28be992c42de980fd9108", size = 1788610, upload-time = "2025-10-06T19:57:54.337Z" }, + { url = "https://files.pythonhosted.org/packages/fa/dd/c9283dbfd9325ed6fa6c91f009db6344d8d370a7bcf09f36e7b2fcbfae02/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:474cade59a447cb4019c0dce9f0434bf835fb558ea932f62c686fe07fe6db6a1", size = 1615498, upload-time = "2025-10-06T19:57:56.604Z" }, + { url = "https://files.pythonhosted.org/packages/8c/f6/da76230679bd9ef175d876093f89e7fd6d6476c18505e115e3026fe5ef95/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:99a303ad960747c33b65b1cb65d01a62ac73fa39b72f08a2e1efa832529b01ed", size = 1815187, upload-time = "2025-10-06T19:57:59.036Z" }, + { url = "https://files.pythonhosted.org/packages/d5/78/394003ac738703822616f4f922705b54e5b3d8e7185831ecc1c97904174d/aiohttp-3.13.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:bb34001fc1f05f6b323e02c278090c07a47645caae3aa77ed7ed8a3ce6abcce9", size = 1760281, upload-time = "2025-10-06T19:58:01.585Z" }, + { url = "https://files.pythonhosted.org/packages/bd/b0/4bad0a9dd5910bd01c3119f8bd3d71887cd412d4105e4acddcdacf3cfa76/aiohttp-3.13.0-cp314-cp314t-win32.whl", hash = "sha256:dea698b64235d053def7d2f08af9302a69fcd760d1c7bd9988fd5d3b6157e657", size = 462608, upload-time = "2025-10-06T19:58:03.674Z" }, + { url = "https://files.pythonhosted.org/packages/bd/af/ad12d592f623aae2bd1d3463201dc39c201ea362f9ddee0d03efd9e83720/aiohttp-3.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:1f164699a060c0b3616459d13c1464a981fddf36f892f0a5027cbd45121fb14b", size = 496010, upload-time = "2025-10-06T19:58:05.589Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, +] + +[[package]] +name = "asgiref" +version = "3.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/08/4dfec9b90758a59acc6be32ac82e98d1fbfc321cb5cfa410436dbacf821c/asgiref-3.10.0.tar.gz", hash = "sha256:d89f2d8cd8b56dada7d52fa7dc8075baa08fb836560710d38c292a7a3f78c04e", size = 37483, upload-time = "2025-10-05T09:15:06.557Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/9c/fc2331f538fbf7eedba64b2052e99ccf9ba9d6888e2f41441ee28847004b/asgiref-3.10.0-py3-none-any.whl", hash = "sha256:aef8a81283a34d0ab31630c9b7dfe70c812c95eba78171367ca8745e88124734", size = 24050, upload-time = "2025-10-05T09:15:05.11Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "aws-opentelemetry-distro" +version = "0.12.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-distro" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-exporter-otlp-proto-grpc" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-aio-pika" }, + { name = "opentelemetry-instrumentation-aiohttp-client" }, + { name = "opentelemetry-instrumentation-aiopg" }, + { name = "opentelemetry-instrumentation-asgi" }, + { name = "opentelemetry-instrumentation-asyncpg" }, + { name = "opentelemetry-instrumentation-aws-lambda" }, + { name = "opentelemetry-instrumentation-boto" }, + { name = "opentelemetry-instrumentation-boto3sqs" }, + { name = "opentelemetry-instrumentation-botocore" }, + { name = "opentelemetry-instrumentation-cassandra" }, + { name = "opentelemetry-instrumentation-celery" }, + { name = "opentelemetry-instrumentation-confluent-kafka" }, + { name = "opentelemetry-instrumentation-dbapi" }, + { name = "opentelemetry-instrumentation-django" }, + { name = "opentelemetry-instrumentation-elasticsearch" }, + { name = "opentelemetry-instrumentation-falcon" }, + { name = "opentelemetry-instrumentation-fastapi" }, + { name = "opentelemetry-instrumentation-flask" }, + { name = "opentelemetry-instrumentation-grpc" }, + { name = "opentelemetry-instrumentation-httpx" }, + { name = "opentelemetry-instrumentation-jinja2" }, + { name = "opentelemetry-instrumentation-kafka-python" }, + { name = "opentelemetry-instrumentation-logging" }, + { name = "opentelemetry-instrumentation-mysql" }, + { name = "opentelemetry-instrumentation-mysqlclient" }, + { name = "opentelemetry-instrumentation-pika" }, + { name = "opentelemetry-instrumentation-psycopg2" }, + { name = "opentelemetry-instrumentation-pymemcache" }, + { name = "opentelemetry-instrumentation-pymongo" }, + { name = "opentelemetry-instrumentation-pymysql" }, + { name = "opentelemetry-instrumentation-pyramid" }, + { name = "opentelemetry-instrumentation-redis" }, + { name = "opentelemetry-instrumentation-remoulade" }, + { name = "opentelemetry-instrumentation-requests" }, + { name = "opentelemetry-instrumentation-sqlalchemy" }, + { name = "opentelemetry-instrumentation-sqlite3" }, + { name = "opentelemetry-instrumentation-starlette" }, + { name = "opentelemetry-instrumentation-system-metrics" }, + { name = "opentelemetry-instrumentation-tornado" }, + { name = "opentelemetry-instrumentation-tortoiseorm" }, + { name = "opentelemetry-instrumentation-urllib" }, + { name = "opentelemetry-instrumentation-urllib3" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-processor-baggage" }, + { name = "opentelemetry-propagator-aws-xray" }, + { name = "opentelemetry-propagator-b3" }, + { name = "opentelemetry-propagator-jaeger" }, + { name = "opentelemetry-propagator-ot-trace" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-sdk-extension-aws" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/f3/edd092108c7da10fbf36ccfca0c242b37041d81536b4bfd03a2c4b403cc0/aws_opentelemetry_distro-0.12.2-py3-none-any.whl", hash = "sha256:2acbdbc6f4eb04375e18febd01e7363f56600bc2a2f6009e7e40beefc1f57112", size = 105336, upload-time = "2025-10-24T22:03:39.085Z" }, +] + +[[package]] +name = "aws-requests-auth" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/54/b2/455c0bfcbd772dafd4c9e93c4b713e36790abf9ccbca9b8e661968b29798/aws-requests-auth-0.4.3.tar.gz", hash = "sha256:33593372018b960a31dbbe236f89421678b885c35f0b6a7abfae35bb77e069b2", size = 10096, upload-time = "2020-05-27T23:10:34.742Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/11/5dc8be418e1d54bed15eaf3a7461797e5ebb9e6a34869ad750561f35fa5b/aws_requests_auth-0.4.3-py2.py3-none-any.whl", hash = "sha256:646bc37d62140ea1c709d20148f5d43197e6bd2d63909eb36fa4bb2345759977", size = 6838, upload-time = "2020-05-27T23:10:33.658Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.14.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/77/e9/df2358efd7659577435e2177bfa69cba6c33216681af51a707193dec162a/beautifulsoup4-4.14.2.tar.gz", hash = "sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e", size = 625822, upload-time = "2025-09-29T10:05:42.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/fe/3aed5d0be4d404d12d36ab97e2f1791424d9ca39c2f754a6285d59a3b01d/beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515", size = 106392, upload-time = "2025-09-29T10:05:43.771Z" }, +] + +[[package]] +name = "bedrock-agentcore" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "urllib3" }, + { name = "uvicorn" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b0/87/4c0bacf09430e559657fc986cbb1003f76d597ab7e7365ab247dbef73940/bedrock_agentcore-0.1.7.tar.gz", hash = "sha256:e518e8f5e6fb5a5a80182db95757a20e32b0ac2b33d0a1909dfafcba950c6356", size = 263080, upload-time = "2025-10-01T16:18:39.255Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/f3/a9d961cfba236dc85f27f2f2c6eab88e12698754aaa02459ba7dfafc5062/bedrock_agentcore-0.1.7-py3-none-any.whl", hash = "sha256:441dde64fea596e9571e47ae37ee3b033e58d8d255018f13bdcde8ae8bef2075", size = 77216, upload-time = "2025-10-01T16:18:38.153Z" }, +] + +[[package]] +name = "black" +version = "25.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "pytokens" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/43/20b5c90612d7bdb2bdbcceeb53d588acca3bb8f0e4c5d5c751a2c8fdd55a/black-25.9.0.tar.gz", hash = "sha256:0474bca9a0dd1b51791fcc507a4e02078a1c63f6d4e4ae5544b9848c7adfb619", size = 648393, upload-time = "2025-09-19T00:27:37.758Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/99/3acfea65f5e79f45472c45f87ec13037b506522719cd9d4ac86484ff51ac/black-25.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0172a012f725b792c358d57fe7b6b6e8e67375dd157f64fa7a3097b3ed3e2175", size = 1742165, upload-time = "2025-09-19T00:34:10.402Z" }, + { url = "https://files.pythonhosted.org/packages/3a/18/799285282c8236a79f25d590f0222dbd6850e14b060dfaa3e720241fd772/black-25.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3bec74ee60f8dfef564b573a96b8930f7b6a538e846123d5ad77ba14a8d7a64f", size = 1581259, upload-time = "2025-09-19T00:32:49.685Z" }, + { url = "https://files.pythonhosted.org/packages/f1/ce/883ec4b6303acdeca93ee06b7622f1fa383c6b3765294824165d49b1a86b/black-25.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b756fc75871cb1bcac5499552d771822fd9db5a2bb8db2a7247936ca48f39831", size = 1655583, upload-time = "2025-09-19T00:30:44.505Z" }, + { url = "https://files.pythonhosted.org/packages/21/17/5c253aa80a0639ccc427a5c7144534b661505ae2b5a10b77ebe13fa25334/black-25.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:846d58e3ce7879ec1ffe816bb9df6d006cd9590515ed5d17db14e17666b2b357", size = 1343428, upload-time = "2025-09-19T00:32:13.839Z" }, + { url = "https://files.pythonhosted.org/packages/1b/46/863c90dcd3f9d41b109b7f19032ae0db021f0b2a81482ba0a1e28c84de86/black-25.9.0-py3-none-any.whl", hash = "sha256:474b34c1342cdc157d307b56c4c65bce916480c4a8f6551fdc6bf9b486a7c4ae", size = 203363, upload-time = "2025-09-19T00:27:35.724Z" }, +] + +[[package]] +name = "boto3" +version = "1.40.52" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/5a/8ba08c979926326d961e2384d994d789a2eda3ed281bb6cb333b36e92310/boto3-1.40.52.tar.gz", hash = "sha256:96ee720b52be647d8ef5ba92fccfce6b65d6321769430fe6edd10d57ec43c25b", size = 111530, upload-time = "2025-10-14T20:32:12.226Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/d2/879e9787c5263aefc5c88f0dd97cdea29ac01c480dd53c2421de77a493f7/boto3-1.40.52-py3-none-any.whl", hash = "sha256:ecc8c99d3cc96716cdfba62d9c9c6ce0eb98d02494a66690bcc2ec181c1ced67", size = 139345, upload-time = "2025-10-14T20:32:10.801Z" }, +] + +[[package]] +name = "botocore" +version = "1.40.52" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/74/3449d77c002d82586786b91dff6dd2e6fd52c5cdc1793d1ac7ea690ea52c/botocore-1.40.52.tar.gz", hash = "sha256:b65d970ca4ccd869639332083da17c3a933bcf495120dcc4f5c7723cb3f6216c", size = 14427680, upload-time = "2025-10-14T20:32:03.065Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/ad/559dc4097fe1368e5f3abb5d8ca496f9c609e4e452498bca11134fde1462/botocore-1.40.52-py3-none-any.whl", hash = "sha256:838697a06c7713df8d39f088105334b4eadcc3d65c7a260bf1a1bd8bf616ce4a", size = 14098823, upload-time = "2025-10-14T20:32:00.094Z" }, +] + +[[package]] +name = "certifi" +version = "2025.10.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/5b/b6ce21586237c77ce67d01dc5507039d444b630dd76611bbca2d8e5dcd91/certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43", size = 164519, upload-time = "2025-10-05T04:12:15.808Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de", size = 163286, upload-time = "2025-10-05T04:12:14.03Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/61/de6cd827efad202d7057d93e0fed9294b96952e188f7384832791c7b2254/click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4", size = 276943, upload-time = "2025-09-18T17:32:23.696Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc", size = 107295, upload-time = "2025-09-18T17:32:22.42Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "coverage" +version = "7.10.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/26/d22c300112504f5f9a9fd2297ce33c35f3d353e4aeb987c8419453b2a7c2/coverage-7.10.7.tar.gz", hash = "sha256:f4ab143ab113be368a3e9b795f9cd7906c5ef407d6173fe9675a902e1fffc239", size = 827704, upload-time = "2025-09-21T20:03:56.815Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/94/b765c1abcb613d103b64fcf10395f54d69b0ef8be6a0dd9c524384892cc7/coverage-7.10.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:981a651f543f2854abd3b5fcb3263aac581b18209be49863ba575de6edf4c14d", size = 218320, upload-time = "2025-09-21T20:01:56.629Z" }, + { url = "https://files.pythonhosted.org/packages/72/4f/732fff31c119bb73b35236dd333030f32c4bfe909f445b423e6c7594f9a2/coverage-7.10.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:73ab1601f84dc804f7812dc297e93cd99381162da39c47040a827d4e8dafe63b", size = 218575, upload-time = "2025-09-21T20:01:58.203Z" }, + { url = "https://files.pythonhosted.org/packages/87/02/ae7e0af4b674be47566707777db1aa375474f02a1d64b9323e5813a6cdd5/coverage-7.10.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a8b6f03672aa6734e700bbcd65ff050fd19cddfec4b031cc8cf1c6967de5a68e", size = 249568, upload-time = "2025-09-21T20:01:59.748Z" }, + { url = "https://files.pythonhosted.org/packages/a2/77/8c6d22bf61921a59bce5471c2f1f7ac30cd4ac50aadde72b8c48d5727902/coverage-7.10.7-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10b6ba00ab1132a0ce4428ff68cf50a25efd6840a42cdf4239c9b99aad83be8b", size = 252174, upload-time = "2025-09-21T20:02:01.192Z" }, + { url = "https://files.pythonhosted.org/packages/b1/20/b6ea4f69bbb52dac0aebd62157ba6a9dddbfe664f5af8122dac296c3ee15/coverage-7.10.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c79124f70465a150e89340de5963f936ee97097d2ef76c869708c4248c63ca49", size = 253447, upload-time = "2025-09-21T20:02:02.701Z" }, + { url = "https://files.pythonhosted.org/packages/f9/28/4831523ba483a7f90f7b259d2018fef02cb4d5b90bc7c1505d6e5a84883c/coverage-7.10.7-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:69212fbccdbd5b0e39eac4067e20a4a5256609e209547d86f740d68ad4f04911", size = 249779, upload-time = "2025-09-21T20:02:04.185Z" }, + { url = "https://files.pythonhosted.org/packages/a7/9f/4331142bc98c10ca6436d2d620c3e165f31e6c58d43479985afce6f3191c/coverage-7.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7ea7c6c9d0d286d04ed3541747e6597cbe4971f22648b68248f7ddcd329207f0", size = 251604, upload-time = "2025-09-21T20:02:06.034Z" }, + { url = "https://files.pythonhosted.org/packages/ce/60/bda83b96602036b77ecf34e6393a3836365481b69f7ed7079ab85048202b/coverage-7.10.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b9be91986841a75042b3e3243d0b3cb0b2434252b977baaf0cd56e960fe1e46f", size = 249497, upload-time = "2025-09-21T20:02:07.619Z" }, + { url = "https://files.pythonhosted.org/packages/5f/af/152633ff35b2af63977edd835d8e6430f0caef27d171edf2fc76c270ef31/coverage-7.10.7-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:b281d5eca50189325cfe1f365fafade89b14b4a78d9b40b05ddd1fc7d2a10a9c", size = 249350, upload-time = "2025-09-21T20:02:10.34Z" }, + { url = "https://files.pythonhosted.org/packages/9d/71/d92105d122bd21cebba877228990e1646d862e34a98bb3374d3fece5a794/coverage-7.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:99e4aa63097ab1118e75a848a28e40d68b08a5e19ce587891ab7fd04475e780f", size = 251111, upload-time = "2025-09-21T20:02:12.122Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9e/9fdb08f4bf476c912f0c3ca292e019aab6712c93c9344a1653986c3fd305/coverage-7.10.7-cp313-cp313-win32.whl", hash = "sha256:dc7c389dce432500273eaf48f410b37886be9208b2dd5710aaf7c57fd442c698", size = 220746, upload-time = "2025-09-21T20:02:13.919Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b1/a75fd25df44eab52d1931e89980d1ada46824c7a3210be0d3c88a44aaa99/coverage-7.10.7-cp313-cp313-win_amd64.whl", hash = "sha256:cac0fdca17b036af3881a9d2729a850b76553f3f716ccb0360ad4dbc06b3b843", size = 221541, upload-time = "2025-09-21T20:02:15.57Z" }, + { url = "https://files.pythonhosted.org/packages/14/3a/d720d7c989562a6e9a14b2c9f5f2876bdb38e9367126d118495b89c99c37/coverage-7.10.7-cp313-cp313-win_arm64.whl", hash = "sha256:4b6f236edf6e2f9ae8fcd1332da4e791c1b6ba0dc16a2dc94590ceccb482e546", size = 220170, upload-time = "2025-09-21T20:02:17.395Z" }, + { url = "https://files.pythonhosted.org/packages/bb/22/e04514bf2a735d8b0add31d2b4ab636fc02370730787c576bb995390d2d5/coverage-7.10.7-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a0ec07fd264d0745ee396b666d47cef20875f4ff2375d7c4f58235886cc1ef0c", size = 219029, upload-time = "2025-09-21T20:02:18.936Z" }, + { url = "https://files.pythonhosted.org/packages/11/0b/91128e099035ece15da3445d9015e4b4153a6059403452d324cbb0a575fa/coverage-7.10.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:dd5e856ebb7bfb7672b0086846db5afb4567a7b9714b8a0ebafd211ec7ce6a15", size = 219259, upload-time = "2025-09-21T20:02:20.44Z" }, + { url = "https://files.pythonhosted.org/packages/8b/51/66420081e72801536a091a0c8f8c1f88a5c4bf7b9b1bdc6222c7afe6dc9b/coverage-7.10.7-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f57b2a3c8353d3e04acf75b3fed57ba41f5c0646bbf1d10c7c282291c97936b4", size = 260592, upload-time = "2025-09-21T20:02:22.313Z" }, + { url = "https://files.pythonhosted.org/packages/5d/22/9b8d458c2881b22df3db5bb3e7369e63d527d986decb6c11a591ba2364f7/coverage-7.10.7-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ef2319dd15a0b009667301a3f84452a4dc6fddfd06b0c5c53ea472d3989fbf0", size = 262768, upload-time = "2025-09-21T20:02:24.287Z" }, + { url = "https://files.pythonhosted.org/packages/f7/08/16bee2c433e60913c610ea200b276e8eeef084b0d200bdcff69920bd5828/coverage-7.10.7-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83082a57783239717ceb0ad584de3c69cf581b2a95ed6bf81ea66034f00401c0", size = 264995, upload-time = "2025-09-21T20:02:26.133Z" }, + { url = "https://files.pythonhosted.org/packages/20/9d/e53eb9771d154859b084b90201e5221bca7674ba449a17c101a5031d4054/coverage-7.10.7-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:50aa94fb1fb9a397eaa19c0d5ec15a5edd03a47bf1a3a6111a16b36e190cff65", size = 259546, upload-time = "2025-09-21T20:02:27.716Z" }, + { url = "https://files.pythonhosted.org/packages/ad/b0/69bc7050f8d4e56a89fb550a1577d5d0d1db2278106f6f626464067b3817/coverage-7.10.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2120043f147bebb41c85b97ac45dd173595ff14f2a584f2963891cbcc3091541", size = 262544, upload-time = "2025-09-21T20:02:29.216Z" }, + { url = "https://files.pythonhosted.org/packages/ef/4b/2514b060dbd1bc0aaf23b852c14bb5818f244c664cb16517feff6bb3a5ab/coverage-7.10.7-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2fafd773231dd0378fdba66d339f84904a8e57a262f583530f4f156ab83863e6", size = 260308, upload-time = "2025-09-21T20:02:31.226Z" }, + { url = "https://files.pythonhosted.org/packages/54/78/7ba2175007c246d75e496f64c06e94122bdb914790a1285d627a918bd271/coverage-7.10.7-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:0b944ee8459f515f28b851728ad224fa2d068f1513ef6b7ff1efafeb2185f999", size = 258920, upload-time = "2025-09-21T20:02:32.823Z" }, + { url = "https://files.pythonhosted.org/packages/c0/b3/fac9f7abbc841409b9a410309d73bfa6cfb2e51c3fada738cb607ce174f8/coverage-7.10.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4b583b97ab2e3efe1b3e75248a9b333bd3f8b0b1b8e5b45578e05e5850dfb2c2", size = 261434, upload-time = "2025-09-21T20:02:34.86Z" }, + { url = "https://files.pythonhosted.org/packages/ee/51/a03bec00d37faaa891b3ff7387192cef20f01604e5283a5fabc95346befa/coverage-7.10.7-cp313-cp313t-win32.whl", hash = "sha256:2a78cd46550081a7909b3329e2266204d584866e8d97b898cd7fb5ac8d888b1a", size = 221403, upload-time = "2025-09-21T20:02:37.034Z" }, + { url = "https://files.pythonhosted.org/packages/53/22/3cf25d614e64bf6d8e59c7c669b20d6d940bb337bdee5900b9ca41c820bb/coverage-7.10.7-cp313-cp313t-win_amd64.whl", hash = "sha256:33a5e6396ab684cb43dc7befa386258acb2d7fae7f67330ebb85ba4ea27938eb", size = 222469, upload-time = "2025-09-21T20:02:39.011Z" }, + { url = "https://files.pythonhosted.org/packages/49/a1/00164f6d30d8a01c3c9c48418a7a5be394de5349b421b9ee019f380df2a0/coverage-7.10.7-cp313-cp313t-win_arm64.whl", hash = "sha256:86b0e7308289ddde73d863b7683f596d8d21c7d8664ce1dee061d0bcf3fbb4bb", size = 220731, upload-time = "2025-09-21T20:02:40.939Z" }, + { url = "https://files.pythonhosted.org/packages/23/9c/5844ab4ca6a4dd97a1850e030a15ec7d292b5c5cb93082979225126e35dd/coverage-7.10.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b06f260b16ead11643a5a9f955bd4b5fd76c1a4c6796aeade8520095b75de520", size = 218302, upload-time = "2025-09-21T20:02:42.527Z" }, + { url = "https://files.pythonhosted.org/packages/f0/89/673f6514b0961d1f0e20ddc242e9342f6da21eaba3489901b565c0689f34/coverage-7.10.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:212f8f2e0612778f09c55dd4872cb1f64a1f2b074393d139278ce902064d5b32", size = 218578, upload-time = "2025-09-21T20:02:44.468Z" }, + { url = "https://files.pythonhosted.org/packages/05/e8/261cae479e85232828fb17ad536765c88dd818c8470aca690b0ac6feeaa3/coverage-7.10.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3445258bcded7d4aa630ab8296dea4d3f15a255588dd535f980c193ab6b95f3f", size = 249629, upload-time = "2025-09-21T20:02:46.503Z" }, + { url = "https://files.pythonhosted.org/packages/82/62/14ed6546d0207e6eda876434e3e8475a3e9adbe32110ce896c9e0c06bb9a/coverage-7.10.7-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb45474711ba385c46a0bfe696c695a929ae69ac636cda8f532be9e8c93d720a", size = 252162, upload-time = "2025-09-21T20:02:48.689Z" }, + { url = "https://files.pythonhosted.org/packages/ff/49/07f00db9ac6478e4358165a08fb41b469a1b053212e8a00cb02f0d27a05f/coverage-7.10.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:813922f35bd800dca9994c5971883cbc0d291128a5de6b167c7aa697fcf59360", size = 253517, upload-time = "2025-09-21T20:02:50.31Z" }, + { url = "https://files.pythonhosted.org/packages/a2/59/c5201c62dbf165dfbc91460f6dbbaa85a8b82cfa6131ac45d6c1bfb52deb/coverage-7.10.7-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:93c1b03552081b2a4423091d6fb3787265b8f86af404cff98d1b5342713bdd69", size = 249632, upload-time = "2025-09-21T20:02:51.971Z" }, + { url = "https://files.pythonhosted.org/packages/07/ae/5920097195291a51fb00b3a70b9bbd2edbfe3c84876a1762bd1ef1565ebc/coverage-7.10.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cc87dd1b6eaf0b848eebb1c86469b9f72a1891cb42ac7adcfbce75eadb13dd14", size = 251520, upload-time = "2025-09-21T20:02:53.858Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3c/a815dde77a2981f5743a60b63df31cb322c944843e57dbd579326625a413/coverage-7.10.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:39508ffda4f343c35f3236fe8d1a6634a51f4581226a1262769d7f970e73bffe", size = 249455, upload-time = "2025-09-21T20:02:55.807Z" }, + { url = "https://files.pythonhosted.org/packages/aa/99/f5cdd8421ea656abefb6c0ce92556709db2265c41e8f9fc6c8ae0f7824c9/coverage-7.10.7-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:925a1edf3d810537c5a3abe78ec5530160c5f9a26b1f4270b40e62cc79304a1e", size = 249287, upload-time = "2025-09-21T20:02:57.784Z" }, + { url = "https://files.pythonhosted.org/packages/c3/7a/e9a2da6a1fc5d007dd51fca083a663ab930a8c4d149c087732a5dbaa0029/coverage-7.10.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2c8b9a0636f94c43cd3576811e05b89aa9bc2d0a85137affc544ae5cb0e4bfbd", size = 250946, upload-time = "2025-09-21T20:02:59.431Z" }, + { url = "https://files.pythonhosted.org/packages/ef/5b/0b5799aa30380a949005a353715095d6d1da81927d6dbed5def2200a4e25/coverage-7.10.7-cp314-cp314-win32.whl", hash = "sha256:b7b8288eb7cdd268b0304632da8cb0bb93fadcfec2fe5712f7b9cc8f4d487be2", size = 221009, upload-time = "2025-09-21T20:03:01.324Z" }, + { url = "https://files.pythonhosted.org/packages/da/b0/e802fbb6eb746de006490abc9bb554b708918b6774b722bb3a0e6aa1b7de/coverage-7.10.7-cp314-cp314-win_amd64.whl", hash = "sha256:1ca6db7c8807fb9e755d0379ccc39017ce0a84dcd26d14b5a03b78563776f681", size = 221804, upload-time = "2025-09-21T20:03:03.4Z" }, + { url = "https://files.pythonhosted.org/packages/9e/e8/71d0c8e374e31f39e3389bb0bd19e527d46f00ea8571ec7ec8fd261d8b44/coverage-7.10.7-cp314-cp314-win_arm64.whl", hash = "sha256:097c1591f5af4496226d5783d036bf6fd6cd0cbc132e071b33861de756efb880", size = 220384, upload-time = "2025-09-21T20:03:05.111Z" }, + { url = "https://files.pythonhosted.org/packages/62/09/9a5608d319fa3eba7a2019addeacb8c746fb50872b57a724c9f79f146969/coverage-7.10.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a62c6ef0d50e6de320c270ff91d9dd0a05e7250cac2a800b7784bae474506e63", size = 219047, upload-time = "2025-09-21T20:03:06.795Z" }, + { url = "https://files.pythonhosted.org/packages/f5/6f/f58d46f33db9f2e3647b2d0764704548c184e6f5e014bef528b7f979ef84/coverage-7.10.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9fa6e4dd51fe15d8738708a973470f67a855ca50002294852e9571cdbd9433f2", size = 219266, upload-time = "2025-09-21T20:03:08.495Z" }, + { url = "https://files.pythonhosted.org/packages/74/5c/183ffc817ba68e0b443b8c934c8795553eb0c14573813415bd59941ee165/coverage-7.10.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8fb190658865565c549b6b4706856d6a7b09302c797eb2cf8e7fe9dabb043f0d", size = 260767, upload-time = "2025-09-21T20:03:10.172Z" }, + { url = "https://files.pythonhosted.org/packages/0f/48/71a8abe9c1ad7e97548835e3cc1adbf361e743e9d60310c5f75c9e7bf847/coverage-7.10.7-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:affef7c76a9ef259187ef31599a9260330e0335a3011732c4b9effa01e1cd6e0", size = 262931, upload-time = "2025-09-21T20:03:11.861Z" }, + { url = "https://files.pythonhosted.org/packages/84/fd/193a8fb132acfc0a901f72020e54be5e48021e1575bb327d8ee1097a28fd/coverage-7.10.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e16e07d85ca0cf8bafe5f5d23a0b850064e8e945d5677492b06bbe6f09cc699", size = 265186, upload-time = "2025-09-21T20:03:13.539Z" }, + { url = "https://files.pythonhosted.org/packages/b1/8f/74ecc30607dd95ad50e3034221113ccb1c6d4e8085cc761134782995daae/coverage-7.10.7-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:03ffc58aacdf65d2a82bbeb1ffe4d01ead4017a21bfd0454983b88ca73af94b9", size = 259470, upload-time = "2025-09-21T20:03:15.584Z" }, + { url = "https://files.pythonhosted.org/packages/0f/55/79ff53a769f20d71b07023ea115c9167c0bb56f281320520cf64c5298a96/coverage-7.10.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1b4fd784344d4e52647fd7857b2af5b3fbe6c239b0b5fa63e94eb67320770e0f", size = 262626, upload-time = "2025-09-21T20:03:17.673Z" }, + { url = "https://files.pythonhosted.org/packages/88/e2/dac66c140009b61ac3fc13af673a574b00c16efdf04f9b5c740703e953c0/coverage-7.10.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:0ebbaddb2c19b71912c6f2518e791aa8b9f054985a0769bdb3a53ebbc765c6a1", size = 260386, upload-time = "2025-09-21T20:03:19.36Z" }, + { url = "https://files.pythonhosted.org/packages/a2/f1/f48f645e3f33bb9ca8a496bc4a9671b52f2f353146233ebd7c1df6160440/coverage-7.10.7-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:a2d9a3b260cc1d1dbdb1c582e63ddcf5363426a1a68faa0f5da28d8ee3c722a0", size = 258852, upload-time = "2025-09-21T20:03:21.007Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3b/8442618972c51a7affeead957995cfa8323c0c9bcf8fa5a027421f720ff4/coverage-7.10.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a3cc8638b2480865eaa3926d192e64ce6c51e3d29c849e09d5b4ad95efae5399", size = 261534, upload-time = "2025-09-21T20:03:23.12Z" }, + { url = "https://files.pythonhosted.org/packages/b2/dc/101f3fa3a45146db0cb03f5b4376e24c0aac818309da23e2de0c75295a91/coverage-7.10.7-cp314-cp314t-win32.whl", hash = "sha256:67f8c5cbcd3deb7a60b3345dffc89a961a484ed0af1f6f73de91705cc6e31235", size = 221784, upload-time = "2025-09-21T20:03:24.769Z" }, + { url = "https://files.pythonhosted.org/packages/4c/a1/74c51803fc70a8a40d7346660379e144be772bab4ac7bb6e6b905152345c/coverage-7.10.7-cp314-cp314t-win_amd64.whl", hash = "sha256:e1ed71194ef6dea7ed2d5cb5f7243d4bcd334bfb63e59878519be558078f848d", size = 222905, upload-time = "2025-09-21T20:03:26.93Z" }, + { url = "https://files.pythonhosted.org/packages/12/65/f116a6d2127df30bcafbceef0302d8a64ba87488bf6f73a6d8eebf060873/coverage-7.10.7-cp314-cp314t-win_arm64.whl", hash = "sha256:7fe650342addd8524ca63d77b2362b02345e5f1a093266787d210c70a50b471a", size = 220922, upload-time = "2025-09-21T20:03:28.672Z" }, + { url = "https://files.pythonhosted.org/packages/ec/16/114df1c291c22cac3b0c127a73e0af5c12ed7bbb6558d310429a0ae24023/coverage-7.10.7-py3-none-any.whl", hash = "sha256:f7941f6f2fe6dd6807a1208737b8a0cbcf1cc6d7b07d24998ad2d63590868260", size = 209952, upload-time = "2025-09-21T20:03:53.918Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4a/9b/e301418629f7bfdf72db9e80ad6ed9d1b83c487c471803eaa6464c511a01/cryptography-46.0.2.tar.gz", hash = "sha256:21b6fc8c71a3f9a604f028a329e5560009cc4a3a828bfea5fcba8eb7647d88fe", size = 749293, upload-time = "2025-10-01T00:29:11.856Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/98/7a8df8c19a335c8028414738490fc3955c0cecbfdd37fcc1b9c3d04bd561/cryptography-46.0.2-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:f3e32ab7dd1b1ef67b9232c4cf5e2ee4cd517d4316ea910acaaa9c5712a1c663", size = 7261255, upload-time = "2025-10-01T00:27:22.947Z" }, + { url = "https://files.pythonhosted.org/packages/c6/38/b2adb2aa1baa6706adc3eb746691edd6f90a656a9a65c3509e274d15a2b8/cryptography-46.0.2-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1fd1a69086926b623ef8126b4c33d5399ce9e2f3fac07c9c734c2a4ec38b6d02", size = 4297596, upload-time = "2025-10-01T00:27:25.258Z" }, + { url = "https://files.pythonhosted.org/packages/e4/27/0f190ada240003119488ae66c897b5e97149292988f556aef4a6a2a57595/cryptography-46.0.2-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bb7fb9cd44c2582aa5990cf61a4183e6f54eea3172e54963787ba47287edd135", size = 4450899, upload-time = "2025-10-01T00:27:27.458Z" }, + { url = "https://files.pythonhosted.org/packages/85/d5/e4744105ab02fdf6bb58ba9a816e23b7a633255987310b4187d6745533db/cryptography-46.0.2-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9066cfd7f146f291869a9898b01df1c9b0e314bfa182cef432043f13fc462c92", size = 4300382, upload-time = "2025-10-01T00:27:29.091Z" }, + { url = "https://files.pythonhosted.org/packages/33/fb/bf9571065c18c04818cb07de90c43fc042c7977c68e5de6876049559c72f/cryptography-46.0.2-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:97e83bf4f2f2c084d8dd792d13841d0a9b241643151686010866bbd076b19659", size = 4017347, upload-time = "2025-10-01T00:27:30.767Z" }, + { url = "https://files.pythonhosted.org/packages/35/72/fc51856b9b16155ca071080e1a3ad0c3a8e86616daf7eb018d9565b99baa/cryptography-46.0.2-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:4a766d2a5d8127364fd936572c6e6757682fc5dfcbdba1632d4554943199f2fa", size = 4983500, upload-time = "2025-10-01T00:27:32.741Z" }, + { url = "https://files.pythonhosted.org/packages/c1/53/0f51e926799025e31746d454ab2e36f8c3f0d41592bc65cb9840368d3275/cryptography-46.0.2-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:fab8f805e9675e61ed8538f192aad70500fa6afb33a8803932999b1049363a08", size = 4482591, upload-time = "2025-10-01T00:27:34.869Z" }, + { url = "https://files.pythonhosted.org/packages/86/96/4302af40b23ab8aa360862251fb8fc450b2a06ff24bc5e261c2007f27014/cryptography-46.0.2-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:1e3b6428a3d56043bff0bb85b41c535734204e599c1c0977e1d0f261b02f3ad5", size = 4300019, upload-time = "2025-10-01T00:27:37.029Z" }, + { url = "https://files.pythonhosted.org/packages/9b/59/0be12c7fcc4c5e34fe2b665a75bc20958473047a30d095a7657c218fa9e8/cryptography-46.0.2-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:1a88634851d9b8de8bb53726f4300ab191d3b2f42595e2581a54b26aba71b7cc", size = 4950006, upload-time = "2025-10-01T00:27:40.272Z" }, + { url = "https://files.pythonhosted.org/packages/55/1d/42fda47b0111834b49e31590ae14fd020594d5e4dadd639bce89ad790fba/cryptography-46.0.2-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:be939b99d4e091eec9a2bcf41aaf8f351f312cd19ff74b5c83480f08a8a43e0b", size = 4482088, upload-time = "2025-10-01T00:27:42.668Z" }, + { url = "https://files.pythonhosted.org/packages/17/50/60f583f69aa1602c2bdc7022dae86a0d2b837276182f8c1ec825feb9b874/cryptography-46.0.2-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f13b040649bc18e7eb37936009b24fd31ca095a5c647be8bb6aaf1761142bd1", size = 4425599, upload-time = "2025-10-01T00:27:44.616Z" }, + { url = "https://files.pythonhosted.org/packages/d1/57/d8d4134cd27e6e94cf44adb3f3489f935bde85f3a5508e1b5b43095b917d/cryptography-46.0.2-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bdc25e4e01b261a8fda4e98618f1c9515febcecebc9566ddf4a70c63967043b", size = 4697458, upload-time = "2025-10-01T00:27:46.209Z" }, + { url = "https://files.pythonhosted.org/packages/d1/2b/531e37408573e1da33adfb4c58875013ee8ac7d548d1548967d94a0ae5c4/cryptography-46.0.2-cp311-abi3-win32.whl", hash = "sha256:8b9bf67b11ef9e28f4d78ff88b04ed0929fcd0e4f70bb0f704cfc32a5c6311ee", size = 3056077, upload-time = "2025-10-01T00:27:48.424Z" }, + { url = "https://files.pythonhosted.org/packages/a8/cd/2f83cafd47ed2dc5a3a9c783ff5d764e9e70d3a160e0df9a9dcd639414ce/cryptography-46.0.2-cp311-abi3-win_amd64.whl", hash = "sha256:758cfc7f4c38c5c5274b55a57ef1910107436f4ae842478c4989abbd24bd5acb", size = 3512585, upload-time = "2025-10-01T00:27:50.521Z" }, + { url = "https://files.pythonhosted.org/packages/00/36/676f94e10bfaa5c5b86c469ff46d3e0663c5dc89542f7afbadac241a3ee4/cryptography-46.0.2-cp311-abi3-win_arm64.whl", hash = "sha256:218abd64a2e72f8472c2102febb596793347a3e65fafbb4ad50519969da44470", size = 2927474, upload-time = "2025-10-01T00:27:52.91Z" }, + { url = "https://files.pythonhosted.org/packages/6f/cc/47fc6223a341f26d103cb6da2216805e08a37d3b52bee7f3b2aee8066f95/cryptography-46.0.2-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:bda55e8dbe8533937956c996beaa20266a8eca3570402e52ae52ed60de1faca8", size = 7198626, upload-time = "2025-10-01T00:27:54.8Z" }, + { url = "https://files.pythonhosted.org/packages/93/22/d66a8591207c28bbe4ac7afa25c4656dc19dc0db29a219f9809205639ede/cryptography-46.0.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e7155c0b004e936d381b15425273aee1cebc94f879c0ce82b0d7fecbf755d53a", size = 4287584, upload-time = "2025-10-01T00:27:57.018Z" }, + { url = "https://files.pythonhosted.org/packages/8c/3e/fac3ab6302b928e0398c269eddab5978e6c1c50b2b77bb5365ffa8633b37/cryptography-46.0.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a61c154cc5488272a6c4b86e8d5beff4639cdb173d75325ce464d723cda0052b", size = 4433796, upload-time = "2025-10-01T00:27:58.631Z" }, + { url = "https://files.pythonhosted.org/packages/7d/d8/24392e5d3c58e2d83f98fe5a2322ae343360ec5b5b93fe18bc52e47298f5/cryptography-46.0.2-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:9ec3f2e2173f36a9679d3b06d3d01121ab9b57c979de1e6a244b98d51fea1b20", size = 4292126, upload-time = "2025-10-01T00:28:00.643Z" }, + { url = "https://files.pythonhosted.org/packages/ed/38/3d9f9359b84c16c49a5a336ee8be8d322072a09fac17e737f3bb11f1ce64/cryptography-46.0.2-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2fafb6aa24e702bbf74de4cb23bfa2c3beb7ab7683a299062b69724c92e0fa73", size = 3993056, upload-time = "2025-10-01T00:28:02.8Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a3/4c44fce0d49a4703cc94bfbe705adebf7ab36efe978053742957bc7ec324/cryptography-46.0.2-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:0c7ffe8c9b1fcbb07a26d7c9fa5e857c2fe80d72d7b9e0353dcf1d2180ae60ee", size = 4967604, upload-time = "2025-10-01T00:28:04.783Z" }, + { url = "https://files.pythonhosted.org/packages/eb/c2/49d73218747c8cac16bb8318a5513fde3129e06a018af3bc4dc722aa4a98/cryptography-46.0.2-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:5840f05518caa86b09d23f8b9405a7b6d5400085aa14a72a98fdf5cf1568c0d2", size = 4465367, upload-time = "2025-10-01T00:28:06.864Z" }, + { url = "https://files.pythonhosted.org/packages/1b/64/9afa7d2ee742f55ca6285a54386ed2778556a4ed8871571cb1c1bfd8db9e/cryptography-46.0.2-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:27c53b4f6a682a1b645fbf1cd5058c72cf2f5aeba7d74314c36838c7cbc06e0f", size = 4291678, upload-time = "2025-10-01T00:28:08.982Z" }, + { url = "https://files.pythonhosted.org/packages/50/48/1696d5ea9623a7b72ace87608f6899ca3c331709ac7ebf80740abb8ac673/cryptography-46.0.2-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:512c0250065e0a6b286b2db4bbcc2e67d810acd53eb81733e71314340366279e", size = 4931366, upload-time = "2025-10-01T00:28:10.74Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3c/9dfc778401a334db3b24435ee0733dd005aefb74afe036e2d154547cb917/cryptography-46.0.2-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:07c0eb6657c0e9cca5891f4e35081dbf985c8131825e21d99b4f440a8f496f36", size = 4464738, upload-time = "2025-10-01T00:28:12.491Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b1/abcde62072b8f3fd414e191a6238ce55a0050e9738090dc6cded24c12036/cryptography-46.0.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:48b983089378f50cba258f7f7aa28198c3f6e13e607eaf10472c26320332ca9a", size = 4419305, upload-time = "2025-10-01T00:28:14.145Z" }, + { url = "https://files.pythonhosted.org/packages/c7/1f/3d2228492f9391395ca34c677e8f2571fb5370fe13dc48c1014f8c509864/cryptography-46.0.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e6f6775eaaa08c0eec73e301f7592f4367ccde5e4e4df8e58320f2ebf161ea2c", size = 4681201, upload-time = "2025-10-01T00:28:15.951Z" }, + { url = "https://files.pythonhosted.org/packages/de/77/b687745804a93a55054f391528fcfc76c3d6bfd082ce9fb62c12f0d29fc1/cryptography-46.0.2-cp314-cp314t-win32.whl", hash = "sha256:e8633996579961f9b5a3008683344c2558d38420029d3c0bc7ff77c17949a4e1", size = 3022492, upload-time = "2025-10-01T00:28:17.643Z" }, + { url = "https://files.pythonhosted.org/packages/60/a5/8d498ef2996e583de0bef1dcc5e70186376f00883ae27bf2133f490adf21/cryptography-46.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:48c01988ecbb32979bb98731f5c2b2f79042a6c58cc9a319c8c2f9987c7f68f9", size = 3496215, upload-time = "2025-10-01T00:28:19.272Z" }, + { url = "https://files.pythonhosted.org/packages/56/db/ee67aaef459a2706bc302b15889a1a8126ebe66877bab1487ae6ad00f33d/cryptography-46.0.2-cp314-cp314t-win_arm64.whl", hash = "sha256:8e2ad4d1a5899b7caa3a450e33ee2734be7cc0689010964703a7c4bcc8dd4fd0", size = 2919255, upload-time = "2025-10-01T00:28:21.115Z" }, + { url = "https://files.pythonhosted.org/packages/d5/bb/fa95abcf147a1b0bb94d95f53fbb09da77b24c776c5d87d36f3d94521d2c/cryptography-46.0.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a08e7401a94c002e79dc3bc5231b6558cd4b2280ee525c4673f650a37e2c7685", size = 7248090, upload-time = "2025-10-01T00:28:22.846Z" }, + { url = "https://files.pythonhosted.org/packages/b7/66/f42071ce0e3ffbfa80a88feadb209c779fda92a23fbc1e14f74ebf72ef6b/cryptography-46.0.2-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d30bc11d35743bf4ddf76674a0a369ec8a21f87aaa09b0661b04c5f6c46e8d7b", size = 4293123, upload-time = "2025-10-01T00:28:25.072Z" }, + { url = "https://files.pythonhosted.org/packages/a8/5d/1fdbd2e5c1ba822828d250e5a966622ef00185e476d1cd2726b6dd135e53/cryptography-46.0.2-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bca3f0ce67e5a2a2cf524e86f44697c4323a86e0fd7ba857de1c30d52c11ede1", size = 4439524, upload-time = "2025-10-01T00:28:26.808Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c1/5e4989a7d102d4306053770d60f978c7b6b1ea2ff8c06e0265e305b23516/cryptography-46.0.2-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ff798ad7a957a5021dcbab78dfff681f0cf15744d0e6af62bd6746984d9c9e9c", size = 4297264, upload-time = "2025-10-01T00:28:29.327Z" }, + { url = "https://files.pythonhosted.org/packages/28/78/b56f847d220cb1d6d6aef5a390e116ad603ce13a0945a3386a33abc80385/cryptography-46.0.2-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:cb5e8daac840e8879407acbe689a174f5ebaf344a062f8918e526824eb5d97af", size = 4011872, upload-time = "2025-10-01T00:28:31.479Z" }, + { url = "https://files.pythonhosted.org/packages/e1/80/2971f214b066b888944f7b57761bf709ee3f2cf805619a18b18cab9b263c/cryptography-46.0.2-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:3f37aa12b2d91e157827d90ce78f6180f0c02319468a0aea86ab5a9566da644b", size = 4978458, upload-time = "2025-10-01T00:28:33.267Z" }, + { url = "https://files.pythonhosted.org/packages/a5/84/0cb0a2beaa4f1cbe63ebec4e97cd7e0e9f835d0ba5ee143ed2523a1e0016/cryptography-46.0.2-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5e38f203160a48b93010b07493c15f2babb4e0f2319bbd001885adb3f3696d21", size = 4472195, upload-time = "2025-10-01T00:28:36.039Z" }, + { url = "https://files.pythonhosted.org/packages/30/8b/2b542ddbf78835c7cd67b6fa79e95560023481213a060b92352a61a10efe/cryptography-46.0.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:d19f5f48883752b5ab34cff9e2f7e4a7f216296f33714e77d1beb03d108632b6", size = 4296791, upload-time = "2025-10-01T00:28:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/78/12/9065b40201b4f4876e93b9b94d91feb18de9150d60bd842a16a21565007f/cryptography-46.0.2-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:04911b149eae142ccd8c9a68892a70c21613864afb47aba92d8c7ed9cc001023", size = 4939629, upload-time = "2025-10-01T00:28:39.654Z" }, + { url = "https://files.pythonhosted.org/packages/f6/9e/6507dc048c1b1530d372c483dfd34e7709fc542765015425f0442b08547f/cryptography-46.0.2-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:8b16c1ede6a937c291d41176934268e4ccac2c6521c69d3f5961c5a1e11e039e", size = 4471988, upload-time = "2025-10-01T00:28:41.822Z" }, + { url = "https://files.pythonhosted.org/packages/b1/86/d025584a5f7d5c5ec8d3633dbcdce83a0cd579f1141ceada7817a4c26934/cryptography-46.0.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:747b6f4a4a23d5a215aadd1d0b12233b4119c4313df83ab4137631d43672cc90", size = 4422989, upload-time = "2025-10-01T00:28:43.608Z" }, + { url = "https://files.pythonhosted.org/packages/4b/39/536370418b38a15a61bbe413006b79dfc3d2b4b0eafceb5581983f973c15/cryptography-46.0.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6b275e398ab3a7905e168c036aad54b5969d63d3d9099a0a66cc147a3cc983be", size = 4685578, upload-time = "2025-10-01T00:28:45.361Z" }, + { url = "https://files.pythonhosted.org/packages/15/52/ea7e2b1910f547baed566c866fbb86de2402e501a89ecb4871ea7f169a81/cryptography-46.0.2-cp38-abi3-win32.whl", hash = "sha256:0b507c8e033307e37af61cb9f7159b416173bdf5b41d11c4df2e499a1d8e007c", size = 3036711, upload-time = "2025-10-01T00:28:47.096Z" }, + { url = "https://files.pythonhosted.org/packages/71/9e/171f40f9c70a873e73c2efcdbe91e1d4b1777a03398fa1c4af3c56a2477a/cryptography-46.0.2-cp38-abi3-win_amd64.whl", hash = "sha256:f9b2dc7668418fb6f221e4bf701f716e05e8eadb4f1988a2487b11aedf8abe62", size = 3500007, upload-time = "2025-10-01T00:28:48.967Z" }, + { url = "https://files.pythonhosted.org/packages/3e/7c/15ad426257615f9be8caf7f97990cf3dcbb5b8dd7ed7e0db581a1c4759dd/cryptography-46.0.2-cp38-abi3-win_arm64.whl", hash = "sha256:91447f2b17e83c9e0c89f133119d83f94ce6e0fb55dd47da0a959316e6e9cfa1", size = 2918153, upload-time = "2025-10-01T00:28:51.003Z" }, +] + +[[package]] +name = "deprecated" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/85/12f0a49a7c4ffb70572b6c2ef13c90c88fd190debda93b23f026b25f9634/deprecated-1.3.1.tar.gz", hash = "sha256:b1b50e0ff0c1fddaa5708a2c6b0a6588bb09b892825ab2b214ac9ea9d92a5223", size = 2932523, upload-time = "2025-10-30T08:19:02.757Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/d0/205d54408c08b13550c733c4b85429e7ead111c7f0014309637425520a9a/deprecated-1.3.1-py2.py3-none-any.whl", hash = "sha256:597bfef186b6f60181535a29fbe44865ce137a5079f295b479886c82729d5f3f", size = 11298, upload-time = "2025-10-30T08:19:00.758Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "docstring-parser" +version = "0.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/f5/c831fac6cc817d26fd54c7eaccd04ef7e0288806943f7cc5bbf69f3ac1f0/frozenlist-1.8.0.tar.gz", hash = "sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad", size = 45875, upload-time = "2025-10-06T05:38:17.865Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/40/0832c31a37d60f60ed79e9dfb5a92e1e2af4f40a16a29abcc7992af9edff/frozenlist-1.8.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8d92f1a84bb12d9e56f818b3a746f3efba93c1b63c8387a73dde655e1e42282a", size = 85717, upload-time = "2025-10-06T05:36:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/30/ba/b0b3de23f40bc55a7057bd38434e25c34fa48e17f20ee273bbde5e0650f3/frozenlist-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96153e77a591c8adc2ee805756c61f59fef4cf4073a9275ee86fe8cba41241f7", size = 49651, upload-time = "2025-10-06T05:36:28.855Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ab/6e5080ee374f875296c4243c381bbdef97a9ac39c6e3ce1d5f7d42cb78d6/frozenlist-1.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f21f00a91358803399890ab167098c131ec2ddd5f8f5fd5fe9c9f2c6fcd91e40", size = 49417, upload-time = "2025-10-06T05:36:29.877Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4e/e4691508f9477ce67da2015d8c00acd751e6287739123113a9fca6f1604e/frozenlist-1.8.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fb30f9626572a76dfe4293c7194a09fb1fe93ba94c7d4f720dfae3b646b45027", size = 234391, upload-time = "2025-10-06T05:36:31.301Z" }, + { url = "https://files.pythonhosted.org/packages/40/76/c202df58e3acdf12969a7895fd6f3bc016c642e6726aa63bd3025e0fc71c/frozenlist-1.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eaa352d7047a31d87dafcacbabe89df0aa506abb5b1b85a2fb91bc3faa02d822", size = 233048, upload-time = "2025-10-06T05:36:32.531Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c0/8746afb90f17b73ca5979c7a3958116e105ff796e718575175319b5bb4ce/frozenlist-1.8.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:03ae967b4e297f58f8c774c7eabcce57fe3c2434817d4385c50661845a058121", size = 226549, upload-time = "2025-10-06T05:36:33.706Z" }, + { url = "https://files.pythonhosted.org/packages/7e/eb/4c7eefc718ff72f9b6c4893291abaae5fbc0c82226a32dcd8ef4f7a5dbef/frozenlist-1.8.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6292f1de555ffcc675941d65fffffb0a5bcd992905015f85d0592201793e0e5", size = 239833, upload-time = "2025-10-06T05:36:34.947Z" }, + { url = "https://files.pythonhosted.org/packages/c2/4e/e5c02187cf704224f8b21bee886f3d713ca379535f16893233b9d672ea71/frozenlist-1.8.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29548f9b5b5e3460ce7378144c3010363d8035cea44bc0bf02d57f5a685e084e", size = 245363, upload-time = "2025-10-06T05:36:36.534Z" }, + { url = "https://files.pythonhosted.org/packages/1f/96/cb85ec608464472e82ad37a17f844889c36100eed57bea094518bf270692/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ec3cc8c5d4084591b4237c0a272cc4f50a5b03396a47d9caaf76f5d7b38a4f11", size = 229314, upload-time = "2025-10-06T05:36:38.582Z" }, + { url = "https://files.pythonhosted.org/packages/5d/6f/4ae69c550e4cee66b57887daeebe006fe985917c01d0fff9caab9883f6d0/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:517279f58009d0b1f2e7c1b130b377a349405da3f7621ed6bfae50b10adf20c1", size = 243365, upload-time = "2025-10-06T05:36:40.152Z" }, + { url = "https://files.pythonhosted.org/packages/7a/58/afd56de246cf11780a40a2c28dc7cbabbf06337cc8ddb1c780a2d97e88d8/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:db1e72ede2d0d7ccb213f218df6a078a9c09a7de257c2fe8fcef16d5925230b1", size = 237763, upload-time = "2025-10-06T05:36:41.355Z" }, + { url = "https://files.pythonhosted.org/packages/cb/36/cdfaf6ed42e2644740d4a10452d8e97fa1c062e2a8006e4b09f1b5fd7d63/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b4dec9482a65c54a5044486847b8a66bf10c9cb4926d42927ec4e8fd5db7fed8", size = 240110, upload-time = "2025-10-06T05:36:42.716Z" }, + { url = "https://files.pythonhosted.org/packages/03/a8/9ea226fbefad669f11b52e864c55f0bd57d3c8d7eb07e9f2e9a0b39502e1/frozenlist-1.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:21900c48ae04d13d416f0e1e0c4d81f7931f73a9dfa0b7a8746fb2fe7dd970ed", size = 233717, upload-time = "2025-10-06T05:36:44.251Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0b/1b5531611e83ba7d13ccc9988967ea1b51186af64c42b7a7af465dcc9568/frozenlist-1.8.0-cp313-cp313-win32.whl", hash = "sha256:8b7b94a067d1c504ee0b16def57ad5738701e4ba10cec90529f13fa03c833496", size = 39628, upload-time = "2025-10-06T05:36:45.423Z" }, + { url = "https://files.pythonhosted.org/packages/d8/cf/174c91dbc9cc49bc7b7aab74d8b734e974d1faa8f191c74af9b7e80848e6/frozenlist-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:878be833caa6a3821caf85eb39c5ba92d28e85df26d57afb06b35b2efd937231", size = 43882, upload-time = "2025-10-06T05:36:46.796Z" }, + { url = "https://files.pythonhosted.org/packages/c1/17/502cd212cbfa96eb1388614fe39a3fc9ab87dbbe042b66f97acb57474834/frozenlist-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:44389d135b3ff43ba8cc89ff7f51f5a0bb6b63d829c8300f79a2fe4fe61bcc62", size = 39676, upload-time = "2025-10-06T05:36:47.8Z" }, + { url = "https://files.pythonhosted.org/packages/d2/5c/3bbfaa920dfab09e76946a5d2833a7cbdf7b9b4a91c714666ac4855b88b4/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e25ac20a2ef37e91c1b39938b591457666a0fa835c7783c3a8f33ea42870db94", size = 89235, upload-time = "2025-10-06T05:36:48.78Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d6/f03961ef72166cec1687e84e8925838442b615bd0b8854b54923ce5b7b8a/frozenlist-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07cdca25a91a4386d2e76ad992916a85038a9b97561bf7a3fd12d5d9ce31870c", size = 50742, upload-time = "2025-10-06T05:36:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/1e/bb/a6d12b7ba4c3337667d0e421f7181c82dda448ce4e7ad7ecd249a16fa806/frozenlist-1.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e0c11f2cc6717e0a741f84a527c52616140741cd812a50422f83dc31749fb52", size = 51725, upload-time = "2025-10-06T05:36:50.851Z" }, + { url = "https://files.pythonhosted.org/packages/bc/71/d1fed0ffe2c2ccd70b43714c6cab0f4188f09f8a67a7914a6b46ee30f274/frozenlist-1.8.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b3210649ee28062ea6099cfda39e147fa1bc039583c8ee4481cb7811e2448c51", size = 284533, upload-time = "2025-10-06T05:36:51.898Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/fb1685a7b009d89f9bf78a42d94461bc06581f6e718c39344754a5d9bada/frozenlist-1.8.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:581ef5194c48035a7de2aefc72ac6539823bb71508189e5de01d60c9dcd5fa65", size = 292506, upload-time = "2025-10-06T05:36:53.101Z" }, + { url = "https://files.pythonhosted.org/packages/e6/3b/b991fe1612703f7e0d05c0cf734c1b77aaf7c7d321df4572e8d36e7048c8/frozenlist-1.8.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3ef2d026f16a2b1866e1d86fc4e1291e1ed8a387b2c333809419a2f8b3a77b82", size = 274161, upload-time = "2025-10-06T05:36:54.309Z" }, + { url = "https://files.pythonhosted.org/packages/ca/ec/c5c618767bcdf66e88945ec0157d7f6c4a1322f1473392319b7a2501ded7/frozenlist-1.8.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5500ef82073f599ac84d888e3a8c1f77ac831183244bfd7f11eaa0289fb30714", size = 294676, upload-time = "2025-10-06T05:36:55.566Z" }, + { url = "https://files.pythonhosted.org/packages/7c/ce/3934758637d8f8a88d11f0585d6495ef54b2044ed6ec84492a91fa3b27aa/frozenlist-1.8.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:50066c3997d0091c411a66e710f4e11752251e6d2d73d70d8d5d4c76442a199d", size = 300638, upload-time = "2025-10-06T05:36:56.758Z" }, + { url = "https://files.pythonhosted.org/packages/fc/4f/a7e4d0d467298f42de4b41cbc7ddaf19d3cfeabaf9ff97c20c6c7ee409f9/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:5c1c8e78426e59b3f8005e9b19f6ff46e5845895adbde20ece9218319eca6506", size = 283067, upload-time = "2025-10-06T05:36:57.965Z" }, + { url = "https://files.pythonhosted.org/packages/dc/48/c7b163063d55a83772b268e6d1affb960771b0e203b632cfe09522d67ea5/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:eefdba20de0d938cec6a89bd4d70f346a03108a19b9df4248d3cf0d88f1b0f51", size = 292101, upload-time = "2025-10-06T05:36:59.237Z" }, + { url = "https://files.pythonhosted.org/packages/9f/d0/2366d3c4ecdc2fd391e0afa6e11500bfba0ea772764d631bbf82f0136c9d/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cf253e0e1c3ceb4aaff6df637ce033ff6535fb8c70a764a8f46aafd3d6ab798e", size = 289901, upload-time = "2025-10-06T05:37:00.811Z" }, + { url = "https://files.pythonhosted.org/packages/b8/94/daff920e82c1b70e3618a2ac39fbc01ae3e2ff6124e80739ce5d71c9b920/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:032efa2674356903cd0261c4317a561a6850f3ac864a63fc1583147fb05a79b0", size = 289395, upload-time = "2025-10-06T05:37:02.115Z" }, + { url = "https://files.pythonhosted.org/packages/e3/20/bba307ab4235a09fdcd3cc5508dbabd17c4634a1af4b96e0f69bfe551ebd/frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6da155091429aeba16851ecb10a9104a108bcd32f6c1642867eadaee401c1c41", size = 283659, upload-time = "2025-10-06T05:37:03.711Z" }, + { url = "https://files.pythonhosted.org/packages/fd/00/04ca1c3a7a124b6de4f8a9a17cc2fcad138b4608e7a3fc5877804b8715d7/frozenlist-1.8.0-cp313-cp313t-win32.whl", hash = "sha256:0f96534f8bfebc1a394209427d0f8a63d343c9779cda6fc25e8e121b5fd8555b", size = 43492, upload-time = "2025-10-06T05:37:04.915Z" }, + { url = "https://files.pythonhosted.org/packages/59/5e/c69f733a86a94ab10f68e496dc6b7e8bc078ebb415281d5698313e3af3a1/frozenlist-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5d63a068f978fc69421fb0e6eb91a9603187527c86b7cd3f534a5b77a592b888", size = 48034, upload-time = "2025-10-06T05:37:06.343Z" }, + { url = "https://files.pythonhosted.org/packages/16/6c/be9d79775d8abe79b05fa6d23da99ad6e7763a1d080fbae7290b286093fd/frozenlist-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf0a7e10b077bf5fb9380ad3ae8ce20ef919a6ad93b4552896419ac7e1d8e042", size = 41749, upload-time = "2025-10-06T05:37:07.431Z" }, + { url = "https://files.pythonhosted.org/packages/f1/c8/85da824b7e7b9b6e7f7705b2ecaf9591ba6f79c1177f324c2735e41d36a2/frozenlist-1.8.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cee686f1f4cadeb2136007ddedd0aaf928ab95216e7691c63e50a8ec066336d0", size = 86127, upload-time = "2025-10-06T05:37:08.438Z" }, + { url = "https://files.pythonhosted.org/packages/8e/e8/a1185e236ec66c20afd72399522f142c3724c785789255202d27ae992818/frozenlist-1.8.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:119fb2a1bd47307e899c2fac7f28e85b9a543864df47aa7ec9d3c1b4545f096f", size = 49698, upload-time = "2025-10-06T05:37:09.48Z" }, + { url = "https://files.pythonhosted.org/packages/a1/93/72b1736d68f03fda5fdf0f2180fb6caaae3894f1b854d006ac61ecc727ee/frozenlist-1.8.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4970ece02dbc8c3a92fcc5228e36a3e933a01a999f7094ff7c23fbd2beeaa67c", size = 49749, upload-time = "2025-10-06T05:37:10.569Z" }, + { url = "https://files.pythonhosted.org/packages/a7/b2/fabede9fafd976b991e9f1b9c8c873ed86f202889b864756f240ce6dd855/frozenlist-1.8.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:cba69cb73723c3f329622e34bdbf5ce1f80c21c290ff04256cff1cd3c2036ed2", size = 231298, upload-time = "2025-10-06T05:37:11.993Z" }, + { url = "https://files.pythonhosted.org/packages/3a/3b/d9b1e0b0eed36e70477ffb8360c49c85c8ca8ef9700a4e6711f39a6e8b45/frozenlist-1.8.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:778a11b15673f6f1df23d9586f83c4846c471a8af693a22e066508b77d201ec8", size = 232015, upload-time = "2025-10-06T05:37:13.194Z" }, + { url = "https://files.pythonhosted.org/packages/dc/94/be719d2766c1138148564a3960fc2c06eb688da592bdc25adcf856101be7/frozenlist-1.8.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0325024fe97f94c41c08872db482cf8ac4800d80e79222c6b0b7b162d5b13686", size = 225038, upload-time = "2025-10-06T05:37:14.577Z" }, + { url = "https://files.pythonhosted.org/packages/e4/09/6712b6c5465f083f52f50cf74167b92d4ea2f50e46a9eea0523d658454ae/frozenlist-1.8.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:97260ff46b207a82a7567b581ab4190bd4dfa09f4db8a8b49d1a958f6aa4940e", size = 240130, upload-time = "2025-10-06T05:37:15.781Z" }, + { url = "https://files.pythonhosted.org/packages/f8/d4/cd065cdcf21550b54f3ce6a22e143ac9e4836ca42a0de1022da8498eac89/frozenlist-1.8.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:54b2077180eb7f83dd52c40b2750d0a9f175e06a42e3213ce047219de902717a", size = 242845, upload-time = "2025-10-06T05:37:17.037Z" }, + { url = "https://files.pythonhosted.org/packages/62/c3/f57a5c8c70cd1ead3d5d5f776f89d33110b1addae0ab010ad774d9a44fb9/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2f05983daecab868a31e1da44462873306d3cbfd76d1f0b5b69c473d21dbb128", size = 229131, upload-time = "2025-10-06T05:37:18.221Z" }, + { url = "https://files.pythonhosted.org/packages/6c/52/232476fe9cb64f0742f3fde2b7d26c1dac18b6d62071c74d4ded55e0ef94/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:33f48f51a446114bc5d251fb2954ab0164d5be02ad3382abcbfe07e2531d650f", size = 240542, upload-time = "2025-10-06T05:37:19.771Z" }, + { url = "https://files.pythonhosted.org/packages/5f/85/07bf3f5d0fb5414aee5f47d33c6f5c77bfe49aac680bfece33d4fdf6a246/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:154e55ec0655291b5dd1b8731c637ecdb50975a2ae70c606d100750a540082f7", size = 237308, upload-time = "2025-10-06T05:37:20.969Z" }, + { url = "https://files.pythonhosted.org/packages/11/99/ae3a33d5befd41ac0ca2cc7fd3aa707c9c324de2e89db0e0f45db9a64c26/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:4314debad13beb564b708b4a496020e5306c7333fa9a3ab90374169a20ffab30", size = 238210, upload-time = "2025-10-06T05:37:22.252Z" }, + { url = "https://files.pythonhosted.org/packages/b2/60/b1d2da22f4970e7a155f0adde9b1435712ece01b3cd45ba63702aea33938/frozenlist-1.8.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:073f8bf8becba60aa931eb3bc420b217bb7d5b8f4750e6f8b3be7f3da85d38b7", size = 231972, upload-time = "2025-10-06T05:37:23.5Z" }, + { url = "https://files.pythonhosted.org/packages/3f/ab/945b2f32de889993b9c9133216c068b7fcf257d8595a0ac420ac8677cab0/frozenlist-1.8.0-cp314-cp314-win32.whl", hash = "sha256:bac9c42ba2ac65ddc115d930c78d24ab8d4f465fd3fc473cdedfccadb9429806", size = 40536, upload-time = "2025-10-06T05:37:25.581Z" }, + { url = "https://files.pythonhosted.org/packages/59/ad/9caa9b9c836d9ad6f067157a531ac48b7d36499f5036d4141ce78c230b1b/frozenlist-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:3e0761f4d1a44f1d1a47996511752cf3dcec5bbdd9cc2b4fe595caf97754b7a0", size = 44330, upload-time = "2025-10-06T05:37:26.928Z" }, + { url = "https://files.pythonhosted.org/packages/82/13/e6950121764f2676f43534c555249f57030150260aee9dcf7d64efda11dd/frozenlist-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:d1eaff1d00c7751b7c6662e9c5ba6eb2c17a2306ba5e2a37f24ddf3cc953402b", size = 40627, upload-time = "2025-10-06T05:37:28.075Z" }, + { url = "https://files.pythonhosted.org/packages/c0/c7/43200656ecc4e02d3f8bc248df68256cd9572b3f0017f0a0c4e93440ae23/frozenlist-1.8.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:d3bb933317c52d7ea5004a1c442eef86f426886fba134ef8cf4226ea6ee1821d", size = 89238, upload-time = "2025-10-06T05:37:29.373Z" }, + { url = "https://files.pythonhosted.org/packages/d1/29/55c5f0689b9c0fb765055629f472c0de484dcaf0acee2f7707266ae3583c/frozenlist-1.8.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8009897cdef112072f93a0efdce29cd819e717fd2f649ee3016efd3cd885a7ed", size = 50738, upload-time = "2025-10-06T05:37:30.792Z" }, + { url = "https://files.pythonhosted.org/packages/ba/7d/b7282a445956506fa11da8c2db7d276adcbf2b17d8bb8407a47685263f90/frozenlist-1.8.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2c5dcbbc55383e5883246d11fd179782a9d07a986c40f49abe89ddf865913930", size = 51739, upload-time = "2025-10-06T05:37:32.127Z" }, + { url = "https://files.pythonhosted.org/packages/62/1c/3d8622e60d0b767a5510d1d3cf21065b9db874696a51ea6d7a43180a259c/frozenlist-1.8.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:39ecbc32f1390387d2aa4f5a995e465e9e2f79ba3adcac92d68e3e0afae6657c", size = 284186, upload-time = "2025-10-06T05:37:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/2d/14/aa36d5f85a89679a85a1d44cd7a6657e0b1c75f61e7cad987b203d2daca8/frozenlist-1.8.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92db2bf818d5cc8d9c1f1fc56b897662e24ea5adb36ad1f1d82875bd64e03c24", size = 292196, upload-time = "2025-10-06T05:37:36.107Z" }, + { url = "https://files.pythonhosted.org/packages/05/23/6bde59eb55abd407d34f77d39a5126fb7b4f109a3f611d3929f14b700c66/frozenlist-1.8.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dc43a022e555de94c3b68a4ef0b11c4f747d12c024a520c7101709a2144fb37", size = 273830, upload-time = "2025-10-06T05:37:37.663Z" }, + { url = "https://files.pythonhosted.org/packages/d2/3f/22cff331bfad7a8afa616289000ba793347fcd7bc275f3b28ecea2a27909/frozenlist-1.8.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb89a7f2de3602cfed448095bab3f178399646ab7c61454315089787df07733a", size = 294289, upload-time = "2025-10-06T05:37:39.261Z" }, + { url = "https://files.pythonhosted.org/packages/a4/89/5b057c799de4838b6c69aa82b79705f2027615e01be996d2486a69ca99c4/frozenlist-1.8.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:33139dc858c580ea50e7e60a1b0ea003efa1fd42e6ec7fdbad78fff65fad2fd2", size = 300318, upload-time = "2025-10-06T05:37:43.213Z" }, + { url = "https://files.pythonhosted.org/packages/30/de/2c22ab3eb2a8af6d69dc799e48455813bab3690c760de58e1bf43b36da3e/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:168c0969a329b416119507ba30b9ea13688fafffac1b7822802537569a1cb0ef", size = 282814, upload-time = "2025-10-06T05:37:45.337Z" }, + { url = "https://files.pythonhosted.org/packages/59/f7/970141a6a8dbd7f556d94977858cfb36fa9b66e0892c6dd780d2219d8cd8/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:28bd570e8e189d7f7b001966435f9dac6718324b5be2990ac496cf1ea9ddb7fe", size = 291762, upload-time = "2025-10-06T05:37:46.657Z" }, + { url = "https://files.pythonhosted.org/packages/c1/15/ca1adae83a719f82df9116d66f5bb28bb95557b3951903d39135620ef157/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b2a095d45c5d46e5e79ba1e5b9cb787f541a8dee0433836cea4b96a2c439dcd8", size = 289470, upload-time = "2025-10-06T05:37:47.946Z" }, + { url = "https://files.pythonhosted.org/packages/ac/83/dca6dc53bf657d371fbc88ddeb21b79891e747189c5de990b9dfff2ccba1/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:eab8145831a0d56ec9c4139b6c3e594c7a83c2c8be25d5bcf2d86136a532287a", size = 289042, upload-time = "2025-10-06T05:37:49.499Z" }, + { url = "https://files.pythonhosted.org/packages/96/52/abddd34ca99be142f354398700536c5bd315880ed0a213812bc491cff5e4/frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:974b28cf63cc99dfb2188d8d222bc6843656188164848c4f679e63dae4b0708e", size = 283148, upload-time = "2025-10-06T05:37:50.745Z" }, + { url = "https://files.pythonhosted.org/packages/af/d3/76bd4ed4317e7119c2b7f57c3f6934aba26d277acc6309f873341640e21f/frozenlist-1.8.0-cp314-cp314t-win32.whl", hash = "sha256:342c97bf697ac5480c0a7ec73cd700ecfa5a8a40ac923bd035484616efecc2df", size = 44676, upload-time = "2025-10-06T05:37:52.222Z" }, + { url = "https://files.pythonhosted.org/packages/89/76/c615883b7b521ead2944bb3480398cbb07e12b7b4e4d073d3752eb721558/frozenlist-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:06be8f67f39c8b1dc671f5d83aaefd3358ae5cdcf8314552c57e7ed3e6475bdd", size = 49451, upload-time = "2025-10-06T05:37:53.425Z" }, + { url = "https://files.pythonhosted.org/packages/e0/a3/5982da14e113d07b325230f95060e2169f5311b1017ea8af2a29b374c289/frozenlist-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:102e6314ca4da683dca92e3b1355490fed5f313b768500084fbe6371fddfdb79", size = 42507, upload-time = "2025-10-06T05:37:54.513Z" }, + { url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" }, +] + +[[package]] +name = "gaab-strands-common" +version = "0.1.0" +source = { editable = "../gaab-strands-common" } +dependencies = [ + { name = "bedrock-agentcore" }, + { name = "boto3" }, + { name = "pydantic" }, + { name = "strands-agents" }, +] + +[package.metadata] +requires-dist = [ + { name = "bedrock-agentcore", specifier = ">=0.1.0" }, + { name = "boto3", specifier = ">=1.34.0" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "strands-agents", specifier = ">=1.10.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "black", specifier = ">=24.0.0" }, + { name = "isort", specifier = ">=5.13.0" }, + { name = "moto", specifier = ">=5.1.0" }, + { name = "pytest", specifier = ">=8.0.0" }, + { name = "pytest-asyncio", specifier = ">=0.23.0" }, + { name = "pytest-cov", specifier = ">=4.1.0" }, +] + +[[package]] +name = "gaab-strands-workflow-agent" +version = "1.0.0" +source = { editable = "." } +dependencies = [ + { name = "aws-opentelemetry-distro" }, + { name = "bedrock-agentcore" }, + { name = "boto3" }, + { name = "gaab-strands-common" }, + { name = "pip" }, + { name = "pydantic" }, + { name = "setuptools" }, + { name = "strands-agents", extra = ["otel"] }, + { name = "strands-agents-tools" }, + { name = "wheel" }, +] + +[package.dev-dependencies] +dev = [ + { name = "black" }, + { name = "isort" }, + { name = "moto" }, + { name = "mypy" }, + { name = "pytest" }, + { name = "pytest-cov" }, + { name = "pytest-mock" }, +] + +[package.metadata] +requires-dist = [ + { name = "aws-opentelemetry-distro", specifier = ">=0.12.1" }, + { name = "bedrock-agentcore", specifier = ">=0.1.5" }, + { name = "boto3", specifier = ">=1.35.0" }, + { name = "gaab-strands-common", editable = "../gaab-strands-common" }, + { name = "pip", specifier = ">=25.0" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "setuptools", specifier = ">=70.0.0" }, + { name = "strands-agents", extras = ["otel"], specifier = ">=1.10.0" }, + { name = "strands-agents-tools", specifier = ">=0.2.9" }, + { name = "wheel", specifier = ">=0.42.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "black", specifier = ">=24.0.0" }, + { name = "isort", specifier = ">=5.12.0" }, + { name = "moto", specifier = ">=5.0.0" }, + { name = "mypy", specifier = ">=1.8.0" }, + { name = "pytest", specifier = ">=8.0.0" }, + { name = "pytest-cov", specifier = ">=5.0.0" }, + { name = "pytest-mock", specifier = ">=3.12.0" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, +] + +[[package]] +name = "grpcio" +version = "1.76.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b6/e0/318c1ce3ae5a17894d5791e87aea147587c9e702f24122cc7a5c8bbaeeb1/grpcio-1.76.0.tar.gz", hash = "sha256:7be78388d6da1a25c0d5ec506523db58b18be22d9c37d8d3a32c08be4987bd73", size = 12785182, upload-time = "2025-10-21T16:23:12.106Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/ed/71467ab770effc9e8cef5f2e7388beb2be26ed642d567697bb103a790c72/grpcio-1.76.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:26ef06c73eb53267c2b319f43e6634c7556ea37672029241a056629af27c10e2", size = 5807716, upload-time = "2025-10-21T16:21:48.475Z" }, + { url = "https://files.pythonhosted.org/packages/2c/85/c6ed56f9817fab03fa8a111ca91469941fb514e3e3ce6d793cb8f1e1347b/grpcio-1.76.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:45e0111e73f43f735d70786557dc38141185072d7ff8dc1829d6a77ac1471468", size = 11821522, upload-time = "2025-10-21T16:21:51.142Z" }, + { url = "https://files.pythonhosted.org/packages/ac/31/2b8a235ab40c39cbc141ef647f8a6eb7b0028f023015a4842933bc0d6831/grpcio-1.76.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:83d57312a58dcfe2a3a0f9d1389b299438909a02db60e2f2ea2ae2d8034909d3", size = 6362558, upload-time = "2025-10-21T16:21:54.213Z" }, + { url = "https://files.pythonhosted.org/packages/bd/64/9784eab483358e08847498ee56faf8ff6ea8e0a4592568d9f68edc97e9e9/grpcio-1.76.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:3e2a27c89eb9ac3d81ec8835e12414d73536c6e620355d65102503064a4ed6eb", size = 7049990, upload-time = "2025-10-21T16:21:56.476Z" }, + { url = "https://files.pythonhosted.org/packages/2b/94/8c12319a6369434e7a184b987e8e9f3b49a114c489b8315f029e24de4837/grpcio-1.76.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61f69297cba3950a524f61c7c8ee12e55c486cb5f7db47ff9dcee33da6f0d3ae", size = 6575387, upload-time = "2025-10-21T16:21:59.051Z" }, + { url = "https://files.pythonhosted.org/packages/15/0f/f12c32b03f731f4a6242f771f63039df182c8b8e2cf8075b245b409259d4/grpcio-1.76.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6a15c17af8839b6801d554263c546c69c4d7718ad4321e3166175b37eaacca77", size = 7166668, upload-time = "2025-10-21T16:22:02.049Z" }, + { url = "https://files.pythonhosted.org/packages/ff/2d/3ec9ce0c2b1d92dd59d1c3264aaec9f0f7c817d6e8ac683b97198a36ed5a/grpcio-1.76.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:25a18e9810fbc7e7f03ec2516addc116a957f8cbb8cbc95ccc80faa072743d03", size = 8124928, upload-time = "2025-10-21T16:22:04.984Z" }, + { url = "https://files.pythonhosted.org/packages/1a/74/fd3317be5672f4856bcdd1a9e7b5e17554692d3db9a3b273879dc02d657d/grpcio-1.76.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:931091142fd8cc14edccc0845a79248bc155425eee9a98b2db2ea4f00a235a42", size = 7589983, upload-time = "2025-10-21T16:22:07.881Z" }, + { url = "https://files.pythonhosted.org/packages/45/bb/ca038cf420f405971f19821c8c15bcbc875505f6ffadafe9ffd77871dc4c/grpcio-1.76.0-cp313-cp313-win32.whl", hash = "sha256:5e8571632780e08526f118f74170ad8d50fb0a48c23a746bef2a6ebade3abd6f", size = 3984727, upload-time = "2025-10-21T16:22:10.032Z" }, + { url = "https://files.pythonhosted.org/packages/41/80/84087dc56437ced7cdd4b13d7875e7439a52a261e3ab4e06488ba6173b0a/grpcio-1.76.0-cp313-cp313-win_amd64.whl", hash = "sha256:f9f7bd5faab55f47231ad8dba7787866b69f5e93bc306e3915606779bbfb4ba8", size = 4702799, upload-time = "2025-10-21T16:22:12.709Z" }, + { url = "https://files.pythonhosted.org/packages/b4/46/39adac80de49d678e6e073b70204091e76631e03e94928b9ea4ecf0f6e0e/grpcio-1.76.0-cp314-cp314-linux_armv7l.whl", hash = "sha256:ff8a59ea85a1f2191a0ffcc61298c571bc566332f82e5f5be1b83c9d8e668a62", size = 5808417, upload-time = "2025-10-21T16:22:15.02Z" }, + { url = "https://files.pythonhosted.org/packages/9c/f5/a4531f7fb8b4e2a60b94e39d5d924469b7a6988176b3422487be61fe2998/grpcio-1.76.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:06c3d6b076e7b593905d04fdba6a0525711b3466f43b3400266f04ff735de0cd", size = 11828219, upload-time = "2025-10-21T16:22:17.954Z" }, + { url = "https://files.pythonhosted.org/packages/4b/1c/de55d868ed7a8bd6acc6b1d6ddc4aa36d07a9f31d33c912c804adb1b971b/grpcio-1.76.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fd5ef5932f6475c436c4a55e4336ebbe47bd3272be04964a03d316bbf4afbcbc", size = 6367826, upload-time = "2025-10-21T16:22:20.721Z" }, + { url = "https://files.pythonhosted.org/packages/59/64/99e44c02b5adb0ad13ab3adc89cb33cb54bfa90c74770f2607eea629b86f/grpcio-1.76.0-cp314-cp314-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b331680e46239e090f5b3cead313cc772f6caa7d0fc8de349337563125361a4a", size = 7049550, upload-time = "2025-10-21T16:22:23.637Z" }, + { url = "https://files.pythonhosted.org/packages/43/28/40a5be3f9a86949b83e7d6a2ad6011d993cbe9b6bd27bea881f61c7788b6/grpcio-1.76.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2229ae655ec4e8999599469559e97630185fdd53ae1e8997d147b7c9b2b72cba", size = 6575564, upload-time = "2025-10-21T16:22:26.016Z" }, + { url = "https://files.pythonhosted.org/packages/4b/a9/1be18e6055b64467440208a8559afac243c66a8b904213af6f392dc2212f/grpcio-1.76.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:490fa6d203992c47c7b9e4a9d39003a0c2bcc1c9aa3c058730884bbbb0ee9f09", size = 7176236, upload-time = "2025-10-21T16:22:28.362Z" }, + { url = "https://files.pythonhosted.org/packages/0f/55/dba05d3fcc151ce6e81327541d2cc8394f442f6b350fead67401661bf041/grpcio-1.76.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:479496325ce554792dba6548fae3df31a72cef7bad71ca2e12b0e58f9b336bfc", size = 8125795, upload-time = "2025-10-21T16:22:31.075Z" }, + { url = "https://files.pythonhosted.org/packages/4a/45/122df922d05655f63930cf42c9e3f72ba20aadb26c100ee105cad4ce4257/grpcio-1.76.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1c9b93f79f48b03ada57ea24725d83a30284a012ec27eab2cf7e50a550cbbbcc", size = 7592214, upload-time = "2025-10-21T16:22:33.831Z" }, + { url = "https://files.pythonhosted.org/packages/4a/6e/0b899b7f6b66e5af39e377055fb4a6675c9ee28431df5708139df2e93233/grpcio-1.76.0-cp314-cp314-win32.whl", hash = "sha256:747fa73efa9b8b1488a95d0ba1039c8e2dca0f741612d80415b1e1c560febf4e", size = 4062961, upload-time = "2025-10-21T16:22:36.468Z" }, + { url = "https://files.pythonhosted.org/packages/19/41/0b430b01a2eb38ee887f88c1f07644a1df8e289353b78e82b37ef988fb64/grpcio-1.76.0-cp314-cp314-win_amd64.whl", hash = "sha256:922fa70ba549fce362d2e2871ab542082d66e2aaf0c19480ea453905b01f384e", size = 4834462, upload-time = "2025-10-21T16:22:39.772Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/4c/751061ffa58615a32c31b2d82e8482be8dd4a89154f003147acee90f2be9/httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d", size = 15943, upload-time = "2025-10-10T21:48:22.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/33/08/c1395a292bb23fd03bdf572a1357c5a733d3eecbab877641ceacab23db6e/importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580", size = 55767, upload-time = "2025-01-20T22:21:30.429Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/9d/0fb148dc4d6fa4a7dd1d8378168d9b4cd8d4560a6fbf6f0121c5fc34eb68/importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e", size = 26971, upload-time = "2025-01-20T22:21:29.177Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "isort" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/53/4f3c058e3bace40282876f9b553343376ee687f3c35a525dc79dbd450f88/isort-7.0.0.tar.gz", hash = "sha256:5513527951aadb3ac4292a41a16cbc50dd1642432f5e8c20057d414bdafb4187", size = 805049, upload-time = "2025-10-11T13:30:59.107Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/ed/e3705d6d02b4f7aea715a353c8ce193efd0b5db13e204df895d38734c244/isort-7.0.0-py3-none-any.whl", hash = "sha256:1bcabac8bc3c36c7fb7b98a76c8abb18e0f841a3ba81decac7691008592499c1", size = 94672, upload-time = "2025-10-11T13:30:57.665Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, +] + +[[package]] +name = "markdownify" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/1b/6f2697b51eaca81f08852fd2734745af15718fea10222a1d40f8a239c4ea/markdownify-1.2.0.tar.gz", hash = "sha256:f6c367c54eb24ee953921804dfe6d6575c5e5b42c643955e7242034435de634c", size = 18771, upload-time = "2025-08-09T17:44:15.302Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/e2/7af643acb4cae0741dffffaa7f3f7c9e7ab4046724543ba1777c401d821c/markdownify-1.2.0-py3-none-any.whl", hash = "sha256:48e150a1c4993d4d50f282f725c0111bd9eb25645d41fa2f543708fd44161351", size = 15561, upload-time = "2025-08-09T17:44:14.074Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, + { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, + { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" }, + { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, + { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, + { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, + { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, + { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, + { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, + { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, + { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, + { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, + { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, + { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, + { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, + { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, + { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, + { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, + { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, + { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, + { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" }, + { url = "https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" }, + { url = "https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" }, + { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" }, + { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" }, + { url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" }, + { url = "https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" }, + { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" }, + { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" }, + { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" }, + { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" }, + { url = "https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" }, + { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" }, + { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" }, + { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" }, + { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" }, + { url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" }, + { url = "https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, +] + +[[package]] +name = "mcp" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/79/5724a540df19e192e8606c543cdcf162de8eb435077520cca150f7365ec0/mcp-1.17.0.tar.gz", hash = "sha256:1b57fabf3203240ccc48e39859faf3ae1ccb0b571ff798bbedae800c73c6df90", size = 477951, upload-time = "2025-10-10T12:16:44.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/72/3751feae343a5ad07959df713907b5c3fbaed269d697a14b0c449080cf2e/mcp-1.17.0-py3-none-any.whl", hash = "sha256:0660ef275cada7a545af154db3082f176cf1d2681d5e35ae63e014faf0a35d40", size = 167737, upload-time = "2025-10-10T12:16:42.863Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "moto" +version = "5.1.14" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "cryptography" }, + { name = "jinja2" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "responses" }, + { name = "werkzeug" }, + { name = "xmltodict" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/d9/ec94955a1b14ef45ccbda81f2256b30bf1f21ae5c5739fca14130bb1f048/moto-5.1.14.tar.gz", hash = "sha256:450690abb0b152fea7f93e497ac2172f15d8a838b15f22b514db801a6b857ae4", size = 7264025, upload-time = "2025-10-05T13:32:38.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/a0/4c5955187853536c7d337709074a5f3ef391654a32a3379096b2d16bfd9b/moto-5.1.14-py3-none-any.whl", hash = "sha256:b9767848953beaf6650f1fd91615a3bcef84d93bd00603fa64dae38c656548e8", size = 5384022, upload-time = "2025-10-05T13:32:35.763Z" }, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, +] + +[[package]] +name = "multidict" +version = "6.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/86/33272a544eeb36d66e4d9a920602d1a2f57d4ebea4ef3cdfe5a912574c95/multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6", size = 76135, upload-time = "2025-10-06T14:49:54.26Z" }, + { url = "https://files.pythonhosted.org/packages/91/1c/eb97db117a1ebe46d457a3d235a7b9d2e6dcab174f42d1b67663dd9e5371/multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159", size = 45117, upload-time = "2025-10-06T14:49:55.82Z" }, + { url = "https://files.pythonhosted.org/packages/f1/d8/6c3442322e41fb1dd4de8bd67bfd11cd72352ac131f6368315617de752f1/multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca", size = 43472, upload-time = "2025-10-06T14:49:57.048Z" }, + { url = "https://files.pythonhosted.org/packages/75/3f/e2639e80325af0b6c6febdf8e57cc07043ff15f57fa1ef808f4ccb5ac4cd/multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8", size = 249342, upload-time = "2025-10-06T14:49:58.368Z" }, + { url = "https://files.pythonhosted.org/packages/5d/cc/84e0585f805cbeaa9cbdaa95f9a3d6aed745b9d25700623ac89a6ecff400/multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60", size = 257082, upload-time = "2025-10-06T14:49:59.89Z" }, + { url = "https://files.pythonhosted.org/packages/b0/9c/ac851c107c92289acbbf5cfb485694084690c1b17e555f44952c26ddc5bd/multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4", size = 240704, upload-time = "2025-10-06T14:50:01.485Z" }, + { url = "https://files.pythonhosted.org/packages/50/cc/5f93e99427248c09da95b62d64b25748a5f5c98c7c2ab09825a1d6af0e15/multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f", size = 266355, upload-time = "2025-10-06T14:50:02.955Z" }, + { url = "https://files.pythonhosted.org/packages/ec/0c/2ec1d883ceb79c6f7f6d7ad90c919c898f5d1c6ea96d322751420211e072/multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf", size = 267259, upload-time = "2025-10-06T14:50:04.446Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2d/f0b184fa88d6630aa267680bdb8623fb69cb0d024b8c6f0d23f9a0f406d3/multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32", size = 254903, upload-time = "2025-10-06T14:50:05.98Z" }, + { url = "https://files.pythonhosted.org/packages/06/c9/11ea263ad0df7dfabcad404feb3c0dd40b131bc7f232d5537f2fb1356951/multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036", size = 252365, upload-time = "2025-10-06T14:50:07.511Z" }, + { url = "https://files.pythonhosted.org/packages/41/88/d714b86ee2c17d6e09850c70c9d310abac3d808ab49dfa16b43aba9d53fd/multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec", size = 250062, upload-time = "2025-10-06T14:50:09.074Z" }, + { url = "https://files.pythonhosted.org/packages/15/fe/ad407bb9e818c2b31383f6131ca19ea7e35ce93cf1310fce69f12e89de75/multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e", size = 249683, upload-time = "2025-10-06T14:50:10.714Z" }, + { url = "https://files.pythonhosted.org/packages/8c/a4/a89abdb0229e533fb925e7c6e5c40201c2873efebc9abaf14046a4536ee6/multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64", size = 261254, upload-time = "2025-10-06T14:50:12.28Z" }, + { url = "https://files.pythonhosted.org/packages/8d/aa/0e2b27bd88b40a4fb8dc53dd74eecac70edaa4c1dd0707eb2164da3675b3/multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd", size = 257967, upload-time = "2025-10-06T14:50:14.16Z" }, + { url = "https://files.pythonhosted.org/packages/d0/8e/0c67b7120d5d5f6d874ed85a085f9dc770a7f9d8813e80f44a9fec820bb7/multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288", size = 250085, upload-time = "2025-10-06T14:50:15.639Z" }, + { url = "https://files.pythonhosted.org/packages/ba/55/b73e1d624ea4b8fd4dd07a3bb70f6e4c7c6c5d9d640a41c6ffe5cdbd2a55/multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17", size = 41713, upload-time = "2025-10-06T14:50:17.066Z" }, + { url = "https://files.pythonhosted.org/packages/32/31/75c59e7d3b4205075b4c183fa4ca398a2daf2303ddf616b04ae6ef55cffe/multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390", size = 45915, upload-time = "2025-10-06T14:50:18.264Z" }, + { url = "https://files.pythonhosted.org/packages/31/2a/8987831e811f1184c22bc2e45844934385363ee61c0a2dcfa8f71b87e608/multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e", size = 43077, upload-time = "2025-10-06T14:50:19.853Z" }, + { url = "https://files.pythonhosted.org/packages/e8/68/7b3a5170a382a340147337b300b9eb25a9ddb573bcdfff19c0fa3f31ffba/multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00", size = 83114, upload-time = "2025-10-06T14:50:21.223Z" }, + { url = "https://files.pythonhosted.org/packages/55/5c/3fa2d07c84df4e302060f555bbf539310980362236ad49f50eeb0a1c1eb9/multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb", size = 48442, upload-time = "2025-10-06T14:50:22.871Z" }, + { url = "https://files.pythonhosted.org/packages/fc/56/67212d33239797f9bd91962bb899d72bb0f4c35a8652dcdb8ed049bef878/multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b", size = 46885, upload-time = "2025-10-06T14:50:24.258Z" }, + { url = "https://files.pythonhosted.org/packages/46/d1/908f896224290350721597a61a69cd19b89ad8ee0ae1f38b3f5cd12ea2ac/multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c", size = 242588, upload-time = "2025-10-06T14:50:25.716Z" }, + { url = "https://files.pythonhosted.org/packages/ab/67/8604288bbd68680eee0ab568fdcb56171d8b23a01bcd5cb0c8fedf6e5d99/multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1", size = 249966, upload-time = "2025-10-06T14:50:28.192Z" }, + { url = "https://files.pythonhosted.org/packages/20/33/9228d76339f1ba51e3efef7da3ebd91964d3006217aae13211653193c3ff/multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b", size = 228618, upload-time = "2025-10-06T14:50:29.82Z" }, + { url = "https://files.pythonhosted.org/packages/f8/2d/25d9b566d10cab1c42b3b9e5b11ef79c9111eaf4463b8c257a3bd89e0ead/multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5", size = 257539, upload-time = "2025-10-06T14:50:31.731Z" }, + { url = "https://files.pythonhosted.org/packages/b6/b1/8d1a965e6637fc33de3c0d8f414485c2b7e4af00f42cab3d84e7b955c222/multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad", size = 256345, upload-time = "2025-10-06T14:50:33.26Z" }, + { url = "https://files.pythonhosted.org/packages/ba/0c/06b5a8adbdeedada6f4fb8d8f193d44a347223b11939b42953eeb6530b6b/multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c", size = 247934, upload-time = "2025-10-06T14:50:34.808Z" }, + { url = "https://files.pythonhosted.org/packages/8f/31/b2491b5fe167ca044c6eb4b8f2c9f3b8a00b24c432c365358eadac5d7625/multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5", size = 245243, upload-time = "2025-10-06T14:50:36.436Z" }, + { url = "https://files.pythonhosted.org/packages/61/1a/982913957cb90406c8c94f53001abd9eafc271cb3e70ff6371590bec478e/multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10", size = 235878, upload-time = "2025-10-06T14:50:37.953Z" }, + { url = "https://files.pythonhosted.org/packages/be/c0/21435d804c1a1cf7a2608593f4d19bca5bcbd7a81a70b253fdd1c12af9c0/multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754", size = 243452, upload-time = "2025-10-06T14:50:39.574Z" }, + { url = "https://files.pythonhosted.org/packages/54/0a/4349d540d4a883863191be6eb9a928846d4ec0ea007d3dcd36323bb058ac/multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c", size = 252312, upload-time = "2025-10-06T14:50:41.612Z" }, + { url = "https://files.pythonhosted.org/packages/26/64/d5416038dbda1488daf16b676e4dbfd9674dde10a0cc8f4fc2b502d8125d/multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762", size = 246935, upload-time = "2025-10-06T14:50:43.972Z" }, + { url = "https://files.pythonhosted.org/packages/9f/8c/8290c50d14e49f35e0bd4abc25e1bc7711149ca9588ab7d04f886cdf03d9/multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6", size = 243385, upload-time = "2025-10-06T14:50:45.648Z" }, + { url = "https://files.pythonhosted.org/packages/ef/a0/f83ae75e42d694b3fbad3e047670e511c138be747bc713cf1b10d5096416/multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d", size = 47777, upload-time = "2025-10-06T14:50:47.154Z" }, + { url = "https://files.pythonhosted.org/packages/dc/80/9b174a92814a3830b7357307a792300f42c9e94664b01dee8e457551fa66/multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6", size = 53104, upload-time = "2025-10-06T14:50:48.851Z" }, + { url = "https://files.pythonhosted.org/packages/cc/28/04baeaf0428d95bb7a7bea0e691ba2f31394338ba424fb0679a9ed0f4c09/multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792", size = 45503, upload-time = "2025-10-06T14:50:50.16Z" }, + { url = "https://files.pythonhosted.org/packages/e2/b1/3da6934455dd4b261d4c72f897e3a5728eba81db59959f3a639245891baa/multidict-6.7.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bab1e4aff7adaa34410f93b1f8e57c4b36b9af0426a76003f441ee1d3c7e842", size = 75128, upload-time = "2025-10-06T14:50:51.92Z" }, + { url = "https://files.pythonhosted.org/packages/14/2c/f069cab5b51d175a1a2cb4ccdf7a2c2dabd58aa5bd933fa036a8d15e2404/multidict-6.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b8512bac933afc3e45fb2b18da8e59b78d4f408399a960339598374d4ae3b56b", size = 44410, upload-time = "2025-10-06T14:50:53.275Z" }, + { url = "https://files.pythonhosted.org/packages/42/e2/64bb41266427af6642b6b128e8774ed84c11b80a90702c13ac0a86bb10cc/multidict-6.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:79dcf9e477bc65414ebfea98ffd013cb39552b5ecd62908752e0e413d6d06e38", size = 43205, upload-time = "2025-10-06T14:50:54.911Z" }, + { url = "https://files.pythonhosted.org/packages/02/68/6b086fef8a3f1a8541b9236c594f0c9245617c29841f2e0395d979485cde/multidict-6.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:31bae522710064b5cbeddaf2e9f32b1abab70ac6ac91d42572502299e9953128", size = 245084, upload-time = "2025-10-06T14:50:56.369Z" }, + { url = "https://files.pythonhosted.org/packages/15/ee/f524093232007cd7a75c1d132df70f235cfd590a7c9eaccd7ff422ef4ae8/multidict-6.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a0df7ff02397bb63e2fd22af2c87dfa39e8c7f12947bc524dbdc528282c7e34", size = 252667, upload-time = "2025-10-06T14:50:57.991Z" }, + { url = "https://files.pythonhosted.org/packages/02/a5/eeb3f43ab45878f1895118c3ef157a480db58ede3f248e29b5354139c2c9/multidict-6.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a0222514e8e4c514660e182d5156a415c13ef0aabbd71682fc714e327b95e99", size = 233590, upload-time = "2025-10-06T14:50:59.589Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1e/76d02f8270b97269d7e3dbd45644b1785bda457b474315f8cf999525a193/multidict-6.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2397ab4daaf2698eb51a76721e98db21ce4f52339e535725de03ea962b5a3202", size = 264112, upload-time = "2025-10-06T14:51:01.183Z" }, + { url = "https://files.pythonhosted.org/packages/76/0b/c28a70ecb58963847c2a8efe334904cd254812b10e535aefb3bcce513918/multidict-6.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8891681594162635948a636c9fe0ff21746aeb3dd5463f6e25d9bea3a8a39ca1", size = 261194, upload-time = "2025-10-06T14:51:02.794Z" }, + { url = "https://files.pythonhosted.org/packages/b4/63/2ab26e4209773223159b83aa32721b4021ffb08102f8ac7d689c943fded1/multidict-6.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18706cc31dbf402a7945916dd5cddf160251b6dab8a2c5f3d6d5a55949f676b3", size = 248510, upload-time = "2025-10-06T14:51:04.724Z" }, + { url = "https://files.pythonhosted.org/packages/93/cd/06c1fa8282af1d1c46fd55c10a7930af652afdce43999501d4d68664170c/multidict-6.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f844a1bbf1d207dd311a56f383f7eda2d0e134921d45751842d8235e7778965d", size = 248395, upload-time = "2025-10-06T14:51:06.306Z" }, + { url = "https://files.pythonhosted.org/packages/99/ac/82cb419dd6b04ccf9e7e61befc00c77614fc8134362488b553402ecd55ce/multidict-6.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d4393e3581e84e5645506923816b9cc81f5609a778c7e7534054091acc64d1c6", size = 239520, upload-time = "2025-10-06T14:51:08.091Z" }, + { url = "https://files.pythonhosted.org/packages/fa/f3/a0f9bf09493421bd8716a362e0cd1d244f5a6550f5beffdd6b47e885b331/multidict-6.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:fbd18dc82d7bf274b37aa48d664534330af744e03bccf696d6f4c6042e7d19e7", size = 245479, upload-time = "2025-10-06T14:51:10.365Z" }, + { url = "https://files.pythonhosted.org/packages/8d/01/476d38fc73a212843f43c852b0eee266b6971f0e28329c2184a8df90c376/multidict-6.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b6234e14f9314731ec45c42fc4554b88133ad53a09092cc48a88e771c125dadb", size = 258903, upload-time = "2025-10-06T14:51:12.466Z" }, + { url = "https://files.pythonhosted.org/packages/49/6d/23faeb0868adba613b817d0e69c5f15531b24d462af8012c4f6de4fa8dc3/multidict-6.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:08d4379f9744d8f78d98c8673c06e202ffa88296f009c71bbafe8a6bf847d01f", size = 252333, upload-time = "2025-10-06T14:51:14.48Z" }, + { url = "https://files.pythonhosted.org/packages/1e/cc/48d02ac22b30fa247f7dad82866e4b1015431092f4ba6ebc7e77596e0b18/multidict-6.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9fe04da3f79387f450fd0061d4dd2e45a72749d31bf634aecc9e27f24fdc4b3f", size = 243411, upload-time = "2025-10-06T14:51:16.072Z" }, + { url = "https://files.pythonhosted.org/packages/4a/03/29a8bf5a18abf1fe34535c88adbdfa88c9fb869b5a3b120692c64abe8284/multidict-6.7.0-cp314-cp314-win32.whl", hash = "sha256:fbafe31d191dfa7c4c51f7a6149c9fb7e914dcf9ffead27dcfd9f1ae382b3885", size = 40940, upload-time = "2025-10-06T14:51:17.544Z" }, + { url = "https://files.pythonhosted.org/packages/82/16/7ed27b680791b939de138f906d5cf2b4657b0d45ca6f5dd6236fdddafb1a/multidict-6.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2f67396ec0310764b9222a1728ced1ab638f61aadc6226f17a71dd9324f9a99c", size = 45087, upload-time = "2025-10-06T14:51:18.875Z" }, + { url = "https://files.pythonhosted.org/packages/cd/3c/e3e62eb35a1950292fe39315d3c89941e30a9d07d5d2df42965ab041da43/multidict-6.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:ba672b26069957ee369cfa7fc180dde1fc6f176eaf1e6beaf61fbebbd3d9c000", size = 42368, upload-time = "2025-10-06T14:51:20.225Z" }, + { url = "https://files.pythonhosted.org/packages/8b/40/cd499bd0dbc5f1136726db3153042a735fffd0d77268e2ee20d5f33c010f/multidict-6.7.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:c1dcc7524066fa918c6a27d61444d4ee7900ec635779058571f70d042d86ed63", size = 82326, upload-time = "2025-10-06T14:51:21.588Z" }, + { url = "https://files.pythonhosted.org/packages/13/8a/18e031eca251c8df76daf0288e6790561806e439f5ce99a170b4af30676b/multidict-6.7.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e0b36c2d388dc7b6ced3406671b401e84ad7eb0656b8f3a2f46ed0ce483718", size = 48065, upload-time = "2025-10-06T14:51:22.93Z" }, + { url = "https://files.pythonhosted.org/packages/40/71/5e6701277470a87d234e433fb0a3a7deaf3bcd92566e421e7ae9776319de/multidict-6.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a7baa46a22e77f0988e3b23d4ede5513ebec1929e34ee9495be535662c0dfe2", size = 46475, upload-time = "2025-10-06T14:51:24.352Z" }, + { url = "https://files.pythonhosted.org/packages/fe/6a/bab00cbab6d9cfb57afe1663318f72ec28289ea03fd4e8236bb78429893a/multidict-6.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bf77f54997a9166a2f5675d1201520586439424c2511723a7312bdb4bcc034e", size = 239324, upload-time = "2025-10-06T14:51:25.822Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5f/8de95f629fc22a7769ade8b41028e3e5a822c1f8904f618d175945a81ad3/multidict-6.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e011555abada53f1578d63389610ac8a5400fc70ce71156b0aa30d326f1a5064", size = 246877, upload-time = "2025-10-06T14:51:27.604Z" }, + { url = "https://files.pythonhosted.org/packages/23/b4/38881a960458f25b89e9f4a4fdcb02ac101cfa710190db6e5528841e67de/multidict-6.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:28b37063541b897fd6a318007373930a75ca6d6ac7c940dbe14731ffdd8d498e", size = 225824, upload-time = "2025-10-06T14:51:29.664Z" }, + { url = "https://files.pythonhosted.org/packages/1e/39/6566210c83f8a261575f18e7144736059f0c460b362e96e9cf797a24b8e7/multidict-6.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05047ada7a2fde2631a0ed706f1fd68b169a681dfe5e4cf0f8e4cb6618bbc2cd", size = 253558, upload-time = "2025-10-06T14:51:31.684Z" }, + { url = "https://files.pythonhosted.org/packages/00/a3/67f18315100f64c269f46e6c0319fa87ba68f0f64f2b8e7fd7c72b913a0b/multidict-6.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:716133f7d1d946a4e1b91b1756b23c088881e70ff180c24e864c26192ad7534a", size = 252339, upload-time = "2025-10-06T14:51:33.699Z" }, + { url = "https://files.pythonhosted.org/packages/c8/2a/1cb77266afee2458d82f50da41beba02159b1d6b1f7973afc9a1cad1499b/multidict-6.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d1bed1b467ef657f2a0ae62844a607909ef1c6889562de5e1d505f74457d0b96", size = 244895, upload-time = "2025-10-06T14:51:36.189Z" }, + { url = "https://files.pythonhosted.org/packages/dd/72/09fa7dd487f119b2eb9524946ddd36e2067c08510576d43ff68469563b3b/multidict-6.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ca43bdfa5d37bd6aee89d85e1d0831fb86e25541be7e9d376ead1b28974f8e5e", size = 241862, upload-time = "2025-10-06T14:51:41.291Z" }, + { url = "https://files.pythonhosted.org/packages/65/92/bc1f8bd0853d8669300f732c801974dfc3702c3eeadae2f60cef54dc69d7/multidict-6.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:44b546bd3eb645fd26fb949e43c02a25a2e632e2ca21a35e2e132c8105dc8599", size = 232376, upload-time = "2025-10-06T14:51:43.55Z" }, + { url = "https://files.pythonhosted.org/packages/09/86/ac39399e5cb9d0c2ac8ef6e10a768e4d3bc933ac808d49c41f9dc23337eb/multidict-6.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a6ef16328011d3f468e7ebc326f24c1445f001ca1dec335b2f8e66bed3006394", size = 240272, upload-time = "2025-10-06T14:51:45.265Z" }, + { url = "https://files.pythonhosted.org/packages/3d/b6/fed5ac6b8563ec72df6cb1ea8dac6d17f0a4a1f65045f66b6d3bf1497c02/multidict-6.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5aa873cbc8e593d361ae65c68f85faadd755c3295ea2c12040ee146802f23b38", size = 248774, upload-time = "2025-10-06T14:51:46.836Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8d/b954d8c0dc132b68f760aefd45870978deec6818897389dace00fcde32ff/multidict-6.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:3d7b6ccce016e29df4b7ca819659f516f0bc7a4b3efa3bb2012ba06431b044f9", size = 242731, upload-time = "2025-10-06T14:51:48.541Z" }, + { url = "https://files.pythonhosted.org/packages/16/9d/a2dac7009125d3540c2f54e194829ea18ac53716c61b655d8ed300120b0f/multidict-6.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:171b73bd4ee683d307599b66793ac80981b06f069b62eea1c9e29c9241aa66b0", size = 240193, upload-time = "2025-10-06T14:51:50.355Z" }, + { url = "https://files.pythonhosted.org/packages/39/ca/c05f144128ea232ae2178b008d5011d4e2cea86e4ee8c85c2631b1b94802/multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13", size = 48023, upload-time = "2025-10-06T14:51:51.883Z" }, + { url = "https://files.pythonhosted.org/packages/ba/8f/0a60e501584145588be1af5cc829265701ba3c35a64aec8e07cbb71d39bb/multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd", size = 53507, upload-time = "2025-10-06T14:51:53.672Z" }, + { url = "https://files.pythonhosted.org/packages/7f/ae/3148b988a9c6239903e786eac19c889fab607c31d6efa7fb2147e5680f23/multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827", size = 44804, upload-time = "2025-10-06T14:51:55.415Z" }, + { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, +] + +[[package]] +name = "mypy" +version = "1.18.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/77/8f0d0001ffad290cef2f7f216f96c814866248a0b92a722365ed54648e7e/mypy-1.18.2.tar.gz", hash = "sha256:06a398102a5f203d7477b2923dda3634c36727fa5c237d8f859ef90c42a9924b", size = 3448846, upload-time = "2025-09-19T00:11:10.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/04/7f462e6fbba87a72bc8097b93f6842499c428a6ff0c81dd46948d175afe8/mypy-1.18.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:07b8b0f580ca6d289e69209ec9d3911b4a26e5abfde32228a288eb79df129fcc", size = 12898728, upload-time = "2025-09-19T00:10:01.33Z" }, + { url = "https://files.pythonhosted.org/packages/99/5b/61ed4efb64f1871b41fd0b82d29a64640f3516078f6c7905b68ab1ad8b13/mypy-1.18.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ed4482847168439651d3feee5833ccedbf6657e964572706a2adb1f7fa4dfe2e", size = 11910758, upload-time = "2025-09-19T00:10:42.607Z" }, + { url = "https://files.pythonhosted.org/packages/3c/46/d297d4b683cc89a6e4108c4250a6a6b717f5fa96e1a30a7944a6da44da35/mypy-1.18.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ad2afadd1e9fea5cf99a45a822346971ede8685cc581ed9cd4d42eaf940986", size = 12475342, upload-time = "2025-09-19T00:11:00.371Z" }, + { url = "https://files.pythonhosted.org/packages/83/45/4798f4d00df13eae3bfdf726c9244bcb495ab5bd588c0eed93a2f2dd67f3/mypy-1.18.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a431a6f1ef14cf8c144c6b14793a23ec4eae3db28277c358136e79d7d062f62d", size = 13338709, upload-time = "2025-09-19T00:11:03.358Z" }, + { url = "https://files.pythonhosted.org/packages/d7/09/479f7358d9625172521a87a9271ddd2441e1dab16a09708f056e97007207/mypy-1.18.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7ab28cc197f1dd77a67e1c6f35cd1f8e8b73ed2217e4fc005f9e6a504e46e7ba", size = 13529806, upload-time = "2025-09-19T00:10:26.073Z" }, + { url = "https://files.pythonhosted.org/packages/71/cf/ac0f2c7e9d0ea3c75cd99dff7aec1c9df4a1376537cb90e4c882267ee7e9/mypy-1.18.2-cp313-cp313-win_amd64.whl", hash = "sha256:0e2785a84b34a72ba55fb5daf079a1003a34c05b22238da94fcae2bbe46f3544", size = 9833262, upload-time = "2025-09-19T00:10:40.035Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0c/7d5300883da16f0063ae53996358758b2a2df2a09c72a5061fa79a1f5006/mypy-1.18.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:62f0e1e988ad41c2a110edde6c398383a889d95b36b3e60bcf155f5164c4fdce", size = 12893775, upload-time = "2025-09-19T00:10:03.814Z" }, + { url = "https://files.pythonhosted.org/packages/50/df/2cffbf25737bdb236f60c973edf62e3e7b4ee1c25b6878629e88e2cde967/mypy-1.18.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8795a039bab805ff0c1dfdb8cd3344642c2b99b8e439d057aba30850b8d3423d", size = 11936852, upload-time = "2025-09-19T00:10:51.631Z" }, + { url = "https://files.pythonhosted.org/packages/be/50/34059de13dd269227fb4a03be1faee6e2a4b04a2051c82ac0a0b5a773c9a/mypy-1.18.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6ca1e64b24a700ab5ce10133f7ccd956a04715463d30498e64ea8715236f9c9c", size = 12480242, upload-time = "2025-09-19T00:11:07.955Z" }, + { url = "https://files.pythonhosted.org/packages/5b/11/040983fad5132d85914c874a2836252bbc57832065548885b5bb5b0d4359/mypy-1.18.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d924eef3795cc89fecf6bedc6ed32b33ac13e8321344f6ddbf8ee89f706c05cb", size = 13326683, upload-time = "2025-09-19T00:09:55.572Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ba/89b2901dd77414dd7a8c8729985832a5735053be15b744c18e4586e506ef/mypy-1.18.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:20c02215a080e3a2be3aa50506c67242df1c151eaba0dcbc1e4e557922a26075", size = 13514749, upload-time = "2025-09-19T00:10:44.827Z" }, + { url = "https://files.pythonhosted.org/packages/25/bc/cc98767cffd6b2928ba680f3e5bc969c4152bf7c2d83f92f5a504b92b0eb/mypy-1.18.2-cp314-cp314-win_amd64.whl", hash = "sha256:749b5f83198f1ca64345603118a6f01a4e99ad4bf9d103ddc5a3200cc4614adf", size = 9982959, upload-time = "2025-09-19T00:10:37.344Z" }, + { url = "https://files.pythonhosted.org/packages/87/e3/be76d87158ebafa0309946c4a73831974d4d6ab4f4ef40c3b53a385a66fd/mypy-1.18.2-py3-none-any.whl", hash = "sha256:22a1748707dd62b58d2ae53562ffc4d7f8bcc727e8ac7cbc69c053ddc874d47e", size = 2352367, upload-time = "2025-09-19T00:10:15.489Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "importlib-metadata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/8d/1f5a45fbcb9a7d87809d460f09dc3399e3fbd31d7f3e14888345e9d29951/opentelemetry_api-1.33.1.tar.gz", hash = "sha256:1c6055fc0a2d3f23a50c7e17e16ef75ad489345fd3df1f8b8af7c0bbf8a109e8", size = 65002, upload-time = "2025-05-16T18:52:41.146Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/44/4c45a34def3506122ae61ad684139f0bbc4e00c39555d4f7e20e0e001c8a/opentelemetry_api-1.33.1-py3-none-any.whl", hash = "sha256:4db83ebcf7ea93e64637ec6ee6fabee45c5cbe4abd9cf3da95c43828ddb50b83", size = 65771, upload-time = "2025-05-16T18:52:17.419Z" }, +] + +[[package]] +name = "opentelemetry-distro" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dd/0b/0012cb5947c255d6755cb91e3b9fd9bb1876b7e14d5ab67131c030fd90b2/opentelemetry_distro-0.54b1.tar.gz", hash = "sha256:61d6b97bb7a245fddbb829345bb4ad18be39eb52f770fab89a127107fca3149f", size = 2593, upload-time = "2025-05-16T19:03:19.71Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b1/5f008a2909d59c02c7b88aa595502d438ca21c15e88edd7620c697a56ce8/opentelemetry_distro-0.54b1-py3-none-any.whl", hash = "sha256:009486513b32b703e275bb2f9ccaf5791676bbf5e2dcfdd90201ddc8f56f122b", size = 3348, upload-time = "2025-05-16T19:02:11.624Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7a/18/a1ec9dcb6713a48b4bdd10f1c1e4d5d2489d3912b80d2bcc059a9a842836/opentelemetry_exporter_otlp_proto_common-1.33.1.tar.gz", hash = "sha256:c57b3fa2d0595a21c4ed586f74f948d259d9949b58258f11edb398f246bec131", size = 20828, upload-time = "2025-05-16T18:52:43.795Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/09/52/9bcb17e2c29c1194a28e521b9d3f2ced09028934c3c52a8205884c94b2df/opentelemetry_exporter_otlp_proto_common-1.33.1-py3-none-any.whl", hash = "sha256:b81c1de1ad349785e601d02715b2d29d6818aed2c809c20219f3d1f20b038c36", size = 18839, upload-time = "2025-05-16T18:52:22.447Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/5f/75ef5a2a917bd0e6e7b83d3fb04c99236ee958f6352ba3019ea9109ae1a6/opentelemetry_exporter_otlp_proto_grpc-1.33.1.tar.gz", hash = "sha256:345696af8dc19785fac268c8063f3dc3d5e274c774b308c634f39d9c21955728", size = 22556, upload-time = "2025-05-16T18:52:44.76Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/ec/6047e230bb6d092c304511315b13893b1c9d9260044dd1228c9d48b6ae0e/opentelemetry_exporter_otlp_proto_grpc-1.33.1-py3-none-any.whl", hash = "sha256:7e8da32c7552b756e75b4f9e9c768a61eb47dee60b6550b37af541858d669ce1", size = 18591, upload-time = "2025-05-16T18:52:23.772Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/48/e4314ac0ed2ad043c07693d08c9c4bf5633857f5b72f2fefc64fd2b114f6/opentelemetry_exporter_otlp_proto_http-1.33.1.tar.gz", hash = "sha256:46622d964a441acb46f463ebdc26929d9dec9efb2e54ef06acdc7305e8593c38", size = 15353, upload-time = "2025-05-16T18:52:45.522Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/ba/5a4ad007588016fe37f8d36bf08f325fe684494cc1e88ca8fa064a4c8f57/opentelemetry_exporter_otlp_proto_http-1.33.1-py3-none-any.whl", hash = "sha256:ebd6c523b89a2ecba0549adb92537cc2bf647b4ee61afbbd5a4c6535aa3da7cf", size = 17733, upload-time = "2025-05-16T18:52:25.137Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/fd/5756aea3fdc5651b572d8aef7d94d22a0a36e49c8b12fcb78cb905ba8896/opentelemetry_instrumentation-0.54b1.tar.gz", hash = "sha256:7658bf2ff914b02f246ec14779b66671508125c0e4227361e56b5ebf6cef0aec", size = 28436, upload-time = "2025-05-16T19:03:22.223Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/89/0790abc5d9c4fc74bd3e03cb87afe2c820b1d1a112a723c1163ef32453ee/opentelemetry_instrumentation-0.54b1-py3-none-any.whl", hash = "sha256:a4ae45f4a90c78d7006c51524f57cd5aa1231aef031eae905ee34d5423f5b198", size = 31019, upload-time = "2025-05-16T19:02:15.611Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aio-pika" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/79/e7/b92741e7dc1c38d512fcd0c3d6b3270cbbe3f3965f4280810c3f48688b1f/opentelemetry_instrumentation_aio_pika-0.54b1.tar.gz", hash = "sha256:a1b9f2d2735f1e9808bac263776f445c446c19580c3a24d0ecc02e289b55b21d", size = 10092, upload-time = "2025-05-16T19:03:25.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/46/b77e99e0e3a4f473e8a38e46d12269a5ef28ed0f7d52306a06c6b82f2aff/opentelemetry_instrumentation_aio_pika-0.54b1-py3-none-any.whl", hash = "sha256:c1d1a52296937e54a8c69878434c86bdc038d53c1eba6f133c0e63f479484990", size = 13462, upload-time = "2025-05-16T19:02:16.816Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aiohttp-client" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/fe/535efdb090543cb8e23149271c3ef27e37d3862865c52e2b2b58f7b5cb8d/opentelemetry_instrumentation_aiohttp_client-0.54b1.tar.gz", hash = "sha256:c51c643a5587b9efce6c4cae0f5e2202a25fac69caa89643465f57d5d8ba3789", size = 13643, upload-time = "2025-05-16T19:03:27.156Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/de/07f25301d57bb83f29ee1eb5503871bddc132d4362ff9897c605e8c54c04/opentelemetry_instrumentation_aiohttp_client-0.54b1-py3-none-any.whl", hash = "sha256:d9b53c04865e8a4c984c1330e4f1d5570bc28543833a4718cbe4265091ee0e71", size = 11661, upload-time = "2025-05-16T19:02:17.827Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aiopg" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/4b/ef14e66e9b7b8bf859844c08d78bbb921c7ec41e2008bd657942a15a5797/opentelemetry_instrumentation_aiopg-0.54b1.tar.gz", hash = "sha256:d00a6845bb8f8d45e81d42bc8ba38df88bb7efdc2cd0e572968dc5359f5b8355", size = 11808, upload-time = "2025-05-16T19:03:29.548Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/eb/1b7d0ff786ec1734766b082ebceea729c33b5f7d986816411fb8feb74373/opentelemetry_instrumentation_aiopg-0.54b1-py3-none-any.whl", hash = "sha256:1d162793c4dee9db469d89c962f161801027abc55002eeb23c076ab5f1f334d4", size = 12455, upload-time = "2025-05-16T19:02:21.718Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-asgi" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asgiref" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/f7/a3377f9771947f4d3d59c96841d3909274f446c030dbe8e4af871695ddee/opentelemetry_instrumentation_asgi-0.54b1.tar.gz", hash = "sha256:ab4df9776b5f6d56a78413c2e8bbe44c90694c67c844a1297865dc1bd926ed3c", size = 24230, upload-time = "2025-05-16T19:03:30.234Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/24/7a6f0ae79cae49927f528ecee2db55a5bddd87b550e310ce03451eae7491/opentelemetry_instrumentation_asgi-0.54b1-py3-none-any.whl", hash = "sha256:84674e822b89af563b283a5283c2ebb9ed585d1b80a1c27fb3ac20b562e9f9fc", size = 16338, upload-time = "2025-05-16T19:02:22.808Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-asyncpg" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/66/d2e2ccbb13cf6d6f6c7c8d907021e9bd8b56585c59e28d99ebc74138c3d1/opentelemetry_instrumentation_asyncpg-0.54b1.tar.gz", hash = "sha256:58e50de68b40221c2d6e22d626e5d03d9d6b950ba59504a5fc060c95cdc7c4fb", size = 8717, upload-time = "2025-05-16T19:03:32.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/93/c17ef16b63d6e073f875bfe4624b9711269a3d208ee11cdfc5cc1b3537d8/opentelemetry_instrumentation_asyncpg-0.54b1-py3-none-any.whl", hash = "sha256:2348843f0c6f0cefb0badc974cbeae244ee89c57e1ae2a587e5f641c23e16fdc", size = 10062, upload-time = "2025-05-16T19:02:26.371Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-aws-lambda" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-propagator-aws-xray" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/77/fd/57a1360203efa8410637679b00b61603782dd84ca9c0b3619192c07e0d1f/opentelemetry_instrumentation_aws_lambda-0.54b1.tar.gz", hash = "sha256:c40f011581abf3cd28d8833fb6218bac75eec3adda7774ff2685f41b279a9fdd", size = 17904, upload-time = "2025-05-16T19:03:33.658Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/f3/c08fee6ae3f2d2b461ee7e7c2b3ac8de52281b236f3593146ba456cd0db7/opentelemetry_instrumentation_aws_lambda-0.54b1-py3-none-any.whl", hash = "sha256:51bc4301b9733fcda616d68197ee5f15108175a217f5fd8db349d53ba14cc172", size = 12484, upload-time = "2025-05-16T19:02:27.421Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-boto" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/b5/5b777b6b1f3ce586141485584a52f0fdd3d63398011b0d02feb822f46f0a/opentelemetry_instrumentation_boto-0.54b1.tar.gz", hash = "sha256:83407a5f6f69cd0bebff802da0d228eb13196a1de713b43e1348b77f80033c6a", size = 9716, upload-time = "2025-05-16T19:03:34.364Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/5e/8f8bfb5fa1c51aa66b6af7e4a64d9be9dc9aba6ff2d8c0f405204a5069ea/opentelemetry_instrumentation_boto-0.54b1-py3-none-any.whl", hash = "sha256:b52b1216bee095858bcd0d992360911b6e870acc4f4c9090f8ca1081d9fdede6", size = 10146, upload-time = "2025-05-16T19:02:28.417Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-boto3sqs" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/44/232d566fb06a640f386ce2bdd271e64ecaaae9bdcc5c68f84f2552c5e585/opentelemetry_instrumentation_boto3sqs-0.54b1.tar.gz", hash = "sha256:c8bf67bc836bb66da6a1b000e6c1b07229481c75731ea6a0ed0b59b256e035b9", size = 11715, upload-time = "2025-05-16T19:03:35.028Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/db/62ebd5d172eb3997038f24a238792b5ebe604bc70dbda1cba91c3d36a655/opentelemetry_instrumentation_boto3sqs-0.54b1-py3-none-any.whl", hash = "sha256:40ae98fe53584e5b1d61725fc8e153a1be2d6b308f65f56deb4f276a23b43cf4", size = 11672, upload-time = "2025-05-16T19:02:29.62Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-botocore" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-propagator-aws-xray" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/c9/88127b0714881e6801e4921bca445de634b0b3568e607ccc4a606f711ea7/opentelemetry_instrumentation_botocore-0.54b1.tar.gz", hash = "sha256:54f7b0b48398dfc8b8e98deec89df5b4c8c359d803a0d6c8ce4bd972d50c03dd", size = 110252, upload-time = "2025-05-16T19:03:35.805Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/0e/22e35a74e6566feacd8a80f5899242920765f134c0edbb0b943ddb369c0e/opentelemetry_instrumentation_botocore-0.54b1-py3-none-any.whl", hash = "sha256:74d3a36d5bab8447669b25f915a3db6c37ae14a5faa198500471d5b1bbd1902f", size = 35461, upload-time = "2025-05-16T19:02:30.621Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-cassandra" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/fb/9a405a3fed8389603bbcd63a74ea303d55992c2c7e9abdc8daeba1945fa9/opentelemetry_instrumentation_cassandra-0.54b1.tar.gz", hash = "sha256:f9a79c0139888eaedb58bb50da42709c7bc6ead9b9f5263164873e4275cefbce", size = 7581, upload-time = "2025-05-16T19:03:36.591Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/ca/e726bfd5dc40eef7961aa5a7a5e7238eb407c84bd709cb531abd09c62302/opentelemetry_instrumentation_cassandra-0.54b1-py3-none-any.whl", hash = "sha256:81b8d963a02ea43ea4a9d00c88cd0b01dda69daf914d6e4984b2e98b1e8fdeb7", size = 8899, upload-time = "2025-05-16T19:02:31.738Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-celery" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/71/4ac353874e0f7ca93591e1a74b7a290dec2027733bbb31bd76da3a74f97f/opentelemetry_instrumentation_celery-0.54b1.tar.gz", hash = "sha256:f2bd019afe9286214083ae2db95ed24adf9a0aa2e943177462d64ceb8380d78e", size = 14778, upload-time = "2025-05-16T19:03:37.376Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/be/90e2b7d26915639cfcdf6e200b309c9d64027ff752c56145bc149cd67d68/opentelemetry_instrumentation_celery-0.54b1-py3-none-any.whl", hash = "sha256:892ec6bf829a0d60cf3bffd1a8bb6fd8055f1194167b4e132e33321de8e05c24", size = 13809, upload-time = "2025-05-16T19:02:33.046Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-confluent-kafka" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/a8/472ddb40f8caab693de4a5c2084b1513b67f879060e5e46cfb2f96bc0872/opentelemetry_instrumentation_confluent_kafka-0.54b1.tar.gz", hash = "sha256:1e378b5c88170c7fcd23b07054a61d2af7a7ec5af1aba120446514ef27b7ad82", size = 11615, upload-time = "2025-05-16T19:03:39.409Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/9e/107e45d5eb41961a187c28eb4d0da02d133d371dfdd149b1f7ef96e78926/opentelemetry_instrumentation_confluent_kafka-0.54b1-py3-none-any.whl", hash = "sha256:9dc896233a973705e1ac25950ababe23322338f4cd3fff0ccd509759aeb2e802", size = 12624, upload-time = "2025-05-16T19:02:35.018Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-dbapi" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7b/b7/b74e2c7c858cde8909516cbe77cb0e841167d38795c90df524d84440e1f1/opentelemetry_instrumentation_dbapi-0.54b1.tar.gz", hash = "sha256:69421c36994114040d197f7e846c01869d663084c6c2025e85b2d6cfce2f8299", size = 14145, upload-time = "2025-05-16T19:03:40.074Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/6a/98d409ae5ca60ae4e41295a42256d81bb96bd5a7a386ca0343e27494d53d/opentelemetry_instrumentation_dbapi-0.54b1-py3-none-any.whl", hash = "sha256:21bc20cd878a78bf44bab686e9679cef1eed77e53c754c0a09f0ca49f5fd0283", size = 12450, upload-time = "2025-05-16T19:02:36.041Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-django" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/93/8d194bda118fc4c369b9a3091c39eec384137b46f33421272359883c53d9/opentelemetry_instrumentation_django-0.54b1.tar.gz", hash = "sha256:38414f989f60e9dba82928e13f6a20a26baf5cc700f1d891f27e0703ca577802", size = 24866, upload-time = "2025-05-16T19:03:41.183Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/75/1b0ae1b8b7d6a85d5d54e8092c84b18669bd5da6f5ceb3410047674db3c0/opentelemetry_instrumentation_django-0.54b1-py3-none-any.whl", hash = "sha256:462fbd577991021f56152df21ca1fdcd7c4abdc10dd44254a44d515b8e3d61ca", size = 19541, upload-time = "2025-05-16T19:02:37.4Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-elasticsearch" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/8b/e7d57ab4aab2d63e2094001e0301d848ec83b86ee428e538101922cd27ed/opentelemetry_instrumentation_elasticsearch-0.54b1.tar.gz", hash = "sha256:d5b6996919679c91e5791457de24d9ff6472887a4e1426b8f2345c52f6ba6f10", size = 14379, upload-time = "2025-05-16T19:03:41.939Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/bd/4919e716190454895c895c37745bbf22d59231d864862a9bc4ac68f4c8d8/opentelemetry_instrumentation_elasticsearch-0.54b1-py3-none-any.whl", hash = "sha256:9f5c968954d72f15e133d06760294f13886d98c4da626374168094035f6dec50", size = 12607, upload-time = "2025-05-16T19:02:38.944Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-falcon" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1a/7d/73df17199014ea57ae71bb128a5155ea4d81d86d0b61d4c852cec485ccb1/opentelemetry_instrumentation_falcon-0.54b1.tar.gz", hash = "sha256:06e72aac39fd4ac65555a8cb056428d7c4366bb1fafa65e60474d6e3d6c3eada", size = 17176, upload-time = "2025-05-16T19:03:42.651Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/40/65a3cecd312ac380477ff44306c737b6a3d0cb7ec1ec28e09aacdc8904ac/opentelemetry_instrumentation_falcon-0.54b1-py3-none-any.whl", hash = "sha256:6eaf3bf714a6e3398a5ddc132c3e77de851331ee00989302f88a4d4ce829e679", size = 14206, upload-time = "2025-05-16T19:02:40.082Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-fastapi" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-asgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/3b/9a262cdc1a4defef0e52afebdde3e8add658cc6f922e39e9dcee0da98349/opentelemetry_instrumentation_fastapi-0.54b1.tar.gz", hash = "sha256:1fcad19cef0db7092339b571a59e6f3045c9b58b7fd4670183f7addc459d78df", size = 19325, upload-time = "2025-05-16T19:03:45.359Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/9c/6b2b0f9d6c5dea7528ae0bf4e461dd765b0ae35f13919cd452970bb0d0b3/opentelemetry_instrumentation_fastapi-0.54b1-py3-none-any.whl", hash = "sha256:fb247781cfa75fd09d3d8713c65e4a02bd1e869b00e2c322cc516d4b5429860c", size = 12125, upload-time = "2025-05-16T19:02:41.172Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-flask" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/b8/d46dcb20889713a355de418a0d31d552089bf4454e1baf48c7b6b3fb6035/opentelemetry_instrumentation_flask-0.54b1.tar.gz", hash = "sha256:683f9963f06d065fc07ceaffa106df1f6f20075318530328f69fde39dfb1192f", size = 19221, upload-time = "2025-05-16T19:03:46.063Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/85/aaaed416e9ee7d5c4ab98b3dba3d66675f44cfdcbf5d683e144a10fafad0/opentelemetry_instrumentation_flask-0.54b1-py3-none-any.whl", hash = "sha256:1f9d44b8ca9bc7d52e2aeb539bc64a88d6fc04f2f67c1ffb278148c99cc8ec6a", size = 14626, upload-time = "2025-05-16T19:02:42.202Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-grpc" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/7a/a2e879f5b39d77091181c944064bf99e11646a58242f1e8efa829646bcb1/opentelemetry_instrumentation_grpc-0.54b1.tar.gz", hash = "sha256:4198aab2a380b2807a50112892f9b8a50772169a3722fa99634ef70c6c017ea2", size = 30926, upload-time = "2025-05-16T19:03:46.813Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/51/22ca8af0b9f78029657957f33604813c07dde18fb035dd37a60e2a4070d8/opentelemetry_instrumentation_grpc-0.54b1-py3-none-any.whl", hash = "sha256:c01114c5c147c216f9144da065d4a84bffb2a43b3cb05763b40ec744bbf5206e", size = 27112, upload-time = "2025-05-16T19:02:43.853Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-httpx" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/64/65b2e599c5043a5dbd14c251d48dec4947e2ec8713f601df197ea9b51246/opentelemetry_instrumentation_httpx-0.54b1.tar.gz", hash = "sha256:37e1cd0190f98508d960ec1667c9f148f8c8ad9a6cab127b57c9ad92c37493c3", size = 17734, upload-time = "2025-05-16T19:03:47.762Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/63/f92e93b613b51344a979dc6674641f2c0d24b031f6a08557304398962e41/opentelemetry_instrumentation_httpx-0.54b1-py3-none-any.whl", hash = "sha256:99b8e43ebf1d945ca298d84d32298ba26d1c3431738cea9f69a26c442661745f", size = 14129, upload-time = "2025-05-16T19:02:45.418Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-jinja2" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/9d/48836360719cfc0aaa892440b42d2fc3cf83bb84d4f92cda0ad9af7dd598/opentelemetry_instrumentation_jinja2-0.54b1.tar.gz", hash = "sha256:21e435e2029e876e9c91277fb88e9cf235211f96973c64e494b8be7551c7b3e1", size = 8468, upload-time = "2025-05-16T19:03:48.499Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/d4/213e701c74541f860bfc89211ab54b7c9d3c89576dc461bed14d6f1d0e2f/opentelemetry_instrumentation_jinja2-0.54b1-py3-none-any.whl", hash = "sha256:bcefb00e177c3481a0f735ffe96589ee40ba6b603092c19fca7b03fcb5c72a19", size = 9428, upload-time = "2025-05-16T19:02:46.544Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-kafka-python" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/1c/232ffeb76dd519d82c6b0f1b28dc33f6583f3a90b35dd3360179d46e0c72/opentelemetry_instrumentation_kafka_python-0.54b1.tar.gz", hash = "sha256:8b3f18be44939a270ca55b8017c5f822b94bdc1372b59a49464b990c715d0ba4", size = 10535, upload-time = "2025-05-16T19:03:49.198Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/88/9998fac3940d818100f0b3b1b67992481df233516d4d0a14fce43d6dcbc8/opentelemetry_instrumentation_kafka_python-0.54b1-py3-none-any.whl", hash = "sha256:ab53ed8af3281a337feb5c1fa01059d5af99ec7aa84f2b360627a20fed385ab7", size = 11502, upload-time = "2025-05-16T19:02:48.012Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-logging" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d9/5b/88ed39f22e8c6eb4f6192ab9a62adaa115579fcbcadb3f0241ee645eea56/opentelemetry_instrumentation_logging-0.54b1.tar.gz", hash = "sha256:893a3cbfda893b64ff71b81991894e2fd6a9267ba85bb6c251f51c0419fbe8fa", size = 9976, upload-time = "2025-05-16T19:03:49.976Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/0c/b441fb30d860f25040eaed61e89d68f4d9ee31873159ed18cbc1b92eba56/opentelemetry_instrumentation_logging-0.54b1-py3-none-any.whl", hash = "sha256:01a4cec54348f13941707d857b850b0febf9d49f45d0fcf0673866e079d7357b", size = 12579, upload-time = "2025-05-16T19:02:49.039Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-mysql" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1e/6e/8b203e0f0afb994a2b8734d37d4ffe8a70cd45202bf021c3a531d7b1cb9d/opentelemetry_instrumentation_mysql-0.54b1.tar.gz", hash = "sha256:de3a9367886523f30bd04b51edcf8d0777de7eac4a2467f52478231f51405b49", size = 9390, upload-time = "2025-05-16T19:03:50.66Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/18/aeae1a3cc4dd17f4338d105592a8e6cba572ef9d94089649d4b8a0d7b4dc/opentelemetry_instrumentation_mysql-0.54b1-py3-none-any.whl", hash = "sha256:07cd8c3003b439e0626e2b77f2b7f28f73c75879e28d9260f8d9a9600fb85fc2", size = 10100, upload-time = "2025-05-16T19:02:49.952Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-mysqlclient" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/c6/27ac94688611cb51d20d83855b1dbd8610009f8ccf73e0fdca40648b4db4/opentelemetry_instrumentation_mysqlclient-0.54b1.tar.gz", hash = "sha256:c14abdc5e19015ab7d6aa23ce96122c4f966fac629489eaa614e28da84e94d88", size = 9330, upload-time = "2025-05-16T19:03:51.382Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/4d/9d8a5e571c370331c771467a4c51bb2da5ced1c2601bd2990c2a2bdc0caa/opentelemetry_instrumentation_mysqlclient-0.54b1-py3-none-any.whl", hash = "sha256:462972e140586e00a5c0f0025585b2decfd0c4d7189cd42e2f786ca8e9fdab27", size = 10125, upload-time = "2025-05-16T19:02:51.422Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pika" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5d/8b/e7510900b383a2aaaec728034d8353d9112ce6fb75df1b53094185deae10/opentelemetry_instrumentation_pika-0.54b1.tar.gz", hash = "sha256:b8e20202233fee5aca35bd58db431bdcfeeddd85f83067800ab494c234479f51", size = 12993, upload-time = "2025-05-16T19:03:52.055Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/68/c1dd5a8fcf3e98644ff3d1dfc3db9a7ac65a9ae964011c139343756b1e24/opentelemetry_instrumentation_pika-0.54b1-py3-none-any.whl", hash = "sha256:3098ba31cdf3b390deb18c9eb824fccff9b8a2d51878fdcc7b69f1e6218963dc", size = 13661, upload-time = "2025-05-16T19:02:52.407Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-psycopg2" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/09/dd6e55a852c87ee6402d745486d7d2e32577e728781bc1c89812d2645f48/opentelemetry_instrumentation_psycopg2-0.54b1.tar.gz", hash = "sha256:6e899baf7b6687320491b25d5ceadde5c614a95fb379da8e2a513d430f28102f", size = 10663, upload-time = "2025-05-16T19:03:53.817Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/d0/4915e34533c26f319ba9b5346c0d1aa48d099bb29719674dbace3e4d643b/opentelemetry_instrumentation_psycopg2-0.54b1-py3-none-any.whl", hash = "sha256:2f493b180c2028bcab2ecaff8bd25560dd92a538bba8b9510411f182dd2a075e", size = 10709, upload-time = "2025-05-16T19:02:54.388Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pymemcache" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/58/66b4eb77a1279816b108d41b852f5ae02c69c8442522fb37539c119ff056/opentelemetry_instrumentation_pymemcache-0.54b1.tar.gz", hash = "sha256:03a272e3a416a633f83ee5b494a346d37fbe8249271bbf5e02686c354ae810a9", size = 10606, upload-time = "2025-05-16T19:03:54.485Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/91/678a2215292ce4cdfb28e282bef97e63bb497b42e2d677a24db7b979474d/opentelemetry_instrumentation_pymemcache-0.54b1-py3-none-any.whl", hash = "sha256:d752ccc03214cb079733d8d811ba9e624a7b6c76454ce96e30edccfed1f75f91", size = 9685, upload-time = "2025-05-16T19:02:55.389Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pymongo" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/4c/e214f98f6d0885cd1a4e09740fc68d59dfb5e108c310c0003415eb593a47/opentelemetry_instrumentation_pymongo-0.54b1.tar.gz", hash = "sha256:75cbcfe499009d535e508b869825113fc0888d4d60c544d4337ef65eb4d299f0", size = 9614, upload-time = "2025-05-16T19:03:55.135Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/f4/b4504705ce678ac6118e4c5226b566d940aa4f7baf8e6c585abad36d1197/opentelemetry_instrumentation_pymongo-0.54b1-py3-none-any.whl", hash = "sha256:2331f4f0cbd5a5053edebb956b4dd288d60eb8971d9b6d5927f0753d0651161e", size = 11314, upload-time = "2025-05-16T19:02:56.958Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pymysql" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/f5/f6f3f593c6f95994470eea001960c4891ead94d6583698862d2c1c2eb046/opentelemetry_instrumentation_pymysql-0.54b1.tar.gz", hash = "sha256:c22501ee104c34b70e37e5cdc59d74ffb833d473ac3ecfe899b707bf194e914b", size = 9208, upload-time = "2025-05-16T19:03:57.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/2f/e7a0e6555757cb14c54a4e923f0ba0a0ed9833cfae0fe8334e698d6a2767/opentelemetry_instrumentation_pymysql-0.54b1-py3-none-any.whl", hash = "sha256:54cb13c6ab559cf14e6de94f778e286d8bc89a2262cff59ee3566a41c6ab5dd1", size = 9984, upload-time = "2025-05-16T19:02:58.926Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-pyramid" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-wsgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/be/488a87bf48049c260da15ecc5ebec0e99287aaabf0a9e94d759066b84872/opentelemetry_instrumentation_pyramid-0.54b1.tar.gz", hash = "sha256:c68d46de5cbf1e804b2b730f7f60bf87f0bc9735e3d21b8359d35705ff8457b3", size = 15046, upload-time = "2025-05-16T19:03:58.137Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/eb/456f9a79c0e3ac26036a0d262235b9cde3a085b88c8ec17e1f062b2d2327/opentelemetry_instrumentation_pyramid-0.54b1-py3-none-any.whl", hash = "sha256:11b7f210ff45b754db30f7522bb2e27be902ddea38a59cc16c08e16dd8061f42", size = 13999, upload-time = "2025-05-16T19:02:59.938Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-redis" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c2/01/fad85231c3518bf6349a7ef483ef06a27100da8d1b7531dec9d8d09b94d8/opentelemetry_instrumentation_redis-0.54b1.tar.gz", hash = "sha256:89024c4752147d528e8c51fff0034193e628da339848cda78afe0cf4eb0c7ccb", size = 13908, upload-time = "2025-05-16T19:03:58.876Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/c1/78f18965f16e34a8fecc5b10c52aca1243e75a512a0a0320556a69583f36/opentelemetry_instrumentation_redis-0.54b1-py3-none-any.whl", hash = "sha256:e98992bd38e93081158f9947a1a8eea51d96e8bfe5054894a5b8d1d82117c0c8", size = 14924, upload-time = "2025-05-16T19:03:01.07Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-remoulade" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e4/f5/d360444cd559f67a6d6f2467ca3f036db1894d3ba8c4a82a2c443eae674f/opentelemetry_instrumentation_remoulade-0.54b1.tar.gz", hash = "sha256:0c2f5571985375c55532402238dafb09d0e6b4b8c2a3c18925ef461bb3896c96", size = 8131, upload-time = "2025-05-16T19:03:59.804Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/35/0a17505193fd93e16d26d18a0605a9dedb5bdde9c4aed56f391160ed657b/opentelemetry_instrumentation_remoulade-0.54b1-py3-none-any.whl", hash = "sha256:5d50d298a1d456e1008166d0a20cb7ccada93b502b99cf74f344fb6d1df947c9", size = 10130, upload-time = "2025-05-16T19:03:02.152Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-requests" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/45/116da84930d3dc2f5cdd876283ca96e9b96547bccee7eaa0bd01ce6bf046/opentelemetry_instrumentation_requests-0.54b1.tar.gz", hash = "sha256:3eca5d697c5564af04c6a1dd23b6a3ffbaf11e64887c6051655cee03998f4654", size = 15148, upload-time = "2025-05-16T19:04:00.488Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/b1/6e33d2c3d3cc9e3ae20a9a77625ec81a509a0e5d7fa87e09e7f879468990/opentelemetry_instrumentation_requests-0.54b1-py3-none-any.whl", hash = "sha256:a0c4cd5d946224f336d6bd73cdabdecc6f80d5c39208f84eb96eb15f16cd41a0", size = 12968, upload-time = "2025-05-16T19:03:03.131Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-sqlalchemy" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "packaging" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/33/78a25ae4233d42058bb0b363ba4fea7d7210e53c24e5e31f16d5cf6cf957/opentelemetry_instrumentation_sqlalchemy-0.54b1.tar.gz", hash = "sha256:97839acf1c9b96ded857fca57a09b86a56cf8d9eb6d706b7ceaee9352a460e03", size = 14620, upload-time = "2025-05-16T19:04:01.215Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/2b/1c954885815614ef5c1e8c7bbf57a5275e64cd6fb5946b65e17162a34037/opentelemetry_instrumentation_sqlalchemy-0.54b1-py3-none-any.whl", hash = "sha256:d2ca5edb4c7ecef120d51aad6793b7da1cc80207ccfd31c437ee18f098e7c4c4", size = 14169, upload-time = "2025-05-16T19:03:04.119Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-sqlite3" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-dbapi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/07/cae18dbc2ba1997a382e63f1ee7527dff9557675c2802709ca8a011341c4/opentelemetry_instrumentation_sqlite3-0.54b1.tar.gz", hash = "sha256:e32ec80a2f50df035bf16de142527157b98a60a3863ddcb6aa20beae8a64a24d", size = 7929, upload-time = "2025-05-16T19:04:02.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/8a/7a6b6b1cabc65e237ebbfd10429997579eaa4281c169429c28eb5a60e177/opentelemetry_instrumentation_sqlite3-0.54b1-py3-none-any.whl", hash = "sha256:756c8f51a3b738f4cd52556b2146a6e2e6a33516b494aa4dbb7478702af4a475", size = 9342, upload-time = "2025-05-16T19:03:05.641Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-starlette" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-instrumentation-asgi" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f9/43/c8095007bcc800a5465ebe50b097ab0da8b1d973f9afdcea04d98d2cb81d/opentelemetry_instrumentation_starlette-0.54b1.tar.gz", hash = "sha256:04f5902185166ad0a96bbc5cc184983bdf535ac92b1edc7a6093e9d14efa00d1", size = 14492, upload-time = "2025-05-16T19:04:03.012Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1d/9215d1696a428bbc0c46b8fc7c0189693ba5cdd9032f1dbeff04e9526828/opentelemetry_instrumentation_starlette-0.54b1-py3-none-any.whl", hash = "sha256:533e730308b5e6e99ab2a219c891f8e08ef5e67db76a148cc2f6c4fd5b6bcc0e", size = 11740, upload-time = "2025-05-16T19:03:07.079Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-system-metrics" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "psutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/10/cc/0db64253beac5a58dca621114f1be8c95af3ec8ac31785fb28b6ed82021e/opentelemetry_instrumentation_system_metrics-0.54b1.tar.gz", hash = "sha256:2846ba1019e1672fb605eff3d3af198fa1b8f1540ece70da82a2d20d9b95779b", size = 15007, upload-time = "2025-05-16T19:04:03.758Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/fd/e9bd23fd734bbdc028e7ebe3d25855381b696ceca214f80ad7fe74e9079c/opentelemetry_instrumentation_system_metrics-0.54b1-py3-none-any.whl", hash = "sha256:1b6f23cc8cf18b525bdb285c3664b521ce81b1e82c4f3db6a82210b8c37af1e4", size = 13093, upload-time = "2025-05-16T19:03:08.516Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-threading" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/bd/561245292e7cc78ac7a0a75537873aea87440cb9493d41371421b3308c2b/opentelemetry_instrumentation_threading-0.54b1.tar.gz", hash = "sha256:3a081085b59675baf7bd93126a681903e6304a5f283df5eaecdd44bcb66df578", size = 8774, upload-time = "2025-05-16T19:04:04.482Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/10/d87ec07d69546adaad525ba5d40d27324a45cba29097d9854a53d9af5047/opentelemetry_instrumentation_threading-0.54b1-py3-none-any.whl", hash = "sha256:bc229e6cd3f2b29fafe0a8dd3141f452e16fcb4906bca4fbf52609f99fb1eb42", size = 9314, upload-time = "2025-05-16T19:03:09.527Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-tornado" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/61/9da044c2ae3cea9a4f0e4cf28bbc1a5aaf7052c2b00ad9f305a107da9110/opentelemetry_instrumentation_tornado-0.54b1.tar.gz", hash = "sha256:73a5ba0f915688907dd4640653d3970167715c42a5ef4a948bbcf93ad9682b8d", size = 17089, upload-time = "2025-05-16T19:04:05.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/70/858aabf04ef24f409995c032c06c9a96e7c8bb9a257c9981b7fb380b7458/opentelemetry_instrumentation_tornado-0.54b1-py3-none-any.whl", hash = "sha256:3f4773cb3adfd6fdd592f182a72be85ca6cf01500a9973ac17947ce81d9872ee", size = 15327, upload-time = "2025-05-16T19:03:10.527Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-tortoiseorm" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/ec/c1c2916e9448ea2c5fde2700bf6577d42db5a2ed0fda856e388d34e42872/opentelemetry_instrumentation_tortoiseorm-0.54b1.tar.gz", hash = "sha256:f9ffe00bcdfa895dfa1a512f4fde186ebd816a4636afd26a7716f258b4c7e3f9", size = 8263, upload-time = "2025-05-16T19:04:06.372Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/e0/81eb1ec3cbe436030c32ada365f6fcf9e034c882d8c3060dfe35ffdfabc0/opentelemetry_instrumentation_tortoiseorm-0.54b1-py3-none-any.whl", hash = "sha256:0335efcd4f5e240efecc36f909939dbc6fb8c9b0733dc3f0615a39c3f6544c7e", size = 10158, upload-time = "2025-05-16T19:03:11.572Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-urllib" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/52/47ecbce59d47e4543286ab88753efe1903f40a80c05397407375b4e600c2/opentelemetry_instrumentation_urllib-0.54b1.tar.gz", hash = "sha256:99943400b6814ebf072735e0fb42dc5c74705f30b64ebed3778f0e7c6e16d63e", size = 13788, upload-time = "2025-05-16T19:04:07.028Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/2a/d8c9876d80d89f728c89439a880eaccedab3ffe1cc83b2c49abf17b81038/opentelemetry_instrumentation_urllib-0.54b1-py3-none-any.whl", hash = "sha256:94744470733f61f3dd282be7868e93f5bc277f07a0aeda7c836c913cbcf4f416", size = 12625, upload-time = "2025-05-16T19:03:12.701Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-urllib3" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/6f/76a46806cd21002cac1bfd087f5e4674b195ab31ab44c773ca534b6bb546/opentelemetry_instrumentation_urllib3-0.54b1.tar.gz", hash = "sha256:0d30ba3b230e4100cfadaad29174bf7bceac70e812e4f5204e681e4b55a74cd9", size = 15697, upload-time = "2025-05-16T19:04:07.709Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/7a/d75bec41edb6deaf1d2859bab66a84c8ba03e822e7eafdb245da205e53f6/opentelemetry_instrumentation_urllib3-0.54b1-py3-none-any.whl", hash = "sha256:e87958c297ddd36d30e1c9069f34a9690e845e4ccc2662dd80e99ed976d4c03e", size = 13123, upload-time = "2025-05-16T19:03:14.053Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-wsgi" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/0f/442eba02bd277fae2f5eb3ac5f8dd5f8cc52ddbe080506748871b91a63ab/opentelemetry_instrumentation_wsgi-0.54b1.tar.gz", hash = "sha256:261ad737e0058812aaae6bb7d6e0fa7344de62464c5df30c82bea180e735b903", size = 18244, upload-time = "2025-05-16T19:04:08.448Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/2f/075156d123e589d6728cc4c1a43d0335fa16e8f4a9f723a4af9267d91169/opentelemetry_instrumentation_wsgi-0.54b1-py3-none-any.whl", hash = "sha256:6d99dca32ce232251cd321bf86e8c9d0a60c5f088bcbe5ad55d12a2006fe056e", size = 14378, upload-time = "2025-05-16T19:03:15.074Z" }, +] + +[[package]] +name = "opentelemetry-processor-baggage" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-sdk" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9b/47/6ebc196ca33a79e6e8839d33ebf1b9a7d88646f48b12c5687e5a90300879/opentelemetry_processor_baggage-0.54b1.tar.gz", hash = "sha256:d3ec2a99fb8b88ca1153cf9b1b8eae76bd2bb518fb900f758a8d24e439276055", size = 7579, upload-time = "2025-05-16T19:04:09.148Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/9f/db3a2e7162dc73f012b440c5600acaab301170cffe8d8ccce5e069bc4176/opentelemetry_processor_baggage-0.54b1-py3-none-any.whl", hash = "sha256:1502475016c90b68642c9377803fd77b7f295d0b33e0d3449ba113b405de2b49", size = 8877, upload-time = "2025-05-16T19:03:16.127Z" }, +] + +[[package]] +name = "opentelemetry-propagator-aws-xray" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/32/13/310a7f3c789eb9bb51f8ee9b88fb4b9f4f1e7191c8c96c7ea6f15eaa99b5/opentelemetry-propagator-aws-xray-1.0.1.tar.gz", hash = "sha256:6e8be667bbcf17c3d81d70b2a7cdec0b11257ff64d3829ffe75b810ba1b49f86", size = 8932, upload-time = "2021-10-18T22:07:40.108Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/24/2b1694b9452ac7ab3567dcb80902f7c5c8a39962751d5a4c54a357caa49e/opentelemetry_propagator_aws_xray-1.0.1-py3-none-any.whl", hash = "sha256:49267a1d72b3f04880ac75e24f9ef38fe323e2f3156c4531e0e00c71c0829c0f", size = 10812, upload-time = "2021-10-18T22:07:38.08Z" }, +] + +[[package]] +name = "opentelemetry-propagator-b3" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/b4/4fe00e8c63175e35c310ac4e5091b3c22a468a6098e8a5eacd8b991d6989/opentelemetry_propagator_b3-1.33.1.tar.gz", hash = "sha256:46bbe76d95ac7e1f50b263230aa1ce86445120f10c7008d66cb08266468561a3", size = 9618, upload-time = "2025-05-16T18:52:50.973Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/67/4a/16676216b5b8db95a6bdeb529bf17603e14c70ac15fcadca8de2bd135c65/opentelemetry_propagator_b3-1.33.1-py3-none-any.whl", hash = "sha256:5c65708fbecb317ab4f1880e81f7bb0bf48caa2e1d52fe31f89d1cb86172a69c", size = 8936, upload-time = "2025-05-16T18:52:34.125Z" }, +] + +[[package]] +name = "opentelemetry-propagator-jaeger" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/28/2be617ef9bf804f65864d17eef13af582992d529c61d58a8a17d711b918a/opentelemetry_propagator_jaeger-1.33.1.tar.gz", hash = "sha256:b4cd3f123a720db872401e2179f7384c70922a6b9bab2873f003419be82bb5e3", size = 8676, upload-time = "2025-05-16T18:52:51.559Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/24/a20343cfa49b38192ca6e314294b50a76d427c7dcbfd1a3ddb19706fed71/opentelemetry_propagator_jaeger-1.33.1-py3-none-any.whl", hash = "sha256:d5cfd139b245b32b45edda478b7be1fc52ecc93a199aa6ed7fd074086d81d083", size = 8778, upload-time = "2025-05-16T18:52:34.976Z" }, +] + +[[package]] +name = "opentelemetry-propagator-ot-trace" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/a3/b1bc6a7dc4aa7467b7d4537452a4fb089cb82246138fed6a3272e9ec2de9/opentelemetry_propagator_ot_trace-0.54b1.tar.gz", hash = "sha256:ce6bbebe9a3e57d8abada605b3ef296d363c764bb9a075677ea6f7aed7ddf8e6", size = 5026, upload-time = "2025-05-16T19:04:10.126Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/62/cab99d81b9de2f74e80cf5deac45c31ec110d65a6d9b043152cffe2e3edd/opentelemetry_propagator_ot_trace-0.54b1-py3-none-any.whl", hash = "sha256:3c7885bdee37b28562e17cd8cb72747102fdccd9d4e557f5b4afb109092db829", size = 4769, upload-time = "2025-05-16T19:03:17.047Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f6/dc/791f3d60a1ad8235930de23eea735ae1084be1c6f96fdadf38710662a7e5/opentelemetry_proto-1.33.1.tar.gz", hash = "sha256:9627b0a5c90753bf3920c398908307063e4458b287bb890e5c1d6fa11ad50b68", size = 34363, upload-time = "2025-05-16T18:52:52.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/29/48609f4c875c2b6c80930073c82dd1cafd36b6782244c01394007b528960/opentelemetry_proto-1.33.1-py3-none-any.whl", hash = "sha256:243d285d9f29663fc7ea91a7171fcc1ccbbfff43b48df0774fd64a37d98eda70", size = 55854, upload-time = "2025-05-16T18:52:36.269Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/67/12/909b98a7d9b110cce4b28d49b2e311797cffdce180371f35eba13a72dd00/opentelemetry_sdk-1.33.1.tar.gz", hash = "sha256:85b9fcf7c3d23506fbc9692fd210b8b025a1920535feec50bd54ce203d57a531", size = 161885, upload-time = "2025-05-16T18:52:52.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/8e/ae2d0742041e0bd7fe0d2dcc5e7cce51dcf7d3961a26072d5b43cc8fa2a7/opentelemetry_sdk-1.33.1-py3-none-any.whl", hash = "sha256:19ea73d9a01be29cacaa5d6c8ce0adc0b7f7b4d58cc52f923e4413609f670112", size = 118950, upload-time = "2025-05-16T18:52:37.297Z" }, +] + +[[package]] +name = "opentelemetry-sdk-extension-aws" +version = "2.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/86/52a95a0128b5aeb9db76e3ee6f9aeb6f2417ad24da28747318cbdf11c43d/opentelemetry_sdk_extension_aws-2.0.2.tar.gz", hash = "sha256:9faa9bdf480d1c5c53151dabee75735c94dbde09e4762c68ff5c7bd4aa3408f3", size = 16014, upload-time = "2024-08-05T17:45:06.44Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/6c/c85409f89ebe33d0998391f6e68ae0f2353a8e526450aad8b177ed5a26d3/opentelemetry_sdk_extension_aws-2.0.2-py3-none-any.whl", hash = "sha256:4c6e4b9fec01a4a9cfeac5272ce5aae6bc80e080a6bae1e52098746f53a7b32d", size = 18652, upload-time = "2024-08-05T17:45:05.27Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/2c/d7990fc1ffc82889d466e7cd680788ace44a26789809924813b164344393/opentelemetry_semantic_conventions-0.54b1.tar.gz", hash = "sha256:d1cecedae15d19bdaafca1e56b29a66aa286f50b5d08f036a145c7f3e9ef9cee", size = 118642, upload-time = "2025-05-16T18:52:53.962Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/80/08b1698c52ff76d96ba440bf15edc2f4bc0a279868778928e947c1004bdd/opentelemetry_semantic_conventions-0.54b1-py3-none-any.whl", hash = "sha256:29dab644a7e435b58d3a3918b58c333c92686236b30f7891d5e51f02933ca60d", size = 194938, upload-time = "2025-05-16T18:52:38.796Z" }, +] + +[[package]] +name = "opentelemetry-util-http" +version = "0.54b1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a8/9f/1d8a1d1f34b9f62f2b940b388bf07b8167a8067e70870055bd05db354e5c/opentelemetry_util_http-0.54b1.tar.gz", hash = "sha256:f0b66868c19fbaf9c9d4e11f4a7599fa15d5ea50b884967a26ccd9d72c7c9d15", size = 8044, upload-time = "2025-05-16T19:04:10.79Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/ef/c5aa08abca6894792beed4c0405e85205b35b8e73d653571c9ff13a8e34e/opentelemetry_util_http-0.54b1-py3-none-any.whl", hash = "sha256:b1c91883f980344a1c3c486cffd47ae5c9c1dd7323f9cbe9fdb7cadb401c87c9", size = 7301, upload-time = "2025-05-16T19:03:18.18Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "pillow" +version = "11.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" }, + { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" }, + { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" }, + { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" }, + { url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" }, + { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" }, + { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" }, + { url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" }, + { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" }, + { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" }, + { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" }, + { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" }, + { url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" }, + { url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" }, + { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" }, + { url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" }, + { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" }, + { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" }, + { url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" }, + { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" }, + { url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" }, + { url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" }, + { url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" }, + { url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" }, + { url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" }, + { url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" }, + { url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" }, + { url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" }, + { url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" }, + { url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" }, + { url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" }, + { url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" }, + { url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" }, + { url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" }, + { url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" }, + { url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" }, + { url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" }, + { url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" }, +] + +[[package]] +name = "pip" +version = "25.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/6e/74a3f0179a4a73a53d66ce57fdb4de0080a8baa1de0063de206d6167acc2/pip-25.3.tar.gz", hash = "sha256:8d0538dbbd7babbd207f261ed969c65de439f6bc9e5dbd3b3b9a77f25d95f343", size = 1803014, upload-time = "2025-10-25T00:55:41.394Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/3c/d717024885424591d5376220b5e836c2d5293ce2011523c9de23ff7bf068/pip-25.3-py3-none-any.whl", hash = "sha256:9655943313a94722b7774661c21049070f6bbb0a1516bf02f7c8d5d9201514cd", size = 1778622, upload-time = "2025-10-25T00:55:39.247Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/61/33/9611380c2bdb1225fdef633e2a9610622310fed35ab11dac9620972ee088/platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312", size = 21632, upload-time = "2025-10-08T17:44:48.791Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/cb/ac7874b3e5d58441674fb70742e6c374b28b0c7cb988d37d991cde47166c/platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3", size = 18651, upload-time = "2025-10-08T17:44:47.223Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.52" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, +] + +[[package]] +name = "propcache" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/da/e9fc233cf63743258bff22b3dfa7ea5baef7b5bc324af47a0ad89b8ffc6f/propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d", size = 46442, upload-time = "2025-10-08T19:49:02.291Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/df/6d9c1b6ac12b003837dde8a10231a7344512186e87b36e855bef32241942/propcache-0.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43eedf29202c08550aac1d14e0ee619b0430aaef78f85864c1a892294fbc28cf", size = 77750, upload-time = "2025-10-08T19:47:07.648Z" }, + { url = "https://files.pythonhosted.org/packages/8b/e8/677a0025e8a2acf07d3418a2e7ba529c9c33caf09d3c1f25513023c1db56/propcache-0.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d62cdfcfd89ccb8de04e0eda998535c406bf5e060ffd56be6c586cbcc05b3311", size = 44780, upload-time = "2025-10-08T19:47:08.851Z" }, + { url = "https://files.pythonhosted.org/packages/89/a4/92380f7ca60f99ebae761936bc48a72a639e8a47b29050615eef757cb2a7/propcache-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cae65ad55793da34db5f54e4029b89d3b9b9490d8abe1b4c7ab5d4b8ec7ebf74", size = 46308, upload-time = "2025-10-08T19:47:09.982Z" }, + { url = "https://files.pythonhosted.org/packages/2d/48/c5ac64dee5262044348d1d78a5f85dd1a57464a60d30daee946699963eb3/propcache-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:333ddb9031d2704a301ee3e506dc46b1fe5f294ec198ed6435ad5b6a085facfe", size = 208182, upload-time = "2025-10-08T19:47:11.319Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0c/cd762dd011a9287389a6a3eb43aa30207bde253610cca06824aeabfe9653/propcache-0.4.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd0858c20f078a32cf55f7e81473d96dcf3b93fd2ccdb3d40fdf54b8573df3af", size = 211215, upload-time = "2025-10-08T19:47:13.146Z" }, + { url = "https://files.pythonhosted.org/packages/30/3e/49861e90233ba36890ae0ca4c660e95df565b2cd15d4a68556ab5865974e/propcache-0.4.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:678ae89ebc632c5c204c794f8dab2837c5f159aeb59e6ed0539500400577298c", size = 218112, upload-time = "2025-10-08T19:47:14.913Z" }, + { url = "https://files.pythonhosted.org/packages/f1/8b/544bc867e24e1bd48f3118cecd3b05c694e160a168478fa28770f22fd094/propcache-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d472aeb4fbf9865e0c6d622d7f4d54a4e101a89715d8904282bb5f9a2f476c3f", size = 204442, upload-time = "2025-10-08T19:47:16.277Z" }, + { url = "https://files.pythonhosted.org/packages/50/a6/4282772fd016a76d3e5c0df58380a5ea64900afd836cec2c2f662d1b9bb3/propcache-0.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4d3df5fa7e36b3225954fba85589da77a0fe6a53e3976de39caf04a0db4c36f1", size = 199398, upload-time = "2025-10-08T19:47:17.962Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ec/d8a7cd406ee1ddb705db2139f8a10a8a427100347bd698e7014351c7af09/propcache-0.4.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ee17f18d2498f2673e432faaa71698032b0127ebf23ae5974eeaf806c279df24", size = 196920, upload-time = "2025-10-08T19:47:19.355Z" }, + { url = "https://files.pythonhosted.org/packages/f6/6c/f38ab64af3764f431e359f8baf9e0a21013e24329e8b85d2da32e8ed07ca/propcache-0.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:580e97762b950f993ae618e167e7be9256b8353c2dcd8b99ec100eb50f5286aa", size = 203748, upload-time = "2025-10-08T19:47:21.338Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e3/fa846bd70f6534d647886621388f0a265254d30e3ce47e5c8e6e27dbf153/propcache-0.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:501d20b891688eb8e7aa903021f0b72d5a55db40ffaab27edefd1027caaafa61", size = 205877, upload-time = "2025-10-08T19:47:23.059Z" }, + { url = "https://files.pythonhosted.org/packages/e2/39/8163fc6f3133fea7b5f2827e8eba2029a0277ab2c5beee6c1db7b10fc23d/propcache-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a0bd56e5b100aef69bd8562b74b46254e7c8812918d3baa700c8a8009b0af66", size = 199437, upload-time = "2025-10-08T19:47:24.445Z" }, + { url = "https://files.pythonhosted.org/packages/93/89/caa9089970ca49c7c01662bd0eeedfe85494e863e8043565aeb6472ce8fe/propcache-0.4.1-cp313-cp313-win32.whl", hash = "sha256:bcc9aaa5d80322bc2fb24bb7accb4a30f81e90ab8d6ba187aec0744bc302ad81", size = 37586, upload-time = "2025-10-08T19:47:25.736Z" }, + { url = "https://files.pythonhosted.org/packages/f5/ab/f76ec3c3627c883215b5c8080debb4394ef5a7a29be811f786415fc1e6fd/propcache-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:381914df18634f5494334d201e98245c0596067504b9372d8cf93f4bb23e025e", size = 40790, upload-time = "2025-10-08T19:47:26.847Z" }, + { url = "https://files.pythonhosted.org/packages/59/1b/e71ae98235f8e2ba5004d8cb19765a74877abf189bc53fc0c80d799e56c3/propcache-0.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:8873eb4460fd55333ea49b7d189749ecf6e55bf85080f11b1c4530ed3034cba1", size = 37158, upload-time = "2025-10-08T19:47:27.961Z" }, + { url = "https://files.pythonhosted.org/packages/83/ce/a31bbdfc24ee0dcbba458c8175ed26089cf109a55bbe7b7640ed2470cfe9/propcache-0.4.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:92d1935ee1f8d7442da9c0c4fa7ac20d07e94064184811b685f5c4fada64553b", size = 81451, upload-time = "2025-10-08T19:47:29.445Z" }, + { url = "https://files.pythonhosted.org/packages/25/9c/442a45a470a68456e710d96cacd3573ef26a1d0a60067e6a7d5e655621ed/propcache-0.4.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:473c61b39e1460d386479b9b2f337da492042447c9b685f28be4f74d3529e566", size = 46374, upload-time = "2025-10-08T19:47:30.579Z" }, + { url = "https://files.pythonhosted.org/packages/f4/bf/b1d5e21dbc3b2e889ea4327044fb16312a736d97640fb8b6aa3f9c7b3b65/propcache-0.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c0ef0aaafc66fbd87842a3fe3902fd889825646bc21149eafe47be6072725835", size = 48396, upload-time = "2025-10-08T19:47:31.79Z" }, + { url = "https://files.pythonhosted.org/packages/f4/04/5b4c54a103d480e978d3c8a76073502b18db0c4bc17ab91b3cb5092ad949/propcache-0.4.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95393b4d66bfae908c3ca8d169d5f79cd65636ae15b5e7a4f6e67af675adb0e", size = 275950, upload-time = "2025-10-08T19:47:33.481Z" }, + { url = "https://files.pythonhosted.org/packages/b4/c1/86f846827fb969c4b78b0af79bba1d1ea2156492e1b83dea8b8a6ae27395/propcache-0.4.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c07fda85708bc48578467e85099645167a955ba093be0a2dcba962195676e859", size = 273856, upload-time = "2025-10-08T19:47:34.906Z" }, + { url = "https://files.pythonhosted.org/packages/36/1d/fc272a63c8d3bbad6878c336c7a7dea15e8f2d23a544bda43205dfa83ada/propcache-0.4.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:af223b406d6d000830c6f65f1e6431783fc3f713ba3e6cc8c024d5ee96170a4b", size = 280420, upload-time = "2025-10-08T19:47:36.338Z" }, + { url = "https://files.pythonhosted.org/packages/07/0c/01f2219d39f7e53d52e5173bcb09c976609ba30209912a0680adfb8c593a/propcache-0.4.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a78372c932c90ee474559c5ddfffd718238e8673c340dc21fe45c5b8b54559a0", size = 263254, upload-time = "2025-10-08T19:47:37.692Z" }, + { url = "https://files.pythonhosted.org/packages/2d/18/cd28081658ce597898f0c4d174d4d0f3c5b6d4dc27ffafeef835c95eb359/propcache-0.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:564d9f0d4d9509e1a870c920a89b2fec951b44bf5ba7d537a9e7c1ccec2c18af", size = 261205, upload-time = "2025-10-08T19:47:39.659Z" }, + { url = "https://files.pythonhosted.org/packages/7a/71/1f9e22eb8b8316701c2a19fa1f388c8a3185082607da8e406a803c9b954e/propcache-0.4.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:17612831fda0138059cc5546f4d12a2aacfb9e47068c06af35c400ba58ba7393", size = 247873, upload-time = "2025-10-08T19:47:41.084Z" }, + { url = "https://files.pythonhosted.org/packages/4a/65/3d4b61f36af2b4eddba9def857959f1016a51066b4f1ce348e0cf7881f58/propcache-0.4.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:41a89040cb10bd345b3c1a873b2bf36413d48da1def52f268a055f7398514874", size = 262739, upload-time = "2025-10-08T19:47:42.51Z" }, + { url = "https://files.pythonhosted.org/packages/2a/42/26746ab087faa77c1c68079b228810436ccd9a5ce9ac85e2b7307195fd06/propcache-0.4.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e35b88984e7fa64aacecea39236cee32dd9bd8c55f57ba8a75cf2399553f9bd7", size = 263514, upload-time = "2025-10-08T19:47:43.927Z" }, + { url = "https://files.pythonhosted.org/packages/94/13/630690fe201f5502d2403dd3cfd451ed8858fe3c738ee88d095ad2ff407b/propcache-0.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f8b465489f927b0df505cbe26ffbeed4d6d8a2bbc61ce90eb074ff129ef0ab1", size = 257781, upload-time = "2025-10-08T19:47:45.448Z" }, + { url = "https://files.pythonhosted.org/packages/92/f7/1d4ec5841505f423469efbfc381d64b7b467438cd5a4bbcbb063f3b73d27/propcache-0.4.1-cp313-cp313t-win32.whl", hash = "sha256:2ad890caa1d928c7c2965b48f3a3815c853180831d0e5503d35cf00c472f4717", size = 41396, upload-time = "2025-10-08T19:47:47.202Z" }, + { url = "https://files.pythonhosted.org/packages/48/f0/615c30622316496d2cbbc29f5985f7777d3ada70f23370608c1d3e081c1f/propcache-0.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f7ee0e597f495cf415bcbd3da3caa3bd7e816b74d0d52b8145954c5e6fd3ff37", size = 44897, upload-time = "2025-10-08T19:47:48.336Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/6002e46eccbe0e33dcd4069ef32f7f1c9e243736e07adca37ae8c4830ec3/propcache-0.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:929d7cbe1f01bb7baffb33dc14eb5691c95831450a26354cd210a8155170c93a", size = 39789, upload-time = "2025-10-08T19:47:49.876Z" }, + { url = "https://files.pythonhosted.org/packages/8e/5c/bca52d654a896f831b8256683457ceddd490ec18d9ec50e97dfd8fc726a8/propcache-0.4.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3f7124c9d820ba5548d431afb4632301acf965db49e666aa21c305cbe8c6de12", size = 78152, upload-time = "2025-10-08T19:47:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/65/9b/03b04e7d82a5f54fb16113d839f5ea1ede58a61e90edf515f6577c66fa8f/propcache-0.4.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c0d4b719b7da33599dfe3b22d3db1ef789210a0597bc650b7cee9c77c2be8c5c", size = 44869, upload-time = "2025-10-08T19:47:52.594Z" }, + { url = "https://files.pythonhosted.org/packages/b2/fa/89a8ef0468d5833a23fff277b143d0573897cf75bd56670a6d28126c7d68/propcache-0.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9f302f4783709a78240ebc311b793f123328716a60911d667e0c036bc5dcbded", size = 46596, upload-time = "2025-10-08T19:47:54.073Z" }, + { url = "https://files.pythonhosted.org/packages/86/bd/47816020d337f4a746edc42fe8d53669965138f39ee117414c7d7a340cfe/propcache-0.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c80ee5802e3fb9ea37938e7eecc307fb984837091d5fd262bb37238b1ae97641", size = 206981, upload-time = "2025-10-08T19:47:55.715Z" }, + { url = "https://files.pythonhosted.org/packages/df/f6/c5fa1357cc9748510ee55f37173eb31bfde6d94e98ccd9e6f033f2fc06e1/propcache-0.4.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ed5a841e8bb29a55fb8159ed526b26adc5bdd7e8bd7bf793ce647cb08656cdf4", size = 211490, upload-time = "2025-10-08T19:47:57.499Z" }, + { url = "https://files.pythonhosted.org/packages/80/1e/e5889652a7c4a3846683401a48f0f2e5083ce0ec1a8a5221d8058fbd1adf/propcache-0.4.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:55c72fd6ea2da4c318e74ffdf93c4fe4e926051133657459131a95c846d16d44", size = 215371, upload-time = "2025-10-08T19:47:59.317Z" }, + { url = "https://files.pythonhosted.org/packages/b2/f2/889ad4b2408f72fe1a4f6a19491177b30ea7bf1a0fd5f17050ca08cfc882/propcache-0.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8326e144341460402713f91df60ade3c999d601e7eb5ff8f6f7862d54de0610d", size = 201424, upload-time = "2025-10-08T19:48:00.67Z" }, + { url = "https://files.pythonhosted.org/packages/27/73/033d63069b57b0812c8bd19f311faebeceb6ba31b8f32b73432d12a0b826/propcache-0.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:060b16ae65bc098da7f6d25bf359f1f31f688384858204fe5d652979e0015e5b", size = 197566, upload-time = "2025-10-08T19:48:02.604Z" }, + { url = "https://files.pythonhosted.org/packages/dc/89/ce24f3dc182630b4e07aa6d15f0ff4b14ed4b9955fae95a0b54c58d66c05/propcache-0.4.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:89eb3fa9524f7bec9de6e83cf3faed9d79bffa560672c118a96a171a6f55831e", size = 193130, upload-time = "2025-10-08T19:48:04.499Z" }, + { url = "https://files.pythonhosted.org/packages/a9/24/ef0d5fd1a811fb5c609278d0209c9f10c35f20581fcc16f818da959fc5b4/propcache-0.4.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:dee69d7015dc235f526fe80a9c90d65eb0039103fe565776250881731f06349f", size = 202625, upload-time = "2025-10-08T19:48:06.213Z" }, + { url = "https://files.pythonhosted.org/packages/f5/02/98ec20ff5546f68d673df2f7a69e8c0d076b5abd05ca882dc7ee3a83653d/propcache-0.4.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5558992a00dfd54ccbc64a32726a3357ec93825a418a401f5cc67df0ac5d9e49", size = 204209, upload-time = "2025-10-08T19:48:08.432Z" }, + { url = "https://files.pythonhosted.org/packages/a0/87/492694f76759b15f0467a2a93ab68d32859672b646aa8a04ce4864e7932d/propcache-0.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c9b822a577f560fbd9554812526831712c1436d2c046cedee4c3796d3543b144", size = 197797, upload-time = "2025-10-08T19:48:09.968Z" }, + { url = "https://files.pythonhosted.org/packages/ee/36/66367de3575db1d2d3f3d177432bd14ee577a39d3f5d1b3d5df8afe3b6e2/propcache-0.4.1-cp314-cp314-win32.whl", hash = "sha256:ab4c29b49d560fe48b696cdcb127dd36e0bc2472548f3bf56cc5cb3da2b2984f", size = 38140, upload-time = "2025-10-08T19:48:11.232Z" }, + { url = "https://files.pythonhosted.org/packages/0c/2a/a758b47de253636e1b8aef181c0b4f4f204bf0dd964914fb2af90a95b49b/propcache-0.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:5a103c3eb905fcea0ab98be99c3a9a5ab2de60228aa5aceedc614c0281cf6153", size = 41257, upload-time = "2025-10-08T19:48:12.707Z" }, + { url = "https://files.pythonhosted.org/packages/34/5e/63bd5896c3fec12edcbd6f12508d4890d23c265df28c74b175e1ef9f4f3b/propcache-0.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:74c1fb26515153e482e00177a1ad654721bf9207da8a494a0c05e797ad27b992", size = 38097, upload-time = "2025-10-08T19:48:13.923Z" }, + { url = "https://files.pythonhosted.org/packages/99/85/9ff785d787ccf9bbb3f3106f79884a130951436f58392000231b4c737c80/propcache-0.4.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:824e908bce90fb2743bd6b59db36eb4f45cd350a39637c9f73b1c1ea66f5b75f", size = 81455, upload-time = "2025-10-08T19:48:15.16Z" }, + { url = "https://files.pythonhosted.org/packages/90/85/2431c10c8e7ddb1445c1f7c4b54d886e8ad20e3c6307e7218f05922cad67/propcache-0.4.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2b5e7db5328427c57c8e8831abda175421b709672f6cfc3d630c3b7e2146393", size = 46372, upload-time = "2025-10-08T19:48:16.424Z" }, + { url = "https://files.pythonhosted.org/packages/01/20/b0972d902472da9bcb683fa595099911f4d2e86e5683bcc45de60dd05dc3/propcache-0.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6f6ff873ed40292cd4969ef5310179afd5db59fdf055897e282485043fc80ad0", size = 48411, upload-time = "2025-10-08T19:48:17.577Z" }, + { url = "https://files.pythonhosted.org/packages/e2/e3/7dc89f4f21e8f99bad3d5ddb3a3389afcf9da4ac69e3deb2dcdc96e74169/propcache-0.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49a2dc67c154db2c1463013594c458881a069fcf98940e61a0569016a583020a", size = 275712, upload-time = "2025-10-08T19:48:18.901Z" }, + { url = "https://files.pythonhosted.org/packages/20/67/89800c8352489b21a8047c773067644e3897f02ecbbd610f4d46b7f08612/propcache-0.4.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:005f08e6a0529984491e37d8dbc3dd86f84bd78a8ceb5fa9a021f4c48d4984be", size = 273557, upload-time = "2025-10-08T19:48:20.762Z" }, + { url = "https://files.pythonhosted.org/packages/e2/a1/b52b055c766a54ce6d9c16d9aca0cad8059acd9637cdf8aa0222f4a026ef/propcache-0.4.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c3310452e0d31390da9035c348633b43d7e7feb2e37be252be6da45abd1abcc", size = 280015, upload-time = "2025-10-08T19:48:22.592Z" }, + { url = "https://files.pythonhosted.org/packages/48/c8/33cee30bd890672c63743049f3c9e4be087e6780906bfc3ec58528be59c1/propcache-0.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c3c70630930447f9ef1caac7728c8ad1c56bc5015338b20fed0d08ea2480b3a", size = 262880, upload-time = "2025-10-08T19:48:23.947Z" }, + { url = "https://files.pythonhosted.org/packages/0c/b1/8f08a143b204b418285c88b83d00edbd61afbc2c6415ffafc8905da7038b/propcache-0.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e57061305815dfc910a3634dcf584f08168a8836e6999983569f51a8544cd89", size = 260938, upload-time = "2025-10-08T19:48:25.656Z" }, + { url = "https://files.pythonhosted.org/packages/cf/12/96e4664c82ca2f31e1c8dff86afb867348979eb78d3cb8546a680287a1e9/propcache-0.4.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:521a463429ef54143092c11a77e04056dd00636f72e8c45b70aaa3140d639726", size = 247641, upload-time = "2025-10-08T19:48:27.207Z" }, + { url = "https://files.pythonhosted.org/packages/18/ed/e7a9cfca28133386ba52278136d42209d3125db08d0a6395f0cba0c0285c/propcache-0.4.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:120c964da3fdc75e3731aa392527136d4ad35868cc556fd09bb6d09172d9a367", size = 262510, upload-time = "2025-10-08T19:48:28.65Z" }, + { url = "https://files.pythonhosted.org/packages/f5/76/16d8bf65e8845dd62b4e2b57444ab81f07f40caa5652b8969b87ddcf2ef6/propcache-0.4.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:d8f353eb14ee3441ee844ade4277d560cdd68288838673273b978e3d6d2c8f36", size = 263161, upload-time = "2025-10-08T19:48:30.133Z" }, + { url = "https://files.pythonhosted.org/packages/e7/70/c99e9edb5d91d5ad8a49fa3c1e8285ba64f1476782fed10ab251ff413ba1/propcache-0.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ab2943be7c652f09638800905ee1bab2c544e537edb57d527997a24c13dc1455", size = 257393, upload-time = "2025-10-08T19:48:31.567Z" }, + { url = "https://files.pythonhosted.org/packages/08/02/87b25304249a35c0915d236575bc3574a323f60b47939a2262b77632a3ee/propcache-0.4.1-cp314-cp314t-win32.whl", hash = "sha256:05674a162469f31358c30bcaa8883cb7829fa3110bf9c0991fe27d7896c42d85", size = 42546, upload-time = "2025-10-08T19:48:32.872Z" }, + { url = "https://files.pythonhosted.org/packages/cb/ef/3c6ecf8b317aa982f309835e8f96987466123c6e596646d4e6a1dfcd080f/propcache-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:990f6b3e2a27d683cb7602ed6c86f15ee6b43b1194736f9baaeb93d0016633b1", size = 46259, upload-time = "2025-10-08T19:48:34.226Z" }, + { url = "https://files.pythonhosted.org/packages/c4/2d/346e946d4951f37eca1e4f55be0f0174c52cd70720f84029b02f296f4a38/propcache-0.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:ecef2343af4cc68e05131e45024ba34f6095821988a9d0a02aa7c73fcc448aa9", size = 40428, upload-time = "2025-10-08T19:48:35.441Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, +] + +[[package]] +name = "protobuf" +version = "5.29.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/29/d09e70352e4e88c9c7a198d5645d7277811448d76c23b00345670f7c8a38/protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84", size = 425226, upload-time = "2025-05-28T23:51:59.82Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/11/6e40e9fc5bba02988a214c07cf324595789ca7820160bfd1f8be96e48539/protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079", size = 422963, upload-time = "2025-05-28T23:51:41.204Z" }, + { url = "https://files.pythonhosted.org/packages/81/7f/73cefb093e1a2a7c3ffd839e6f9fcafb7a427d300c7f8aef9c64405d8ac6/protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc", size = 434818, upload-time = "2025-05-28T23:51:44.297Z" }, + { url = "https://files.pythonhosted.org/packages/dd/73/10e1661c21f139f2c6ad9b23040ff36fee624310dc28fba20d33fdae124c/protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671", size = 418091, upload-time = "2025-05-28T23:51:45.907Z" }, + { url = "https://files.pythonhosted.org/packages/6c/04/98f6f8cf5b07ab1294c13f34b4e69b3722bb609c5b701d6c169828f9f8aa/protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015", size = 319824, upload-time = "2025-05-28T23:51:47.545Z" }, + { url = "https://files.pythonhosted.org/packages/85/e4/07c80521879c2d15f321465ac24c70efe2381378c00bf5e56a0f4fbac8cd/protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61", size = 319942, upload-time = "2025-05-28T23:51:49.11Z" }, + { url = "https://files.pythonhosted.org/packages/7e/cc/7e77861000a0691aeea8f4566e5d3aa716f2b1dece4a24439437e41d3d25/protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5", size = 172823, upload-time = "2025-05-28T23:51:58.157Z" }, +] + +[[package]] +name = "psutil" +version = "7.1.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/88/bdd0a41e5857d5d703287598cbf08dad90aed56774ea52ae071bae9071b6/psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74", size = 489059, upload-time = "2025-11-02T12:25:54.619Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/93/0c49e776b8734fef56ec9c5c57f923922f2cf0497d62e0f419465f28f3d0/psutil-7.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0005da714eee687b4b8decd3d6cc7c6db36215c9e74e5ad2264b90c3df7d92dc", size = 239751, upload-time = "2025-11-02T12:25:58.161Z" }, + { url = "https://files.pythonhosted.org/packages/6f/8d/b31e39c769e70780f007969815195a55c81a63efebdd4dbe9e7a113adb2f/psutil-7.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19644c85dcb987e35eeeaefdc3915d059dac7bd1167cdcdbf27e0ce2df0c08c0", size = 240368, upload-time = "2025-11-02T12:26:00.491Z" }, + { url = "https://files.pythonhosted.org/packages/62/61/23fd4acc3c9eebbf6b6c78bcd89e5d020cfde4acf0a9233e9d4e3fa698b4/psutil-7.1.3-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95ef04cf2e5ba0ab9eaafc4a11eaae91b44f4ef5541acd2ee91d9108d00d59a7", size = 287134, upload-time = "2025-11-02T12:26:02.613Z" }, + { url = "https://files.pythonhosted.org/packages/30/1c/f921a009ea9ceb51aa355cb0cc118f68d354db36eae18174bab63affb3e6/psutil-7.1.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1068c303be3a72f8e18e412c5b2a8f6d31750fb152f9cb106b54090296c9d251", size = 289904, upload-time = "2025-11-02T12:26:05.207Z" }, + { url = "https://files.pythonhosted.org/packages/a6/82/62d68066e13e46a5116df187d319d1724b3f437ddd0f958756fc052677f4/psutil-7.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:18349c5c24b06ac5612c0428ec2a0331c26443d259e2a0144a9b24b4395b58fa", size = 249642, upload-time = "2025-11-02T12:26:07.447Z" }, + { url = "https://files.pythonhosted.org/packages/df/ad/c1cd5fe965c14a0392112f68362cfceb5230819dbb5b1888950d18a11d9f/psutil-7.1.3-cp313-cp313t-win_arm64.whl", hash = "sha256:c525ffa774fe4496282fb0b1187725793de3e7c6b29e41562733cae9ada151ee", size = 245518, upload-time = "2025-11-02T12:26:09.719Z" }, + { url = "https://files.pythonhosted.org/packages/2e/bb/6670bded3e3236eb4287c7bcdc167e9fae6e1e9286e437f7111caed2f909/psutil-7.1.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b403da1df4d6d43973dc004d19cee3b848e998ae3154cc8097d139b77156c353", size = 239843, upload-time = "2025-11-02T12:26:11.968Z" }, + { url = "https://files.pythonhosted.org/packages/b8/66/853d50e75a38c9a7370ddbeefabdd3d3116b9c31ef94dc92c6729bc36bec/psutil-7.1.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ad81425efc5e75da3f39b3e636293360ad8d0b49bed7df824c79764fb4ba9b8b", size = 240369, upload-time = "2025-11-02T12:26:14.358Z" }, + { url = "https://files.pythonhosted.org/packages/41/bd/313aba97cb5bfb26916dc29cf0646cbe4dd6a89ca69e8c6edce654876d39/psutil-7.1.3-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f33a3702e167783a9213db10ad29650ebf383946e91bc77f28a5eb083496bc9", size = 288210, upload-time = "2025-11-02T12:26:16.699Z" }, + { url = "https://files.pythonhosted.org/packages/c2/fa/76e3c06e760927a0cfb5705eb38164254de34e9bd86db656d4dbaa228b04/psutil-7.1.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fac9cd332c67f4422504297889da5ab7e05fd11e3c4392140f7370f4208ded1f", size = 291182, upload-time = "2025-11-02T12:26:18.848Z" }, + { url = "https://files.pythonhosted.org/packages/0f/1d/5774a91607035ee5078b8fd747686ebec28a962f178712de100d00b78a32/psutil-7.1.3-cp314-cp314t-win_amd64.whl", hash = "sha256:3792983e23b69843aea49c8f5b8f115572c5ab64c153bada5270086a2123c7e7", size = 250466, upload-time = "2025-11-02T12:26:21.183Z" }, + { url = "https://files.pythonhosted.org/packages/00/ca/e426584bacb43a5cb1ac91fae1937f478cd8fbe5e4ff96574e698a2c77cd/psutil-7.1.3-cp314-cp314t-win_arm64.whl", hash = "sha256:31d77fcedb7529f27bb3a0472bea9334349f9a04160e8e6e5020f22c59893264", size = 245756, upload-time = "2025-11-02T12:26:23.148Z" }, + { url = "https://files.pythonhosted.org/packages/ef/94/46b9154a800253e7ecff5aaacdf8ebf43db99de4a2dfa18575b02548654e/psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab", size = 238359, upload-time = "2025-11-02T12:26:25.284Z" }, + { url = "https://files.pythonhosted.org/packages/68/3a/9f93cff5c025029a36d9a92fef47220ab4692ee7f2be0fba9f92813d0cb8/psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880", size = 239171, upload-time = "2025-11-02T12:26:27.23Z" }, + { url = "https://files.pythonhosted.org/packages/ce/b1/5f49af514f76431ba4eea935b8ad3725cdeb397e9245ab919dbc1d1dc20f/psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3", size = 263261, upload-time = "2025-11-02T12:26:29.48Z" }, + { url = "https://files.pythonhosted.org/packages/e0/95/992c8816a74016eb095e73585d747e0a8ea21a061ed3689474fabb29a395/psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b", size = 264635, upload-time = "2025-11-02T12:26:31.74Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/c3ed1a622b6ae2fd3c945a366e64eb35247a31e4db16cf5095e269e8eb3c/psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd", size = 247633, upload-time = "2025-11-02T12:26:33.887Z" }, + { url = "https://files.pythonhosted.org/packages/c9/ad/33b2ccec09bf96c2b2ef3f9a6f66baac8253d7565d8839e024a6b905d45d/psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1", size = 244608, upload-time = "2025-11-02T12:26:36.136Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8d/35/d319ed522433215526689bad428a94058b6dd12190ce7ddd78618ac14b28/pydantic-2.12.2.tar.gz", hash = "sha256:7b8fa15b831a4bbde9d5b84028641ac3080a4ca2cbd4a621a661687e741624fd", size = 816358, upload-time = "2025-10-14T15:02:21.842Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/98/468cb649f208a6f1279448e6e5247b37ae79cf5e4041186f1e2ef3d16345/pydantic-2.12.2-py3-none-any.whl", hash = "sha256:25ff718ee909acd82f1ff9b1a4acfd781bb23ab3739adaa7144f19a6a4e231ae", size = 460628, upload-time = "2025-10-14T15:02:19.623Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/18/d0944e8eaaa3efd0a91b0f1fc537d3be55ad35091b6a87638211ba691964/pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5", size = 457557, upload-time = "2025-10-14T10:23:47.909Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/d0/c20adabd181a029a970738dfe23710b52a31f1258f591874fcdec7359845/pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746", size = 2105688, upload-time = "2025-10-14T10:20:54.448Z" }, + { url = "https://files.pythonhosted.org/packages/00/b6/0ce5c03cec5ae94cca220dfecddc453c077d71363b98a4bbdb3c0b22c783/pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced", size = 1910807, upload-time = "2025-10-14T10:20:56.115Z" }, + { url = "https://files.pythonhosted.org/packages/68/3e/800d3d02c8beb0b5c069c870cbb83799d085debf43499c897bb4b4aaff0d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a", size = 1956669, upload-time = "2025-10-14T10:20:57.874Z" }, + { url = "https://files.pythonhosted.org/packages/60/a4/24271cc71a17f64589be49ab8bd0751f6a0a03046c690df60989f2f95c2c/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02", size = 2051629, upload-time = "2025-10-14T10:21:00.006Z" }, + { url = "https://files.pythonhosted.org/packages/68/de/45af3ca2f175d91b96bfb62e1f2d2f1f9f3b14a734afe0bfeff079f78181/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1", size = 2224049, upload-time = "2025-10-14T10:21:01.801Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/ae4e1ff84672bf869d0a77af24fd78387850e9497753c432875066b5d622/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2", size = 2342409, upload-time = "2025-10-14T10:21:03.556Z" }, + { url = "https://files.pythonhosted.org/packages/18/62/273dd70b0026a085c7b74b000394e1ef95719ea579c76ea2f0cc8893736d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84", size = 2069635, upload-time = "2025-10-14T10:21:05.385Z" }, + { url = "https://files.pythonhosted.org/packages/30/03/cf485fff699b4cdaea469bc481719d3e49f023241b4abb656f8d422189fc/pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d", size = 2194284, upload-time = "2025-10-14T10:21:07.122Z" }, + { url = "https://files.pythonhosted.org/packages/f9/7e/c8e713db32405dfd97211f2fc0a15d6bf8adb7640f3d18544c1f39526619/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d", size = 2137566, upload-time = "2025-10-14T10:21:08.981Z" }, + { url = "https://files.pythonhosted.org/packages/04/f7/db71fd4cdccc8b75990f79ccafbbd66757e19f6d5ee724a6252414483fb4/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2", size = 2316809, upload-time = "2025-10-14T10:21:10.805Z" }, + { url = "https://files.pythonhosted.org/packages/76/63/a54973ddb945f1bca56742b48b144d85c9fc22f819ddeb9f861c249d5464/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab", size = 2311119, upload-time = "2025-10-14T10:21:12.583Z" }, + { url = "https://files.pythonhosted.org/packages/f8/03/5d12891e93c19218af74843a27e32b94922195ded2386f7b55382f904d2f/pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c", size = 1981398, upload-time = "2025-10-14T10:21:14.584Z" }, + { url = "https://files.pythonhosted.org/packages/be/d8/fd0de71f39db91135b7a26996160de71c073d8635edfce8b3c3681be0d6d/pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4", size = 2030735, upload-time = "2025-10-14T10:21:16.432Z" }, + { url = "https://files.pythonhosted.org/packages/72/86/c99921c1cf6650023c08bfab6fe2d7057a5142628ef7ccfa9921f2dda1d5/pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564", size = 1973209, upload-time = "2025-10-14T10:21:18.213Z" }, + { url = "https://files.pythonhosted.org/packages/36/0d/b5706cacb70a8414396efdda3d72ae0542e050b591119e458e2490baf035/pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4", size = 1877324, upload-time = "2025-10-14T10:21:20.363Z" }, + { url = "https://files.pythonhosted.org/packages/de/2d/cba1fa02cfdea72dfb3a9babb067c83b9dff0bbcb198368e000a6b756ea7/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2", size = 1884515, upload-time = "2025-10-14T10:21:22.339Z" }, + { url = "https://files.pythonhosted.org/packages/07/ea/3df927c4384ed9b503c9cc2d076cf983b4f2adb0c754578dfb1245c51e46/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf", size = 2042819, upload-time = "2025-10-14T10:21:26.683Z" }, + { url = "https://files.pythonhosted.org/packages/6a/ee/df8e871f07074250270a3b1b82aad4cd0026b588acd5d7d3eb2fcb1471a3/pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2", size = 1995866, upload-time = "2025-10-14T10:21:28.951Z" }, + { url = "https://files.pythonhosted.org/packages/fc/de/b20f4ab954d6d399499c33ec4fafc46d9551e11dc1858fb7f5dca0748ceb/pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89", size = 1970034, upload-time = "2025-10-14T10:21:30.869Z" }, + { url = "https://files.pythonhosted.org/packages/54/28/d3325da57d413b9819365546eb9a6e8b7cbd9373d9380efd5f74326143e6/pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1", size = 2102022, upload-time = "2025-10-14T10:21:32.809Z" }, + { url = "https://files.pythonhosted.org/packages/9e/24/b58a1bc0d834bf1acc4361e61233ee217169a42efbdc15a60296e13ce438/pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac", size = 1905495, upload-time = "2025-10-14T10:21:34.812Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a4/71f759cc41b7043e8ecdaab81b985a9b6cad7cec077e0b92cff8b71ecf6b/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554", size = 1956131, upload-time = "2025-10-14T10:21:36.924Z" }, + { url = "https://files.pythonhosted.org/packages/b0/64/1e79ac7aa51f1eec7c4cda8cbe456d5d09f05fdd68b32776d72168d54275/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e", size = 2052236, upload-time = "2025-10-14T10:21:38.927Z" }, + { url = "https://files.pythonhosted.org/packages/e9/e3/a3ffc363bd4287b80f1d43dc1c28ba64831f8dfc237d6fec8f2661138d48/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616", size = 2223573, upload-time = "2025-10-14T10:21:41.574Z" }, + { url = "https://files.pythonhosted.org/packages/28/27/78814089b4d2e684a9088ede3790763c64693c3d1408ddc0a248bc789126/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af", size = 2342467, upload-time = "2025-10-14T10:21:44.018Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/4de0e2a1159cb85ad737e03306717637842c88c7fd6d97973172fb183149/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12", size = 2063754, upload-time = "2025-10-14T10:21:46.466Z" }, + { url = "https://files.pythonhosted.org/packages/0f/50/8cb90ce4b9efcf7ae78130afeb99fd1c86125ccdf9906ef64b9d42f37c25/pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d", size = 2196754, upload-time = "2025-10-14T10:21:48.486Z" }, + { url = "https://files.pythonhosted.org/packages/34/3b/ccdc77af9cd5082723574a1cc1bcae7a6acacc829d7c0a06201f7886a109/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad", size = 2137115, upload-time = "2025-10-14T10:21:50.63Z" }, + { url = "https://files.pythonhosted.org/packages/ca/ba/e7c7a02651a8f7c52dc2cff2b64a30c313e3b57c7d93703cecea76c09b71/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a", size = 2317400, upload-time = "2025-10-14T10:21:52.959Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ba/6c533a4ee8aec6b812c643c49bb3bd88d3f01e3cebe451bb85512d37f00f/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025", size = 2312070, upload-time = "2025-10-14T10:21:55.419Z" }, + { url = "https://files.pythonhosted.org/packages/22/ae/f10524fcc0ab8d7f96cf9a74c880243576fd3e72bd8ce4f81e43d22bcab7/pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e", size = 1982277, upload-time = "2025-10-14T10:21:57.474Z" }, + { url = "https://files.pythonhosted.org/packages/b4/dc/e5aa27aea1ad4638f0c3fb41132f7eb583bd7420ee63204e2d4333a3bbf9/pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894", size = 2024608, upload-time = "2025-10-14T10:21:59.557Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/51d89cc2612bd147198e120a13f150afbf0bcb4615cddb049ab10b81b79e/pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d", size = 1967614, upload-time = "2025-10-14T10:22:01.847Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c2/472f2e31b95eff099961fa050c376ab7156a81da194f9edb9f710f68787b/pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da", size = 1876904, upload-time = "2025-10-14T10:22:04.062Z" }, + { url = "https://files.pythonhosted.org/packages/4a/07/ea8eeb91173807ecdae4f4a5f4b150a520085b35454350fc219ba79e66a3/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e", size = 1882538, upload-time = "2025-10-14T10:22:06.39Z" }, + { url = "https://files.pythonhosted.org/packages/1e/29/b53a9ca6cd366bfc928823679c6a76c7a4c69f8201c0ba7903ad18ebae2f/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa", size = 2041183, upload-time = "2025-10-14T10:22:08.812Z" }, + { url = "https://files.pythonhosted.org/packages/c7/3d/f8c1a371ceebcaf94d6dd2d77c6cf4b1c078e13a5837aee83f760b4f7cfd/pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d", size = 1993542, upload-time = "2025-10-14T10:22:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ac/9fc61b4f9d079482a290afe8d206b8f490e9fd32d4fc03ed4fc698214e01/pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0", size = 1973897, upload-time = "2025-10-14T10:22:13.444Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/c5/dbbc27b814c71676593d1c3f718e6cd7d4f00652cefa24b75f7aa3efb25e/pydantic_settings-2.11.0.tar.gz", hash = "sha256:d0e87a1c7d33593beb7194adb8470fc426e95ba02af83a0f23474a04c9a08180", size = 188394, upload-time = "2025-09-24T14:19:11.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/d6/887a1ff844e64aa823fb4905978d882a633cfe295c32eacad582b78a7d8b/pydantic_settings-2.11.0-py3-none-any.whl", hash = "sha256:fe2cea3413b9530d10f3a5875adffb17ada5c1e1bab0b2885546d7310415207c", size = 48608, upload-time = "2025-09-24T14:19:10.015Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-cov" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage" }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/f7/c933acc76f5208b3b00089573cf6a2bc26dc80a8aece8f52bb7d6b1855ca/pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1", size = 54328, upload-time = "2025-09-09T10:57:02.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, +] + +[[package]] +name = "pytest-mock" +version = "3.15.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/14/eb014d26be205d38ad5ad20d9a80f7d201472e08167f0bb4361e251084a9/pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f", size = 34036, upload-time = "2025-09-16T16:37:27.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/cc/06253936f4a7fa2e0f48dfe6d851d9c56df896a9ab09ac019d70b760619c/pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d", size = 10095, upload-time = "2025-09-16T16:37:25.734Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, +] + +[[package]] +name = "pytokens" +version = "0.1.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/5f/e959a442435e24f6fb5a01aec6c657079ceaca1b3baf18561c3728d681da/pytokens-0.1.10.tar.gz", hash = "sha256:c9a4bfa0be1d26aebce03e6884ba454e842f186a59ea43a6d3b25af58223c044", size = 12171, upload-time = "2025-02-19T14:51:22.001Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/e5/63bed382f6a7a5ba70e7e132b8b7b8abbcf4888ffa6be4877698dcfbed7d/pytokens-0.1.10-py3-none-any.whl", hash = "sha256:db7b72284e480e69fb085d9f251f66b3d2df8b7166059261258ff35f50fb711b", size = 12046, upload-time = "2025-02-19T14:51:18.694Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "responses" +version = "0.25.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/95/89c054ad70bfef6da605338b009b2e283485835351a9935c7bfbfaca7ffc/responses-0.25.8.tar.gz", hash = "sha256:9374d047a575c8f781b94454db5cab590b6029505f488d12899ddb10a4af1cf4", size = 79320, upload-time = "2025-08-08T19:01:46.709Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/4c/cc276ce57e572c102d9542d383b2cfd551276581dc60004cb94fe8774c11/responses-0.25.8-py3-none-any.whl", hash = "sha256:0c710af92def29c8352ceadff0c3fe340ace27cf5af1bbe46fb71275bcd2831c", size = 34769, upload-time = "2025-08-08T19:01:45.018Z" }, +] + +[[package]] +name = "rich" +version = "14.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/7a/b0178788f8dc6cafce37a212c99565fa1fe7872c70c6c9c1e1a372d9d88f/rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd", size = 243393, upload-time = "2025-10-09T14:16:51.245Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.27.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/dd/2c0cbe774744272b0ae725f44032c77bdcab6e8bcf544bffa3b6e70c8dba/rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8", size = 27479, upload-time = "2025-08-27T12:16:36.024Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/77/610aeee8d41e39080c7e14afa5387138e3c9fa9756ab893d09d99e7d8e98/rpds_py-0.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e4b9fcfbc021633863a37e92571d6f91851fa656f0180246e84cbd8b3f6b329b", size = 361741, upload-time = "2025-08-27T12:13:31.039Z" }, + { url = "https://files.pythonhosted.org/packages/3a/fc/c43765f201c6a1c60be2043cbdb664013def52460a4c7adace89d6682bf4/rpds_py-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1441811a96eadca93c517d08df75de45e5ffe68aa3089924f963c782c4b898cf", size = 345574, upload-time = "2025-08-27T12:13:32.902Z" }, + { url = "https://files.pythonhosted.org/packages/20/42/ee2b2ca114294cd9847d0ef9c26d2b0851b2e7e00bf14cc4c0b581df0fc3/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55266dafa22e672f5a4f65019015f90336ed31c6383bd53f5e7826d21a0e0b83", size = 385051, upload-time = "2025-08-27T12:13:34.228Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e8/1e430fe311e4799e02e2d1af7c765f024e95e17d651612425b226705f910/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78827d7ac08627ea2c8e02c9e5b41180ea5ea1f747e9db0915e3adf36b62dcf", size = 398395, upload-time = "2025-08-27T12:13:36.132Z" }, + { url = "https://files.pythonhosted.org/packages/82/95/9dc227d441ff2670651c27a739acb2535ccaf8b351a88d78c088965e5996/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae92443798a40a92dc5f0b01d8a7c93adde0c4dc965310a29ae7c64d72b9fad2", size = 524334, upload-time = "2025-08-27T12:13:37.562Z" }, + { url = "https://files.pythonhosted.org/packages/87/01/a670c232f401d9ad461d9a332aa4080cd3cb1d1df18213dbd0d2a6a7ab51/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c46c9dd2403b66a2a3b9720ec4b74d4ab49d4fabf9f03dfdce2d42af913fe8d0", size = 407691, upload-time = "2025-08-27T12:13:38.94Z" }, + { url = "https://files.pythonhosted.org/packages/03/36/0a14aebbaa26fe7fab4780c76f2239e76cc95a0090bdb25e31d95c492fcd/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efe4eb1d01b7f5f1939f4ef30ecea6c6b3521eec451fb93191bf84b2a522418", size = 386868, upload-time = "2025-08-27T12:13:40.192Z" }, + { url = "https://files.pythonhosted.org/packages/3b/03/8c897fb8b5347ff6c1cc31239b9611c5bf79d78c984430887a353e1409a1/rpds_py-0.27.1-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:15d3b4d83582d10c601f481eca29c3f138d44c92187d197aff663a269197c02d", size = 405469, upload-time = "2025-08-27T12:13:41.496Z" }, + { url = "https://files.pythonhosted.org/packages/da/07/88c60edc2df74850d496d78a1fdcdc7b54360a7f610a4d50008309d41b94/rpds_py-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4ed2e16abbc982a169d30d1a420274a709949e2cbdef119fe2ec9d870b42f274", size = 422125, upload-time = "2025-08-27T12:13:42.802Z" }, + { url = "https://files.pythonhosted.org/packages/6b/86/5f4c707603e41b05f191a749984f390dabcbc467cf833769b47bf14ba04f/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a75f305c9b013289121ec0f1181931975df78738cdf650093e6b86d74aa7d8dd", size = 562341, upload-time = "2025-08-27T12:13:44.472Z" }, + { url = "https://files.pythonhosted.org/packages/b2/92/3c0cb2492094e3cd9baf9e49bbb7befeceb584ea0c1a8b5939dca4da12e5/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67ce7620704745881a3d4b0ada80ab4d99df390838839921f99e63c474f82cf2", size = 592511, upload-time = "2025-08-27T12:13:45.898Z" }, + { url = "https://files.pythonhosted.org/packages/10/bb/82e64fbb0047c46a168faa28d0d45a7851cd0582f850b966811d30f67ad8/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d992ac10eb86d9b6f369647b6a3f412fc0075cfd5d799530e84d335e440a002", size = 557736, upload-time = "2025-08-27T12:13:47.408Z" }, + { url = "https://files.pythonhosted.org/packages/00/95/3c863973d409210da7fb41958172c6b7dbe7fc34e04d3cc1f10bb85e979f/rpds_py-0.27.1-cp313-cp313-win32.whl", hash = "sha256:4f75e4bd8ab8db624e02c8e2fc4063021b58becdbe6df793a8111d9343aec1e3", size = 221462, upload-time = "2025-08-27T12:13:48.742Z" }, + { url = "https://files.pythonhosted.org/packages/ce/2c/5867b14a81dc217b56d95a9f2a40fdbc56a1ab0181b80132beeecbd4b2d6/rpds_py-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9025faafc62ed0b75a53e541895ca272815bec18abe2249ff6501c8f2e12b83", size = 232034, upload-time = "2025-08-27T12:13:50.11Z" }, + { url = "https://files.pythonhosted.org/packages/c7/78/3958f3f018c01923823f1e47f1cc338e398814b92d83cd278364446fac66/rpds_py-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:ed10dc32829e7d222b7d3b93136d25a406ba9788f6a7ebf6809092da1f4d279d", size = 222392, upload-time = "2025-08-27T12:13:52.587Z" }, + { url = "https://files.pythonhosted.org/packages/01/76/1cdf1f91aed5c3a7bf2eba1f1c4e4d6f57832d73003919a20118870ea659/rpds_py-0.27.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:92022bbbad0d4426e616815b16bc4127f83c9a74940e1ccf3cfe0b387aba0228", size = 358355, upload-time = "2025-08-27T12:13:54.012Z" }, + { url = "https://files.pythonhosted.org/packages/c3/6f/bf142541229374287604caf3bb2a4ae17f0a580798fd72d3b009b532db4e/rpds_py-0.27.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47162fdab9407ec3f160805ac3e154df042e577dd53341745fc7fb3f625e6d92", size = 342138, upload-time = "2025-08-27T12:13:55.791Z" }, + { url = "https://files.pythonhosted.org/packages/1a/77/355b1c041d6be40886c44ff5e798b4e2769e497b790f0f7fd1e78d17e9a8/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb89bec23fddc489e5d78b550a7b773557c9ab58b7946154a10a6f7a214a48b2", size = 380247, upload-time = "2025-08-27T12:13:57.683Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a4/d9cef5c3946ea271ce2243c51481971cd6e34f21925af2783dd17b26e815/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e48af21883ded2b3e9eb48cb7880ad8598b31ab752ff3be6457001d78f416723", size = 390699, upload-time = "2025-08-27T12:13:59.137Z" }, + { url = "https://files.pythonhosted.org/packages/3a/06/005106a7b8c6c1a7e91b73169e49870f4af5256119d34a361ae5240a0c1d/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f5b7bd8e219ed50299e58551a410b64daafb5017d54bbe822e003856f06a802", size = 521852, upload-time = "2025-08-27T12:14:00.583Z" }, + { url = "https://files.pythonhosted.org/packages/e5/3e/50fb1dac0948e17a02eb05c24510a8fe12d5ce8561c6b7b7d1339ab7ab9c/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08f1e20bccf73b08d12d804d6e1c22ca5530e71659e6673bce31a6bb71c1e73f", size = 402582, upload-time = "2025-08-27T12:14:02.034Z" }, + { url = "https://files.pythonhosted.org/packages/cb/b0/f4e224090dc5b0ec15f31a02d746ab24101dd430847c4d99123798661bfc/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dc5dceeaefcc96dc192e3a80bbe1d6c410c469e97bdd47494a7d930987f18b2", size = 384126, upload-time = "2025-08-27T12:14:03.437Z" }, + { url = "https://files.pythonhosted.org/packages/54/77/ac339d5f82b6afff1df8f0fe0d2145cc827992cb5f8eeb90fc9f31ef7a63/rpds_py-0.27.1-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d76f9cc8665acdc0c9177043746775aa7babbf479b5520b78ae4002d889f5c21", size = 399486, upload-time = "2025-08-27T12:14:05.443Z" }, + { url = "https://files.pythonhosted.org/packages/d6/29/3e1c255eee6ac358c056a57d6d6869baa00a62fa32eea5ee0632039c50a3/rpds_py-0.27.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134fae0e36022edad8290a6661edf40c023562964efea0cc0ec7f5d392d2aaef", size = 414832, upload-time = "2025-08-27T12:14:06.902Z" }, + { url = "https://files.pythonhosted.org/packages/3f/db/6d498b844342deb3fa1d030598db93937a9964fcf5cb4da4feb5f17be34b/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb11a4f1b2b63337cfd3b4d110af778a59aae51c81d195768e353d8b52f88081", size = 557249, upload-time = "2025-08-27T12:14:08.37Z" }, + { url = "https://files.pythonhosted.org/packages/60/f3/690dd38e2310b6f68858a331399b4d6dbb9132c3e8ef8b4333b96caf403d/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:13e608ac9f50a0ed4faec0e90ece76ae33b34c0e8656e3dceb9a7db994c692cd", size = 587356, upload-time = "2025-08-27T12:14:10.034Z" }, + { url = "https://files.pythonhosted.org/packages/86/e3/84507781cccd0145f35b1dc32c72675200c5ce8d5b30f813e49424ef68fc/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dd2135527aa40f061350c3f8f89da2644de26cd73e4de458e79606384f4f68e7", size = 555300, upload-time = "2025-08-27T12:14:11.783Z" }, + { url = "https://files.pythonhosted.org/packages/e5/ee/375469849e6b429b3516206b4580a79e9ef3eb12920ddbd4492b56eaacbe/rpds_py-0.27.1-cp313-cp313t-win32.whl", hash = "sha256:3020724ade63fe320a972e2ffd93b5623227e684315adce194941167fee02688", size = 216714, upload-time = "2025-08-27T12:14:13.629Z" }, + { url = "https://files.pythonhosted.org/packages/21/87/3fc94e47c9bd0742660e84706c311a860dcae4374cf4a03c477e23ce605a/rpds_py-0.27.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8ee50c3e41739886606388ba3ab3ee2aae9f35fb23f833091833255a31740797", size = 228943, upload-time = "2025-08-27T12:14:14.937Z" }, + { url = "https://files.pythonhosted.org/packages/70/36/b6e6066520a07cf029d385de869729a895917b411e777ab1cde878100a1d/rpds_py-0.27.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:acb9aafccaae278f449d9c713b64a9e68662e7799dbd5859e2c6b3c67b56d334", size = 362472, upload-time = "2025-08-27T12:14:16.333Z" }, + { url = "https://files.pythonhosted.org/packages/af/07/b4646032e0dcec0df9c73a3bd52f63bc6c5f9cda992f06bd0e73fe3fbebd/rpds_py-0.27.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b7fb801aa7f845ddf601c49630deeeccde7ce10065561d92729bfe81bd21fb33", size = 345676, upload-time = "2025-08-27T12:14:17.764Z" }, + { url = "https://files.pythonhosted.org/packages/b0/16/2f1003ee5d0af4bcb13c0cf894957984c32a6751ed7206db2aee7379a55e/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0dd05afb46597b9a2e11c351e5e4283c741237e7f617ffb3252780cca9336a", size = 385313, upload-time = "2025-08-27T12:14:19.829Z" }, + { url = "https://files.pythonhosted.org/packages/05/cd/7eb6dd7b232e7f2654d03fa07f1414d7dfc980e82ba71e40a7c46fd95484/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6dfb0e058adb12d8b1d1b25f686e94ffa65d9995a5157afe99743bf7369d62b", size = 399080, upload-time = "2025-08-27T12:14:21.531Z" }, + { url = "https://files.pythonhosted.org/packages/20/51/5829afd5000ec1cb60f304711f02572d619040aa3ec033d8226817d1e571/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed090ccd235f6fa8bb5861684567f0a83e04f52dfc2e5c05f2e4b1309fcf85e7", size = 523868, upload-time = "2025-08-27T12:14:23.485Z" }, + { url = "https://files.pythonhosted.org/packages/05/2c/30eebca20d5db95720ab4d2faec1b5e4c1025c473f703738c371241476a2/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf876e79763eecf3e7356f157540d6a093cef395b65514f17a356f62af6cc136", size = 408750, upload-time = "2025-08-27T12:14:24.924Z" }, + { url = "https://files.pythonhosted.org/packages/90/1a/cdb5083f043597c4d4276eae4e4c70c55ab5accec078da8611f24575a367/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12ed005216a51b1d6e2b02a7bd31885fe317e45897de81d86dcce7d74618ffff", size = 387688, upload-time = "2025-08-27T12:14:27.537Z" }, + { url = "https://files.pythonhosted.org/packages/7c/92/cf786a15320e173f945d205ab31585cc43969743bb1a48b6888f7a2b0a2d/rpds_py-0.27.1-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:ee4308f409a40e50593c7e3bb8cbe0b4d4c66d1674a316324f0c2f5383b486f9", size = 407225, upload-time = "2025-08-27T12:14:28.981Z" }, + { url = "https://files.pythonhosted.org/packages/33/5c/85ee16df5b65063ef26017bef33096557a4c83fbe56218ac7cd8c235f16d/rpds_py-0.27.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b08d152555acf1f455154d498ca855618c1378ec810646fcd7c76416ac6dc60", size = 423361, upload-time = "2025-08-27T12:14:30.469Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8e/1c2741307fcabd1a334ecf008e92c4f47bb6f848712cf15c923becfe82bb/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:dce51c828941973a5684d458214d3a36fcd28da3e1875d659388f4f9f12cc33e", size = 562493, upload-time = "2025-08-27T12:14:31.987Z" }, + { url = "https://files.pythonhosted.org/packages/04/03/5159321baae9b2222442a70c1f988cbbd66b9be0675dd3936461269be360/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c1476d6f29eb81aa4151c9a31219b03f1f798dc43d8af1250a870735516a1212", size = 592623, upload-time = "2025-08-27T12:14:33.543Z" }, + { url = "https://files.pythonhosted.org/packages/ff/39/c09fd1ad28b85bc1d4554a8710233c9f4cefd03d7717a1b8fbfd171d1167/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3ce0cac322b0d69b63c9cdb895ee1b65805ec9ffad37639f291dd79467bee675", size = 558800, upload-time = "2025-08-27T12:14:35.436Z" }, + { url = "https://files.pythonhosted.org/packages/c5/d6/99228e6bbcf4baa764b18258f519a9035131d91b538d4e0e294313462a98/rpds_py-0.27.1-cp314-cp314-win32.whl", hash = "sha256:dfbfac137d2a3d0725758cd141f878bf4329ba25e34979797c89474a89a8a3a3", size = 221943, upload-time = "2025-08-27T12:14:36.898Z" }, + { url = "https://files.pythonhosted.org/packages/be/07/c802bc6b8e95be83b79bdf23d1aa61d68324cb1006e245d6c58e959e314d/rpds_py-0.27.1-cp314-cp314-win_amd64.whl", hash = "sha256:a6e57b0abfe7cc513450fcf529eb486b6e4d3f8aee83e92eb5f1ef848218d456", size = 233739, upload-time = "2025-08-27T12:14:38.386Z" }, + { url = "https://files.pythonhosted.org/packages/c8/89/3e1b1c16d4c2d547c5717377a8df99aee8099ff050f87c45cb4d5fa70891/rpds_py-0.27.1-cp314-cp314-win_arm64.whl", hash = "sha256:faf8d146f3d476abfee026c4ae3bdd9ca14236ae4e4c310cbd1cf75ba33d24a3", size = 223120, upload-time = "2025-08-27T12:14:39.82Z" }, + { url = "https://files.pythonhosted.org/packages/62/7e/dc7931dc2fa4a6e46b2a4fa744a9fe5c548efd70e0ba74f40b39fa4a8c10/rpds_py-0.27.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:ba81d2b56b6d4911ce735aad0a1d4495e808b8ee4dc58715998741a26874e7c2", size = 358944, upload-time = "2025-08-27T12:14:41.199Z" }, + { url = "https://files.pythonhosted.org/packages/e6/22/4af76ac4e9f336bfb1a5f240d18a33c6b2fcaadb7472ac7680576512b49a/rpds_py-0.27.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:84f7d509870098de0e864cad0102711c1e24e9b1a50ee713b65928adb22269e4", size = 342283, upload-time = "2025-08-27T12:14:42.699Z" }, + { url = "https://files.pythonhosted.org/packages/1c/15/2a7c619b3c2272ea9feb9ade67a45c40b3eeb500d503ad4c28c395dc51b4/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e960fc78fecd1100539f14132425e1d5fe44ecb9239f8f27f079962021523e", size = 380320, upload-time = "2025-08-27T12:14:44.157Z" }, + { url = "https://files.pythonhosted.org/packages/a2/7d/4c6d243ba4a3057e994bb5bedd01b5c963c12fe38dde707a52acdb3849e7/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:62f85b665cedab1a503747617393573995dac4600ff51869d69ad2f39eb5e817", size = 391760, upload-time = "2025-08-27T12:14:45.845Z" }, + { url = "https://files.pythonhosted.org/packages/b4/71/b19401a909b83bcd67f90221330bc1ef11bc486fe4e04c24388d28a618ae/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fed467af29776f6556250c9ed85ea5a4dd121ab56a5f8b206e3e7a4c551e48ec", size = 522476, upload-time = "2025-08-27T12:14:47.364Z" }, + { url = "https://files.pythonhosted.org/packages/e4/44/1a3b9715c0455d2e2f0f6df5ee6d6f5afdc423d0773a8a682ed2b43c566c/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2729615f9d430af0ae6b36cf042cb55c0936408d543fb691e1a9e36648fd35a", size = 403418, upload-time = "2025-08-27T12:14:49.991Z" }, + { url = "https://files.pythonhosted.org/packages/1c/4b/fb6c4f14984eb56673bc868a66536f53417ddb13ed44b391998100a06a96/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b207d881a9aef7ba753d69c123a35d96ca7cb808056998f6b9e8747321f03b8", size = 384771, upload-time = "2025-08-27T12:14:52.159Z" }, + { url = "https://files.pythonhosted.org/packages/c0/56/d5265d2d28b7420d7b4d4d85cad8ef891760f5135102e60d5c970b976e41/rpds_py-0.27.1-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:639fd5efec029f99b79ae47e5d7e00ad8a773da899b6309f6786ecaf22948c48", size = 400022, upload-time = "2025-08-27T12:14:53.859Z" }, + { url = "https://files.pythonhosted.org/packages/8f/e9/9f5fc70164a569bdd6ed9046486c3568d6926e3a49bdefeeccfb18655875/rpds_py-0.27.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fecc80cb2a90e28af8a9b366edacf33d7a91cbfe4c2c4544ea1246e949cfebeb", size = 416787, upload-time = "2025-08-27T12:14:55.673Z" }, + { url = "https://files.pythonhosted.org/packages/d4/64/56dd03430ba491db943a81dcdef115a985aac5f44f565cd39a00c766d45c/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42a89282d711711d0a62d6f57d81aa43a1368686c45bc1c46b7f079d55692734", size = 557538, upload-time = "2025-08-27T12:14:57.245Z" }, + { url = "https://files.pythonhosted.org/packages/3f/36/92cc885a3129993b1d963a2a42ecf64e6a8e129d2c7cc980dbeba84e55fb/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:cf9931f14223de59551ab9d38ed18d92f14f055a5f78c1d8ad6493f735021bbb", size = 588512, upload-time = "2025-08-27T12:14:58.728Z" }, + { url = "https://files.pythonhosted.org/packages/dd/10/6b283707780a81919f71625351182b4f98932ac89a09023cb61865136244/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f39f58a27cc6e59f432b568ed8429c7e1641324fbe38131de852cd77b2d534b0", size = 555813, upload-time = "2025-08-27T12:15:00.334Z" }, + { url = "https://files.pythonhosted.org/packages/04/2e/30b5ea18c01379da6272a92825dd7e53dc9d15c88a19e97932d35d430ef7/rpds_py-0.27.1-cp314-cp314t-win32.whl", hash = "sha256:d5fa0ee122dc09e23607a28e6d7b150da16c662e66409bbe85230e4c85bb528a", size = 217385, upload-time = "2025-08-27T12:15:01.937Z" }, + { url = "https://files.pythonhosted.org/packages/32/7d/97119da51cb1dd3f2f3c0805f155a3aa4a95fa44fe7d78ae15e69edf4f34/rpds_py-0.27.1-cp314-cp314t-win_amd64.whl", hash = "sha256:6567d2bb951e21232c2f660c24cf3470bb96de56cdcb3f071a83feeaff8a2772", size = 230097, upload-time = "2025-08-27T12:15:03.961Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/74/8d69dcb7a9efe8baa2046891735e5dfe433ad558ae23d9e3c14c633d1d58/s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125", size = 151547, upload-time = "2025-09-09T19:23:31.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/f0/ae7ca09223a81a1d890b2557186ea015f6e0502e9b8cb8e1813f1d8cfa4e/s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456", size = 85712, upload-time = "2025-09-09T19:23:30.041Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "slack-bolt" +version = "1.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "slack-sdk" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/14/0f490731fbfc95b5711e8124b30bb6e2a4be5edad22256891adad66f8b79/slack_bolt-1.26.0.tar.gz", hash = "sha256:b0b806b9dcf009ee50172830c1d170e231cd873c5b819703bbcdc59a0fe5ff3e", size = 129915, upload-time = "2025-10-06T23:41:51.708Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/77/57aff95f88f2f1a959088ff29c45ceaf8dcad540e9966b647d6942a007f0/slack_bolt-1.26.0-py2.py3-none-any.whl", hash = "sha256:d8386ecb27aaa487c1a5e4b43a4125f532100fc3a26e49dd2a66f5837ff2e3be", size = 230084, upload-time = "2025-10-06T23:41:50.118Z" }, +] + +[[package]] +name = "slack-sdk" +version = "3.37.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/c2/0a174a155623d7dc3ed4d1360cdf755590acdc2c3fc9ce0d2340f468909f/slack_sdk-3.37.0.tar.gz", hash = "sha256:242d6cffbd9e843af807487ff04853189b812081aeaa22f90a8f159f20220ed9", size = 241612, upload-time = "2025-10-06T23:07:20.856Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/fd/a502ee24d8c7d12a8f749878ae0949b8eeb50aeac22dc5a613d417a256d0/slack_sdk-3.37.0-py2.py3-none-any.whl", hash = "sha256:e108a0836eafda74d8a95e76c12c2bcb010e645d504d8497451e4c7ebb229c87", size = 302751, upload-time = "2025-10-06T23:07:19.542Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" }, +] + +[[package]] +name = "sse-starlette" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/6f/22ed6e33f8a9e76ca0a412405f31abb844b779d52c5f96660766edcd737c/sse_starlette-3.0.2.tar.gz", hash = "sha256:ccd60b5765ebb3584d0de2d7a6e4f745672581de4f5005ab31c3a25d10b52b3a", size = 20985, upload-time = "2025-07-27T09:07:44.565Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/10/c78f463b4ef22eef8491f218f692be838282cd65480f6e423d7730dfd1fb/sse_starlette-3.0.2-py3-none-any.whl", hash = "sha256:16b7cbfddbcd4eaca11f7b586f3b8a080f1afe952c15813455b162edea619e5a", size = 11297, upload-time = "2025-07-27T09:07:43.268Z" }, +] + +[[package]] +name = "starlette" +version = "0.48.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/a5/d6f429d43394057b67a6b5bbe6eae2f77a6bf7459d961fdb224bf206eee6/starlette-0.48.0.tar.gz", hash = "sha256:7e8cee469a8ab2352911528110ce9088fdc6a37d9876926e73da7ce4aa4c7a46", size = 2652949, upload-time = "2025-09-13T08:41:05.699Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/72/2db2f49247d0a18b4f1bb9a5a39a0162869acf235f3a96418363947b3d46/starlette-0.48.0-py3-none-any.whl", hash = "sha256:0764ca97b097582558ecb498132ed0c7d942f233f365b86ba37770e026510659", size = 73736, upload-time = "2025-09-13T08:41:03.869Z" }, +] + +[[package]] +name = "strands-agents" +version = "1.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, + { name = "docstring-parser" }, + { name = "mcp" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation-threading" }, + { name = "opentelemetry-sdk" }, + { name = "pydantic" }, + { name = "typing-extensions" }, + { name = "watchdog" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/56/3d3cb9bf62d45f97befe82fbb73ad65b46e9a6efd21151c38c466cd87c11/strands_agents-1.12.0.tar.gz", hash = "sha256:8f17e775971505ab7841a3139cde9879632a26cdd9cd55be74de83f0e7f804c0", size = 418141, upload-time = "2025-10-10T15:16:45.753Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/05/2f0fbce4a3acd7b9c042368bbe8038409a7b30d65138bd3b37a06d1a4cc4/strands_agents-1.12.0-py3-none-any.whl", hash = "sha256:af0f9c8a175666009863d0fb4438e71000ea3a2f0cbda3dc308c35dd4f9a1eb0", size = 216043, upload-time = "2025-10-10T15:16:44.043Z" }, +] + +[package.optional-dependencies] +otel = [ + { name = "opentelemetry-exporter-otlp-proto-http" }, +] + +[[package]] +name = "strands-agents-tools" +version = "0.2.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "aws-requests-auth" }, + { name = "botocore" }, + { name = "dill" }, + { name = "markdownify" }, + { name = "pillow" }, + { name = "prompt-toolkit" }, + { name = "pyjwt" }, + { name = "requests" }, + { name = "rich" }, + { name = "slack-bolt" }, + { name = "strands-agents" }, + { name = "sympy" }, + { name = "tenacity" }, + { name = "tzdata", marker = "sys_platform == 'win32'" }, + { name = "watchdog" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/37/a2/5cd71b9db26c98d6289883fe969e884964fbd2a4b78cb75073d6651f0041/strands_agents_tools-0.2.11.tar.gz", hash = "sha256:5ef192b68eddeccb96c47227ca841ccce3aedff5db0953a0af7b7212a09428df", size = 445792, upload-time = "2025-10-10T16:58:26.396Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/1c/a903b4947e3e0e56c2a1db7008c286c399b2e37c6362c569e8a07006e046/strands_agents_tools-0.2.11-py3-none-any.whl", hash = "sha256:ebff41ba782e1ce59530e11321780eae0ffdb5b61e7aee7408c46c1a8f29f18d", size = 297958, upload-time = "2025-10-10T16:58:24.213Z" }, +] + +[[package]] +name = "sympy" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, +] + +[[package]] +name = "tenacity" +version = "9.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/57/1616c8274c3442d802621abf5deb230771c7a0fec9414cb6763900eb3868/uvicorn-0.37.0.tar.gz", hash = "sha256:4115c8add6d3fd536c8ee77f0e14a7fd2ebba939fed9b02583a97f80648f9e13", size = 80367, upload-time = "2025-09-23T13:33:47.486Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/cd/584a2ceb5532af99dd09e50919e3615ba99aa127e9850eafe5f31ddfdb9a/uvicorn-0.37.0-py3-none-any.whl", hash = "sha256:913b2b88672343739927ce381ff9e2ad62541f9f8289664fa1d1d3803fa2ce6c", size = 67976, upload-time = "2025-09-23T13:33:45.842Z" }, +] + +[[package]] +name = "watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, + { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.2.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925, upload-time = "2024-11-08T15:52:18.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, +] + +[[package]] +name = "wheel" +version = "0.45.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/98/2d9906746cdc6a6ef809ae6338005b3f21bb568bea3165cfc6a243fdc25c/wheel-0.45.1.tar.gz", hash = "sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729", size = 107545, upload-time = "2024-11-23T00:18:23.513Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/2c/87f3254fd8ffd29e4c02732eee68a83a1d3c346ae39bc6822dcbcb697f2b/wheel-0.45.1-py3-none-any.whl", hash = "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248", size = 72494, upload-time = "2024-11-23T00:18:21.207Z" }, +] + +[[package]] +name = "wrapt" +version = "1.17.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/8f/aeb76c5b46e273670962298c23e7ddde79916cb74db802131d49a85e4b7d/wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0", size = 55547, upload-time = "2025-08-12T05:53:21.714Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/f6/759ece88472157acb55fc195e5b116e06730f1b651b5b314c66291729193/wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0", size = 54003, upload-time = "2025-08-12T05:51:48.627Z" }, + { url = "https://files.pythonhosted.org/packages/4f/a9/49940b9dc6d47027dc850c116d79b4155f15c08547d04db0f07121499347/wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77", size = 39025, upload-time = "2025-08-12T05:51:37.156Z" }, + { url = "https://files.pythonhosted.org/packages/45/35/6a08de0f2c96dcdd7fe464d7420ddb9a7655a6561150e5fc4da9356aeaab/wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7", size = 39108, upload-time = "2025-08-12T05:51:58.425Z" }, + { url = "https://files.pythonhosted.org/packages/0c/37/6faf15cfa41bf1f3dba80cd3f5ccc6622dfccb660ab26ed79f0178c7497f/wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277", size = 88072, upload-time = "2025-08-12T05:52:37.53Z" }, + { url = "https://files.pythonhosted.org/packages/78/f2/efe19ada4a38e4e15b6dff39c3e3f3f73f5decf901f66e6f72fe79623a06/wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d", size = 88214, upload-time = "2025-08-12T05:52:15.886Z" }, + { url = "https://files.pythonhosted.org/packages/40/90/ca86701e9de1622b16e09689fc24b76f69b06bb0150990f6f4e8b0eeb576/wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa", size = 87105, upload-time = "2025-08-12T05:52:17.914Z" }, + { url = "https://files.pythonhosted.org/packages/fd/e0/d10bd257c9a3e15cbf5523025252cc14d77468e8ed644aafb2d6f54cb95d/wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050", size = 87766, upload-time = "2025-08-12T05:52:39.243Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cf/7d848740203c7b4b27eb55dbfede11aca974a51c3d894f6cc4b865f42f58/wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8", size = 36711, upload-time = "2025-08-12T05:53:10.074Z" }, + { url = "https://files.pythonhosted.org/packages/57/54/35a84d0a4d23ea675994104e667ceff49227ce473ba6a59ba2c84f250b74/wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb", size = 38885, upload-time = "2025-08-12T05:53:08.695Z" }, + { url = "https://files.pythonhosted.org/packages/01/77/66e54407c59d7b02a3c4e0af3783168fff8e5d61def52cda8728439d86bc/wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16", size = 36896, upload-time = "2025-08-12T05:52:55.34Z" }, + { url = "https://files.pythonhosted.org/packages/02/a2/cd864b2a14f20d14f4c496fab97802001560f9f41554eef6df201cd7f76c/wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39", size = 54132, upload-time = "2025-08-12T05:51:49.864Z" }, + { url = "https://files.pythonhosted.org/packages/d5/46/d011725b0c89e853dc44cceb738a307cde5d240d023d6d40a82d1b4e1182/wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235", size = 39091, upload-time = "2025-08-12T05:51:38.935Z" }, + { url = "https://files.pythonhosted.org/packages/2e/9e/3ad852d77c35aae7ddebdbc3b6d35ec8013af7d7dddad0ad911f3d891dae/wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c", size = 39172, upload-time = "2025-08-12T05:51:59.365Z" }, + { url = "https://files.pythonhosted.org/packages/c3/f7/c983d2762bcce2326c317c26a6a1e7016f7eb039c27cdf5c4e30f4160f31/wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b", size = 87163, upload-time = "2025-08-12T05:52:40.965Z" }, + { url = "https://files.pythonhosted.org/packages/e4/0f/f673f75d489c7f22d17fe0193e84b41540d962f75fce579cf6873167c29b/wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa", size = 87963, upload-time = "2025-08-12T05:52:20.326Z" }, + { url = "https://files.pythonhosted.org/packages/df/61/515ad6caca68995da2fac7a6af97faab8f78ebe3bf4f761e1b77efbc47b5/wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7", size = 86945, upload-time = "2025-08-12T05:52:21.581Z" }, + { url = "https://files.pythonhosted.org/packages/d3/bd/4e70162ce398462a467bc09e768bee112f1412e563620adc353de9055d33/wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4", size = 86857, upload-time = "2025-08-12T05:52:43.043Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b8/da8560695e9284810b8d3df8a19396a6e40e7518059584a1a394a2b35e0a/wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10", size = 37178, upload-time = "2025-08-12T05:53:12.605Z" }, + { url = "https://files.pythonhosted.org/packages/db/c8/b71eeb192c440d67a5a0449aaee2310a1a1e8eca41676046f99ed2487e9f/wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6", size = 39310, upload-time = "2025-08-12T05:53:11.106Z" }, + { url = "https://files.pythonhosted.org/packages/45/20/2cda20fd4865fa40f86f6c46ed37a2a8356a7a2fde0773269311f2af56c7/wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58", size = 37266, upload-time = "2025-08-12T05:52:56.531Z" }, + { url = "https://files.pythonhosted.org/packages/77/ed/dd5cf21aec36c80443c6f900449260b80e2a65cf963668eaef3b9accce36/wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a", size = 56544, upload-time = "2025-08-12T05:51:51.109Z" }, + { url = "https://files.pythonhosted.org/packages/8d/96/450c651cc753877ad100c7949ab4d2e2ecc4d97157e00fa8f45df682456a/wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067", size = 40283, upload-time = "2025-08-12T05:51:39.912Z" }, + { url = "https://files.pythonhosted.org/packages/d1/86/2fcad95994d9b572db57632acb6f900695a648c3e063f2cd344b3f5c5a37/wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454", size = 40366, upload-time = "2025-08-12T05:52:00.693Z" }, + { url = "https://files.pythonhosted.org/packages/64/0e/f4472f2fdde2d4617975144311f8800ef73677a159be7fe61fa50997d6c0/wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e", size = 108571, upload-time = "2025-08-12T05:52:44.521Z" }, + { url = "https://files.pythonhosted.org/packages/cc/01/9b85a99996b0a97c8a17484684f206cbb6ba73c1ce6890ac668bcf3838fb/wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f", size = 113094, upload-time = "2025-08-12T05:52:22.618Z" }, + { url = "https://files.pythonhosted.org/packages/25/02/78926c1efddcc7b3aa0bc3d6b33a822f7d898059f7cd9ace8c8318e559ef/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056", size = 110659, upload-time = "2025-08-12T05:52:24.057Z" }, + { url = "https://files.pythonhosted.org/packages/dc/ee/c414501ad518ac3e6fe184753632fe5e5ecacdcf0effc23f31c1e4f7bfcf/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804", size = 106946, upload-time = "2025-08-12T05:52:45.976Z" }, + { url = "https://files.pythonhosted.org/packages/be/44/a1bd64b723d13bb151d6cc91b986146a1952385e0392a78567e12149c7b4/wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977", size = 38717, upload-time = "2025-08-12T05:53:15.214Z" }, + { url = "https://files.pythonhosted.org/packages/79/d9/7cfd5a312760ac4dd8bf0184a6ee9e43c33e47f3dadc303032ce012b8fa3/wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116", size = 41334, upload-time = "2025-08-12T05:53:14.178Z" }, + { url = "https://files.pythonhosted.org/packages/46/78/10ad9781128ed2f99dbc474f43283b13fea8ba58723e98844367531c18e9/wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6", size = 38471, upload-time = "2025-08-12T05:52:57.784Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, +] + +[[package]] +name = "xmltodict" +version = "1.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/aa/917ceeed4dbb80d2f04dbd0c784b7ee7bba8ae5a54837ef0e5e062cd3cfb/xmltodict-1.0.2.tar.gz", hash = "sha256:54306780b7c2175a3967cad1db92f218207e5bc1aba697d887807c0fb68b7649", size = 25725, upload-time = "2025-09-17T21:59:26.459Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/20/69a0e6058bc5ea74892d089d64dfc3a62ba78917ec5e2cfa70f7c92ba3a5/xmltodict-1.0.2-py3-none-any.whl", hash = "sha256:62d0fddb0dcbc9f642745d8bbf4d81fd17d6dfaec5a15b5c1876300aad92af0d", size = 13893, upload-time = "2025-09-17T21:59:24.859Z" }, +] + +[[package]] +name = "yarl" +version = "1.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/f3/d67de7260456ee105dc1d162d43a019ecad6b91e2f51809d6cddaa56690e/yarl-1.22.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8dee9c25c74997f6a750cd317b8ca63545169c098faee42c84aa5e506c819b53", size = 139980, upload-time = "2025-10-06T14:10:14.601Z" }, + { url = "https://files.pythonhosted.org/packages/01/88/04d98af0b47e0ef42597b9b28863b9060bb515524da0a65d5f4db160b2d5/yarl-1.22.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01e73b85a5434f89fc4fe27dcda2aff08ddf35e4d47bbbea3bdcd25321af538a", size = 93424, upload-time = "2025-10-06T14:10:16.115Z" }, + { url = "https://files.pythonhosted.org/packages/18/91/3274b215fd8442a03975ce6bee5fe6aa57a8326b29b9d3d56234a1dca244/yarl-1.22.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:22965c2af250d20c873cdbee8ff958fb809940aeb2e74ba5f20aaf6b7ac8c70c", size = 93821, upload-time = "2025-10-06T14:10:17.993Z" }, + { url = "https://files.pythonhosted.org/packages/61/3a/caf4e25036db0f2da4ca22a353dfeb3c9d3c95d2761ebe9b14df8fc16eb0/yarl-1.22.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4f15793aa49793ec8d1c708ab7f9eded1aa72edc5174cae703651555ed1b601", size = 373243, upload-time = "2025-10-06T14:10:19.44Z" }, + { url = "https://files.pythonhosted.org/packages/6e/9e/51a77ac7516e8e7803b06e01f74e78649c24ee1021eca3d6a739cb6ea49c/yarl-1.22.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5542339dcf2747135c5c85f68680353d5cb9ffd741c0f2e8d832d054d41f35a", size = 342361, upload-time = "2025-10-06T14:10:21.124Z" }, + { url = "https://files.pythonhosted.org/packages/d4/f8/33b92454789dde8407f156c00303e9a891f1f51a0330b0fad7c909f87692/yarl-1.22.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5c401e05ad47a75869c3ab3e35137f8468b846770587e70d71e11de797d113df", size = 387036, upload-time = "2025-10-06T14:10:22.902Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9a/c5db84ea024f76838220280f732970aa4ee154015d7f5c1bfb60a267af6f/yarl-1.22.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:243dda95d901c733f5b59214d28b0120893d91777cb8aa043e6ef059d3cddfe2", size = 397671, upload-time = "2025-10-06T14:10:24.523Z" }, + { url = "https://files.pythonhosted.org/packages/11/c9/cd8538dc2e7727095e0c1d867bad1e40c98f37763e6d995c1939f5fdc7b1/yarl-1.22.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bec03d0d388060058f5d291a813f21c011041938a441c593374da6077fe21b1b", size = 377059, upload-time = "2025-10-06T14:10:26.406Z" }, + { url = "https://files.pythonhosted.org/packages/a1/b9/ab437b261702ced75122ed78a876a6dec0a1b0f5e17a4ac7a9a2482d8abe/yarl-1.22.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0748275abb8c1e1e09301ee3cf90c8a99678a4e92e4373705f2a2570d581273", size = 365356, upload-time = "2025-10-06T14:10:28.461Z" }, + { url = "https://files.pythonhosted.org/packages/b2/9d/8e1ae6d1d008a9567877b08f0ce4077a29974c04c062dabdb923ed98e6fe/yarl-1.22.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:47fdb18187e2a4e18fda2c25c05d8251a9e4a521edaed757fef033e7d8498d9a", size = 361331, upload-time = "2025-10-06T14:10:30.541Z" }, + { url = "https://files.pythonhosted.org/packages/ca/5a/09b7be3905962f145b73beb468cdd53db8aa171cf18c80400a54c5b82846/yarl-1.22.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c7044802eec4524fde550afc28edda0dd5784c4c45f0be151a2d3ba017daca7d", size = 382590, upload-time = "2025-10-06T14:10:33.352Z" }, + { url = "https://files.pythonhosted.org/packages/aa/7f/59ec509abf90eda5048b0bc3e2d7b5099dffdb3e6b127019895ab9d5ef44/yarl-1.22.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:139718f35149ff544caba20fce6e8a2f71f1e39b92c700d8438a0b1d2a631a02", size = 385316, upload-time = "2025-10-06T14:10:35.034Z" }, + { url = "https://files.pythonhosted.org/packages/e5/84/891158426bc8036bfdfd862fabd0e0fa25df4176ec793e447f4b85cf1be4/yarl-1.22.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e1b51bebd221006d3d2f95fbe124b22b247136647ae5dcc8c7acafba66e5ee67", size = 374431, upload-time = "2025-10-06T14:10:37.76Z" }, + { url = "https://files.pythonhosted.org/packages/bb/49/03da1580665baa8bef5e8ed34c6df2c2aca0a2f28bf397ed238cc1bbc6f2/yarl-1.22.0-cp313-cp313-win32.whl", hash = "sha256:d3e32536234a95f513bd374e93d717cf6b2231a791758de6c509e3653f234c95", size = 81555, upload-time = "2025-10-06T14:10:39.649Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ee/450914ae11b419eadd067c6183ae08381cfdfcb9798b90b2b713bbebddda/yarl-1.22.0-cp313-cp313-win_amd64.whl", hash = "sha256:47743b82b76d89a1d20b83e60d5c20314cbd5ba2befc9cda8f28300c4a08ed4d", size = 86965, upload-time = "2025-10-06T14:10:41.313Z" }, + { url = "https://files.pythonhosted.org/packages/98/4d/264a01eae03b6cf629ad69bae94e3b0e5344741e929073678e84bf7a3e3b/yarl-1.22.0-cp313-cp313-win_arm64.whl", hash = "sha256:5d0fcda9608875f7d052eff120c7a5da474a6796fe4d83e152e0e4d42f6d1a9b", size = 81205, upload-time = "2025-10-06T14:10:43.167Z" }, + { url = "https://files.pythonhosted.org/packages/88/fc/6908f062a2f77b5f9f6d69cecb1747260831ff206adcbc5b510aff88df91/yarl-1.22.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:719ae08b6972befcba4310e49edb1161a88cdd331e3a694b84466bd938a6ab10", size = 146209, upload-time = "2025-10-06T14:10:44.643Z" }, + { url = "https://files.pythonhosted.org/packages/65/47/76594ae8eab26210b4867be6f49129861ad33da1f1ebdf7051e98492bf62/yarl-1.22.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:47d8a5c446df1c4db9d21b49619ffdba90e77c89ec6e283f453856c74b50b9e3", size = 95966, upload-time = "2025-10-06T14:10:46.554Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ce/05e9828a49271ba6b5b038b15b3934e996980dd78abdfeb52a04cfb9467e/yarl-1.22.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cfebc0ac8333520d2d0423cbbe43ae43c8838862ddb898f5ca68565e395516e9", size = 97312, upload-time = "2025-10-06T14:10:48.007Z" }, + { url = "https://files.pythonhosted.org/packages/d1/c5/7dffad5e4f2265b29c9d7ec869c369e4223166e4f9206fc2243ee9eea727/yarl-1.22.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4398557cbf484207df000309235979c79c4356518fd5c99158c7d38203c4da4f", size = 361967, upload-time = "2025-10-06T14:10:49.997Z" }, + { url = "https://files.pythonhosted.org/packages/50/b2/375b933c93a54bff7fc041e1a6ad2c0f6f733ffb0c6e642ce56ee3b39970/yarl-1.22.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2ca6fd72a8cd803be290d42f2dec5cdcd5299eeb93c2d929bf060ad9efaf5de0", size = 323949, upload-time = "2025-10-06T14:10:52.004Z" }, + { url = "https://files.pythonhosted.org/packages/66/50/bfc2a29a1d78644c5a7220ce2f304f38248dc94124a326794e677634b6cf/yarl-1.22.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca1f59c4e1ab6e72f0a23c13fca5430f889634166be85dbf1013683e49e3278e", size = 361818, upload-time = "2025-10-06T14:10:54.078Z" }, + { url = "https://files.pythonhosted.org/packages/46/96/f3941a46af7d5d0f0498f86d71275696800ddcdd20426298e572b19b91ff/yarl-1.22.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c5010a52015e7c70f86eb967db0f37f3c8bd503a695a49f8d45700144667708", size = 372626, upload-time = "2025-10-06T14:10:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/c1/42/8b27c83bb875cd89448e42cd627e0fb971fa1675c9ec546393d18826cb50/yarl-1.22.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d7672ecf7557476642c88497c2f8d8542f8e36596e928e9bcba0e42e1e7d71f", size = 341129, upload-time = "2025-10-06T14:10:57.985Z" }, + { url = "https://files.pythonhosted.org/packages/49/36/99ca3122201b382a3cf7cc937b95235b0ac944f7e9f2d5331d50821ed352/yarl-1.22.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b7c88eeef021579d600e50363e0b6ee4f7f6f728cd3486b9d0f3ee7b946398d", size = 346776, upload-time = "2025-10-06T14:10:59.633Z" }, + { url = "https://files.pythonhosted.org/packages/85/b4/47328bf996acd01a4c16ef9dcd2f59c969f495073616586f78cd5f2efb99/yarl-1.22.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f4afb5c34f2c6fecdcc182dfcfc6af6cccf1aa923eed4d6a12e9d96904e1a0d8", size = 334879, upload-time = "2025-10-06T14:11:01.454Z" }, + { url = "https://files.pythonhosted.org/packages/c2/ad/b77d7b3f14a4283bffb8e92c6026496f6de49751c2f97d4352242bba3990/yarl-1.22.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:59c189e3e99a59cf8d83cbb31d4db02d66cda5a1a4374e8a012b51255341abf5", size = 350996, upload-time = "2025-10-06T14:11:03.452Z" }, + { url = "https://files.pythonhosted.org/packages/81/c8/06e1d69295792ba54d556f06686cbd6a7ce39c22307100e3fb4a2c0b0a1d/yarl-1.22.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:5a3bf7f62a289fa90f1990422dc8dff5a458469ea71d1624585ec3a4c8d6960f", size = 356047, upload-time = "2025-10-06T14:11:05.115Z" }, + { url = "https://files.pythonhosted.org/packages/4b/b8/4c0e9e9f597074b208d18cef227d83aac36184bfbc6eab204ea55783dbc5/yarl-1.22.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:de6b9a04c606978fdfe72666fa216ffcf2d1a9f6a381058d4378f8d7b1e5de62", size = 342947, upload-time = "2025-10-06T14:11:08.137Z" }, + { url = "https://files.pythonhosted.org/packages/e0/e5/11f140a58bf4c6ad7aca69a892bff0ee638c31bea4206748fc0df4ebcb3a/yarl-1.22.0-cp313-cp313t-win32.whl", hash = "sha256:1834bb90991cc2999f10f97f5f01317f99b143284766d197e43cd5b45eb18d03", size = 86943, upload-time = "2025-10-06T14:11:10.284Z" }, + { url = "https://files.pythonhosted.org/packages/31/74/8b74bae38ed7fe6793d0c15a0c8207bbb819cf287788459e5ed230996cdd/yarl-1.22.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ff86011bd159a9d2dfc89c34cfd8aff12875980e3bd6a39ff097887520e60249", size = 93715, upload-time = "2025-10-06T14:11:11.739Z" }, + { url = "https://files.pythonhosted.org/packages/69/66/991858aa4b5892d57aef7ee1ba6b4d01ec3b7eb3060795d34090a3ca3278/yarl-1.22.0-cp313-cp313t-win_arm64.whl", hash = "sha256:7861058d0582b847bc4e3a4a4c46828a410bca738673f35a29ba3ca5db0b473b", size = 83857, upload-time = "2025-10-06T14:11:13.586Z" }, + { url = "https://files.pythonhosted.org/packages/46/b3/e20ef504049f1a1c54a814b4b9bed96d1ac0e0610c3b4da178f87209db05/yarl-1.22.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:34b36c2c57124530884d89d50ed2c1478697ad7473efd59cfd479945c95650e4", size = 140520, upload-time = "2025-10-06T14:11:15.465Z" }, + { url = "https://files.pythonhosted.org/packages/e4/04/3532d990fdbab02e5ede063676b5c4260e7f3abea2151099c2aa745acc4c/yarl-1.22.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:0dd9a702591ca2e543631c2a017e4a547e38a5c0f29eece37d9097e04a7ac683", size = 93504, upload-time = "2025-10-06T14:11:17.106Z" }, + { url = "https://files.pythonhosted.org/packages/11/63/ff458113c5c2dac9a9719ac68ee7c947cb621432bcf28c9972b1c0e83938/yarl-1.22.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:594fcab1032e2d2cc3321bb2e51271e7cd2b516c7d9aee780ece81b07ff8244b", size = 94282, upload-time = "2025-10-06T14:11:19.064Z" }, + { url = "https://files.pythonhosted.org/packages/a7/bc/315a56aca762d44a6aaaf7ad253f04d996cb6b27bad34410f82d76ea8038/yarl-1.22.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3d7a87a78d46a2e3d5b72587ac14b4c16952dd0887dbb051451eceac774411e", size = 372080, upload-time = "2025-10-06T14:11:20.996Z" }, + { url = "https://files.pythonhosted.org/packages/3f/3f/08e9b826ec2e099ea6e7c69a61272f4f6da62cb5b1b63590bb80ca2e4a40/yarl-1.22.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:852863707010316c973162e703bddabec35e8757e67fcb8ad58829de1ebc8590", size = 338696, upload-time = "2025-10-06T14:11:22.847Z" }, + { url = "https://files.pythonhosted.org/packages/e3/9f/90360108e3b32bd76789088e99538febfea24a102380ae73827f62073543/yarl-1.22.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:131a085a53bfe839a477c0845acf21efc77457ba2bcf5899618136d64f3303a2", size = 387121, upload-time = "2025-10-06T14:11:24.889Z" }, + { url = "https://files.pythonhosted.org/packages/98/92/ab8d4657bd5b46a38094cfaea498f18bb70ce6b63508fd7e909bd1f93066/yarl-1.22.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:078a8aefd263f4d4f923a9677b942b445a2be970ca24548a8102689a3a8ab8da", size = 394080, upload-time = "2025-10-06T14:11:27.307Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e7/d8c5a7752fef68205296201f8ec2bf718f5c805a7a7e9880576c67600658/yarl-1.22.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bca03b91c323036913993ff5c738d0842fc9c60c4648e5c8d98331526df89784", size = 372661, upload-time = "2025-10-06T14:11:29.387Z" }, + { url = "https://files.pythonhosted.org/packages/b6/2e/f4d26183c8db0bb82d491b072f3127fb8c381a6206a3a56332714b79b751/yarl-1.22.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:68986a61557d37bb90d3051a45b91fa3d5c516d177dfc6dd6f2f436a07ff2b6b", size = 364645, upload-time = "2025-10-06T14:11:31.423Z" }, + { url = "https://files.pythonhosted.org/packages/80/7c/428e5812e6b87cd00ee8e898328a62c95825bf37c7fa87f0b6bb2ad31304/yarl-1.22.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:4792b262d585ff0dff6bcb787f8492e40698443ec982a3568c2096433660c694", size = 355361, upload-time = "2025-10-06T14:11:33.055Z" }, + { url = "https://files.pythonhosted.org/packages/ec/2a/249405fd26776f8b13c067378ef4d7dd49c9098d1b6457cdd152a99e96a9/yarl-1.22.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ebd4549b108d732dba1d4ace67614b9545b21ece30937a63a65dd34efa19732d", size = 381451, upload-time = "2025-10-06T14:11:35.136Z" }, + { url = "https://files.pythonhosted.org/packages/67/a8/fb6b1adbe98cf1e2dd9fad71003d3a63a1bc22459c6e15f5714eb9323b93/yarl-1.22.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f87ac53513d22240c7d59203f25cc3beac1e574c6cd681bbfd321987b69f95fd", size = 383814, upload-time = "2025-10-06T14:11:37.094Z" }, + { url = "https://files.pythonhosted.org/packages/d9/f9/3aa2c0e480fb73e872ae2814c43bc1e734740bb0d54e8cb2a95925f98131/yarl-1.22.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:22b029f2881599e2f1b06f8f1db2ee63bd309e2293ba2d566e008ba12778b8da", size = 370799, upload-time = "2025-10-06T14:11:38.83Z" }, + { url = "https://files.pythonhosted.org/packages/50/3c/af9dba3b8b5eeb302f36f16f92791f3ea62e3f47763406abf6d5a4a3333b/yarl-1.22.0-cp314-cp314-win32.whl", hash = "sha256:6a635ea45ba4ea8238463b4f7d0e721bad669f80878b7bfd1f89266e2ae63da2", size = 82990, upload-time = "2025-10-06T14:11:40.624Z" }, + { url = "https://files.pythonhosted.org/packages/ac/30/ac3a0c5bdc1d6efd1b41fa24d4897a4329b3b1e98de9449679dd327af4f0/yarl-1.22.0-cp314-cp314-win_amd64.whl", hash = "sha256:0d6e6885777af0f110b0e5d7e5dda8b704efed3894da26220b7f3d887b839a79", size = 88292, upload-time = "2025-10-06T14:11:42.578Z" }, + { url = "https://files.pythonhosted.org/packages/df/0a/227ab4ff5b998a1b7410abc7b46c9b7a26b0ca9e86c34ba4b8d8bc7c63d5/yarl-1.22.0-cp314-cp314-win_arm64.whl", hash = "sha256:8218f4e98d3c10d683584cb40f0424f4b9fd6e95610232dd75e13743b070ee33", size = 82888, upload-time = "2025-10-06T14:11:44.863Z" }, + { url = "https://files.pythonhosted.org/packages/06/5e/a15eb13db90abd87dfbefb9760c0f3f257ac42a5cac7e75dbc23bed97a9f/yarl-1.22.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:45c2842ff0e0d1b35a6bf1cd6c690939dacb617a70827f715232b2e0494d55d1", size = 146223, upload-time = "2025-10-06T14:11:46.796Z" }, + { url = "https://files.pythonhosted.org/packages/18/82/9665c61910d4d84f41a5bf6837597c89e665fa88aa4941080704645932a9/yarl-1.22.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:d947071e6ebcf2e2bee8fce76e10faca8f7a14808ca36a910263acaacef08eca", size = 95981, upload-time = "2025-10-06T14:11:48.845Z" }, + { url = "https://files.pythonhosted.org/packages/5d/9a/2f65743589809af4d0a6d3aa749343c4b5f4c380cc24a8e94a3c6625a808/yarl-1.22.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:334b8721303e61b00019474cc103bdac3d7b1f65e91f0bfedeec2d56dfe74b53", size = 97303, upload-time = "2025-10-06T14:11:50.897Z" }, + { url = "https://files.pythonhosted.org/packages/b0/ab/5b13d3e157505c43c3b43b5a776cbf7b24a02bc4cccc40314771197e3508/yarl-1.22.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e7ce67c34138a058fd092f67d07a72b8e31ff0c9236e751957465a24b28910c", size = 361820, upload-time = "2025-10-06T14:11:52.549Z" }, + { url = "https://files.pythonhosted.org/packages/fb/76/242a5ef4677615cf95330cfc1b4610e78184400699bdda0acb897ef5e49a/yarl-1.22.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d77e1b2c6d04711478cb1c4ab90db07f1609ccf06a287d5607fcd90dc9863acf", size = 323203, upload-time = "2025-10-06T14:11:54.225Z" }, + { url = "https://files.pythonhosted.org/packages/8c/96/475509110d3f0153b43d06164cf4195c64d16999e0c7e2d8a099adcd6907/yarl-1.22.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4647674b6150d2cae088fc07de2738a84b8bcedebef29802cf0b0a82ab6face", size = 363173, upload-time = "2025-10-06T14:11:56.069Z" }, + { url = "https://files.pythonhosted.org/packages/c9/66/59db471aecfbd559a1fd48aedd954435558cd98c7d0da8b03cc6c140a32c/yarl-1.22.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efb07073be061c8f79d03d04139a80ba33cbd390ca8f0297aae9cce6411e4c6b", size = 373562, upload-time = "2025-10-06T14:11:58.783Z" }, + { url = "https://files.pythonhosted.org/packages/03/1f/c5d94abc91557384719da10ff166b916107c1b45e4d0423a88457071dd88/yarl-1.22.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e51ac5435758ba97ad69617e13233da53908beccc6cfcd6c34bbed8dcbede486", size = 339828, upload-time = "2025-10-06T14:12:00.686Z" }, + { url = "https://files.pythonhosted.org/packages/5f/97/aa6a143d3afba17b6465733681c70cf175af89f76ec8d9286e08437a7454/yarl-1.22.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:33e32a0dd0c8205efa8e83d04fc9f19313772b78522d1bdc7d9aed706bfd6138", size = 347551, upload-time = "2025-10-06T14:12:02.628Z" }, + { url = "https://files.pythonhosted.org/packages/43/3c/45a2b6d80195959239a7b2a8810506d4eea5487dce61c2a3393e7fc3c52e/yarl-1.22.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:bf4a21e58b9cde0e401e683ebd00f6ed30a06d14e93f7c8fd059f8b6e8f87b6a", size = 334512, upload-time = "2025-10-06T14:12:04.871Z" }, + { url = "https://files.pythonhosted.org/packages/86/a0/c2ab48d74599c7c84cb104ebd799c5813de252bea0f360ffc29d270c2caa/yarl-1.22.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:e4b582bab49ac33c8deb97e058cd67c2c50dac0dd134874106d9c774fd272529", size = 352400, upload-time = "2025-10-06T14:12:06.624Z" }, + { url = "https://files.pythonhosted.org/packages/32/75/f8919b2eafc929567d3d8411f72bdb1a2109c01caaab4ebfa5f8ffadc15b/yarl-1.22.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:0b5bcc1a9c4839e7e30b7b30dd47fe5e7e44fb7054ec29b5bb8d526aa1041093", size = 357140, upload-time = "2025-10-06T14:12:08.362Z" }, + { url = "https://files.pythonhosted.org/packages/cf/72/6a85bba382f22cf78add705d8c3731748397d986e197e53ecc7835e76de7/yarl-1.22.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c0232bce2170103ec23c454e54a57008a9a72b5d1c3105dc2496750da8cfa47c", size = 341473, upload-time = "2025-10-06T14:12:10.994Z" }, + { url = "https://files.pythonhosted.org/packages/35/18/55e6011f7c044dc80b98893060773cefcfdbf60dfefb8cb2f58b9bacbd83/yarl-1.22.0-cp314-cp314t-win32.whl", hash = "sha256:8009b3173bcd637be650922ac455946197d858b3630b6d8787aa9e5c4564533e", size = 89056, upload-time = "2025-10-06T14:12:13.317Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/0f0dccb6e59a9e7f122c5afd43568b1d31b8ab7dda5f1b01fb5c7025c9a9/yarl-1.22.0-cp314-cp314t-win_amd64.whl", hash = "sha256:9fb17ea16e972c63d25d4a97f016d235c78dd2344820eb35bc034bc32012ee27", size = 96292, upload-time = "2025-10-06T14:12:15.398Z" }, + { url = "https://files.pythonhosted.org/packages/48/b7/503c98092fb3b344a179579f55814b613c1fbb1c23b3ec14a7b008a66a6e/yarl-1.22.0-cp314-cp314t-win_arm64.whl", hash = "sha256:9f6d73c1436b934e3f01df1e1b21ff765cd1d28c77dfb9ace207f746d4610ee1", size = 85171, upload-time = "2025-10-06T14:12:16.935Z" }, + { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] diff --git a/deployment/run-unit-tests.sh b/deployment/run-unit-tests.sh index c868dbe9..8b4256a3 100755 --- a/deployment/run-unit-tests.sh +++ b/deployment/run-unit-tests.sh @@ -33,6 +33,28 @@ setup_python_env() { deactivate } +setup_uv() { + echo "Installing UV for ECR container tests..." + + if command -v pip3 &> /dev/null; then + pip3 install uv>=0.5.0 + + # Verify installation + if command -v uv &> /dev/null; then + echo "✅ UV installed successfully: $(uv --version)" + return 0 + fi + fi + + # If pip installation failed, provide instructions and exit + echo "❌ ERROR: Could not install UV." + echo " Please install UV manually:" + echo " - pip install uv>=0.5.0" + echo " - brew install uv (macOS)" + echo " - https://docs.astral.sh/uv/getting-started/installation/" + echo "" +} + install_lambda_layer() { lambda_name=$1 lambda_description=$2 @@ -219,6 +241,25 @@ run_ui_project_test() { mv coverage $coverage_report_path } +run_ecr_container_tests() { + container_name=$1 + container_description=$2 + echo "------------------------------------------------------------------------------" + echo "[Test] ECR Container: $container_name, $container_description" + echo "------------------------------------------------------------------------------" + cd $source_dir/../deployment/ecr/$container_name + if [ -f "scripts/run_unit_tests.sh" ]; then + echo "Running $container_description container tests..." + ./scripts/run_unit_tests.sh + if [ "$?" != "0" ]; then + echo "(deployment/run-unit-tests.sh) ERROR: ECR container tests failed." 1>&2 + exit 1 + fi + else + echo "⚠️ ECR container test script not found, skipping..." + fi +} + timer() { start=$(date +%s) "$@" # Executes all arguments as a command @@ -270,8 +311,15 @@ echo "---------------------------------------" cd $source_dir # install TS node libraries in the layers as required for local unit testing +cd lambda/layers/aws-node-user-agent-config +timer npm ci +timer npm run build + +cd $source_dir + cd lambda/layers/aws-sdk-lib timer npm ci +timer npm run build cd $source_dir timer install_lambda_layer layers/aws_boto3 "Boto3 SDK Layer" @@ -305,6 +353,20 @@ echo "---------------------------------------" timer run_cdk_project_test "CDK - Generative AI Application Builder on AWS" +echo "---------------------------------------" +echo "Running ECR container unit tests" +echo "---------------------------------------" + +# Ensure UV is installed for ECR container tests +setup_uv + +# Run ECR container tests +timer run_ecr_container_tests "gaab-strands-agent" "GAAB Strands Agent" +timer run_ecr_container_tests "gaab-strands-common" "GAAB Strands Common" +timer run_ecr_container_tests "gaab-strands-workflow-agent" "GAAB Strands Workflow Agent" + +cd $source_dir + echo "---------------------------------------" echo "Executing Unit Tests Complete" echo "---------------------------------------" diff --git a/source/.prettierignore b/source/.prettierignore deleted file mode 100644 index 7a49e235..00000000 --- a/source/.prettierignore +++ /dev/null @@ -1,9 +0,0 @@ -node_modules -**/*.d.ts -coverage -cdk.out -.eslintrc.js -cdk.json -tsconfig.json -jest.config.js -package*.json \ No newline at end of file diff --git a/source/.prettierrc.yml b/source/.prettierrc.yml deleted file mode 100644 index 415c72f1..00000000 --- a/source/.prettierrc.yml +++ /dev/null @@ -1,14 +0,0 @@ -# .prettierrc or .prettierrc.yaml -proseWrap: 'preserve' -trailingComma: 'none' -tabWidth: 4 -semi: true -singleQuote: true -quoteProps: 'preserve' -printWidth: 120 -overrides: - - files: - - '*.json' - - '*.yml' - options: - tabWidth: 2 diff --git a/source/infrastructure/bin/gen-ai-app-builder.ts b/source/infrastructure/bin/gen-ai-app-builder.ts index 3537295d..dd2e1309 100644 --- a/source/infrastructure/bin/gen-ai-app-builder.ts +++ b/source/infrastructure/bin/gen-ai-app-builder.ts @@ -5,24 +5,31 @@ import * as cdk from 'aws-cdk-lib'; import { AwsSolutionsChecks } from 'cdk-nag'; import * as crypto from 'crypto'; +import { AgentBuilderStack } from '../lib/use-case-stacks/agent-core/agent-builder-stack'; import { BedrockAgent } from '../lib/bedrock-agent-stack'; import { BedrockChat } from '../lib/bedrock-chat-stack'; +import { MCPServerStack } from '../lib/mcp-server-stack'; import { DeploymentPlatformStack } from '../lib/deployment-platform-stack'; import { BaseStack, BaseStackProps } from '../lib/framework/base-stack'; import { SageMakerChat } from '../lib/sagemaker-chat-stack'; -import { AppRegistry } from '../lib/utils/app-registry-aspects'; import { LambdaAspects } from '../lib/utils/lambda-aspect'; import { LogGroupRetentionCheckAspect } from '../lib/utils/log-group-retention-check-aspect'; +import { WorkflowStack } from '../lib/use-case-stacks/agent-core/workflow-stack'; const app = new cdk.App(); const solutionID = process.env.SOLUTION_ID ?? app.node.tryGetContext('solution_id'); const version = process.env.VERSION ?? app.node.tryGetContext('solution_version'); const solutionName = process.env.SOLUTION_NAME ?? app.node.tryGetContext('solution_name'); -const applicationType = app.node.tryGetContext('application_type'); -const applicationName = app.node.tryGetContext('app_registry_name'); const applicationTrademarkName = app.node.tryGetContext('application_trademark_name'); -const stackList: (typeof BaseStack)[] = [BedrockChat, SageMakerChat, BedrockAgent]; +const stackList: (typeof BaseStack)[] = [ + BedrockChat, + SageMakerChat, + BedrockAgent, + AgentBuilderStack, + MCPServerStack, + WorkflowStack +]; for (const stack of stackList) { createStack(stack, undefined, true); @@ -45,19 +52,6 @@ app.synth(); function createStack(stack: typeof BaseStack, props?: BaseStackProps, isUseCase?: boolean) { const instance = new stack(app, stack.name, props ?? getDefaultBaseStackProps(stack, isUseCase)); - cdk.Aspects.of(instance).add( - new AppRegistry(instance, 'AppRegistry', { - solutionID: solutionID, - solutionVersion: version, - solutionName: solutionName, - applicationType: applicationType, - applicationName: isUseCase - ? `${applicationName}-${cdk.Fn.select(0, cdk.Fn.split('-', cdk.Fn.ref('UseCaseUUID')))}` - : `${applicationName}-Dashboard` - }), - { priority: cdk.AspectPriority.MUTATING } - ); - // adding lambda layer to all lambda functions for injecting user-agent for SDK calls to AWS services. cdk.Aspects.of(instance).add( new LambdaAspects(instance, 'AspectInject', { diff --git a/source/infrastructure/cdk.json b/source/infrastructure/cdk.json index 942b7b5d..eb6038be 100644 --- a/source/infrastructure/cdk.json +++ b/source/infrastructure/cdk.json @@ -1,5 +1,5 @@ { - "app": "../pre-build-lambda-layers.sh && npx ts-node --prefer-ts-exts bin/gen-ai-app-builder.ts", + "app": "../pre-build-lambda-layers.sh && ../pre-build-ecr-images.sh && npx ts-node --prefer-ts-exts bin/gen-ai-app-builder.ts", "watch": { "include": [ "**" @@ -64,7 +64,7 @@ "@custom-bundler/unit-test": false, "solution_id": "SO0276", "solution_name": "generative-ai-application-builder-on-aws", - "solution_version": "v3.0.7", + "solution_version": "v4.0.0", "app_registry_name": "GAAB", "application_type": "AWS-Solutions", "application_trademark_name": "Generative AI Application Builder on AWS", diff --git a/source/infrastructure/lib/api/base-rest-endpoint.ts b/source/infrastructure/lib/api/base-rest-endpoint.ts index 2523df42..9edd4777 100644 --- a/source/infrastructure/lib/api/base-rest-endpoint.ts +++ b/source/infrastructure/lib/api/base-rest-endpoint.ts @@ -203,18 +203,17 @@ export abstract class BaseRestEndpoint extends Construct { orStatement: { statements: [ { - byteMatchStatement: { - searchString: '/deployments', + regexMatchStatement: { fieldToMatch: { uriPath: {} }, + regexString: '/deployments(/mcp|/agents|/workflows)?$', textTransformations: [ { priority: 0, type: 'NONE' } - ], - positionalConstraint: 'ENDS_WITH' + ] } }, { @@ -223,7 +222,7 @@ export abstract class BaseRestEndpoint extends Construct { uriPath: {} }, regexString: - '/deployments/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', + '/deployments(/mcp|/agents|/workflows)?/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', textTransformations: [ { priority: 0, diff --git a/source/infrastructure/lib/api/deployment-platform-rest-api-helper.ts b/source/infrastructure/lib/api/deployment-platform-rest-api-helper.ts new file mode 100644 index 00000000..621d302f --- /dev/null +++ b/source/infrastructure/lib/api/deployment-platform-rest-api-helper.ts @@ -0,0 +1,369 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as api from 'aws-cdk-lib/aws-apigateway'; +import { JsonSchema } from 'aws-cdk-lib/aws-apigateway'; +import { Construct } from 'constructs'; + +/** + * Context object containing common parameters for API operations + * Reduces parameter duplication across helper methods + */ +export interface DeploymentApiContext { + scope: Construct; + requestValidator: api.RequestValidator; + authorizer: api.IAuthorizer; + integration: api.LambdaIntegration; +} + +/** + * Use case deployment schema structure for CRUD operations + * Matches the DeploymentSchemas structure from model-schema/index.ts + */ +export interface DeploymentSchema { + deploy?: JsonSchema; + deployResponse?: JsonSchema; + update?: JsonSchema; + updateResponse?: JsonSchema; +} + +/** + * Configuration for ignoring specific CRUD operations + */ +export interface CrudModelIgnore { + /** Skip GET /collection endpoint */ + collectionGet?: boolean; + /** Skip POST /collection endpoint */ + collectionPost?: boolean; + /** Skip GET /collection/{id} endpoint */ + itemGet?: boolean; + /** Skip PATCH /collection/{id} endpoint */ + itemPatch?: boolean; + /** Skip DELETE /collection/{id} endpoint */ + itemDelete?: boolean; +} + +/** + * Static helper class for creating deployment REST API resources and methods + * Provides reusable methods for building REST API endpoints with consistent patterns + */ +export class DeploymentRestApiHelper { + /** + * Configures CORS for a resource with standard headers + * @param resource The API Gateway resource + * @param allowedMethods Array of HTTP methods to allow + */ + static configureCors(resource: api.Resource, allowedMethods: string[]): void { + if (!resource || resource.node.tryFindChild('OPTIONS')) { + return; + } + + resource.addCorsPreflight({ + allowOrigins: ['*'], + allowHeaders: ['Content-Type, Access-Control-Allow-Headers, X-Requested-With, Authorization'], + allowMethods: allowedMethods + }); + } + + /** + * Creates consistent method options for API Gateway methods + * @param context The deployment API context + * @param operationName The operation name for the method + * @param additionalParams Additional request parameters to include + * @returns Configured method options + */ + private static createMethodOptions( + context: DeploymentApiContext, + operationName: string, + additionalParams?: Record + ): api.MethodOptions { + return { + operationName, + authorizer: context.authorizer, + authorizationType: api.AuthorizationType.CUSTOM, + requestValidator: context.requestValidator, + requestParameters: { + 'method.request.header.authorization': true, + ...additionalParams + } + }; + } + + /** + * Creates method options with request/response models for CRUD operations + * @param context The deployment API context + * @param operationName The operation name for the method + * @param requestModel Optional request model for validation + * @param responseModel Optional response model for documentation + * @param additionalParams Additional request parameters to include + * @returns Configured method options with models + */ + static createMethodOptionsWithModels( + context: DeploymentApiContext, + operationName: string, + requestModel?: api.IModel, + responseModel?: api.IModel, + additionalParams?: Record + ): any { + const options: any = { + ...DeploymentRestApiHelper.createMethodOptions(context, operationName, additionalParams) + }; + + if (requestModel) { + options.requestModels = { 'application/json': requestModel }; + } + + if (responseModel) { + options.methodResponses = [ + { + responseModels: { 'application/json': responseModel }, + statusCode: '200' + } + ]; + } + + return options; + } + + /** + * Creates an API Gateway model with consistent naming and structure + * @param context The deployment API context + * @param restApi The REST API to create the model for + * @param modelName The name of the model (e.g., 'DeployUseCaseApiBody') + * @param description Description of what the model represents + * @param schema The JSON schema for the model + * @returns The created API Gateway model + */ + static createModel( + context: DeploymentApiContext, + restApi: api.IRestApi, + modelName: string, + description: string, + schema: JsonSchema + ): api.Model { + const fullModelName = `${modelName}Model`; + return new api.Model(context.scope, fullModelName, { + restApi: restApi, + contentType: 'application/json', + description: description, + modelName: fullModelName, + schema: schema + }); + } + + /** + * Creates the basic resource structure and CORS + * @param parentResource Parent resource to add to + * @param collectionPath Path for the collection resource (e.g., 'mcp') + * @param singularName Singular name for path parameters (e.g., 'mcp' for {mcp-id}) + * @returns Object with collection and item resources + */ + static createResourceStructure( + parentResource: api.Resource, + collectionPath: string, + singularName: string + ): { collectionResource: api.Resource; itemResource: api.Resource } { + const collectionResource = parentResource.addResource(collectionPath); + const itemResource = collectionResource.addResource(`{${singularName}}`); + + // Configure CORS + DeploymentRestApiHelper.configureCors(collectionResource, ['GET', 'POST', 'OPTIONS']); + DeploymentRestApiHelper.configureCors(itemResource, ['GET', 'PATCH', 'DELETE', 'POST', 'OPTIONS']); + + return { collectionResource, itemResource }; + } + + /** + * Creates models for CRUD operations with consistent naming + * @param context The deployment API context + * @param restApi The REST API to create models for + * @param operationPrefix Operation prefix (e.g., 'UseCase', 'MCP') + * @param deploymentSchema Use case deployment schema for request/response schemas + * @returns Object with created models + */ + static createCrudModels( + context: DeploymentApiContext, + restApi: api.IRestApi, + operationPrefix: string, + deploymentSchema?: DeploymentSchema + ): any { + if (!deploymentSchema) return {}; + + const entityName = operationPrefix.toLowerCase(); + const models: any = {}; + + if (deploymentSchema.deploy) { + const modelName = `Deploy${operationPrefix}ApiBody`; + const description = `Defines the required JSON structure of the POST request to deploy a ${entityName}`; + models.createRequestModel = DeploymentRestApiHelper.createModel( + context, + restApi, + modelName, + description, + deploymentSchema.deploy + ); + } + + if (deploymentSchema.deployResponse) { + const modelName = `Deploy${operationPrefix}Response`; + const description = `Response model to describe response of deploying a ${entityName}`; + models.createResponseModel = DeploymentRestApiHelper.createModel( + context, + restApi, + modelName, + description, + deploymentSchema.deployResponse + ); + } + + if (deploymentSchema.update) { + const modelName = `Update${operationPrefix}ApiBody`; + const description = `Defines the required JSON structure of the PUT request to update a ${entityName}`; + models.updateRequestModel = DeploymentRestApiHelper.createModel( + context, + restApi, + modelName, + description, + deploymentSchema.update + ); + } + + if (deploymentSchema.updateResponse) { + const modelName = `Update${operationPrefix}Response`; + const description = `Response model to describe response of updating a ${entityName}`; + models.updateResponseModel = DeploymentRestApiHelper.createModel( + context, + restApi, + modelName, + description, + deploymentSchema.updateResponse + ); + } + + return models; + } + + /** + * Creates all CRUD operations with consistent naming and behavior + * @param context The deployment API context + * @param collectionResource Collection resource (e.g., /deployments) + * @param itemResource Item resource (e.g., /deployments/{id}) + * @param operationPrefix Prefix for operation names (e.g., 'UseCase', 'MCP') + * @param restApi Optional REST API for auto-creating models from schemas + * @param ignore Optional configuration to skip specific CRUD operations + * @param deploymentSchema Optional use case deployment schema for auto-creating models + * @returns Array of resources created for suppression purposes + */ + static addCrudOperations( + context: DeploymentApiContext, + collectionResource: api.Resource, + itemResource: api.Resource, + operationPrefix: string, + restApi?: api.IRestApi, + deploymentSchema?: DeploymentSchema + ): api.Resource[] { + // create models if deploymentSchema provided + const models = + restApi && deploymentSchema + ? DeploymentRestApiHelper.createCrudModels(context, restApi, operationPrefix, deploymentSchema) + : {}; + + // GET /collection params + const getParams = { + 'method.request.querystring.pageNumber': true, + 'method.request.querystring.searchFilter': false + }; + collectionResource.addMethod( + 'GET', + context.integration, + DeploymentRestApiHelper.createMethodOptions(context, `Get${operationPrefix}s`, getParams) + ); + + // POST /collection - Deploy item (consistent naming) + const baseOptions = DeploymentRestApiHelper.createMethodOptions(context, `Deploy${operationPrefix}`); + const createMethodOptions: any = { ...baseOptions }; + + // Add request model if available + if (models && models.createRequestModel) { + createMethodOptions.requestModels = { 'application/json': models.createRequestModel }; + } + + // Add response model if available + if (models && models.createResponseModel) { + createMethodOptions.methodResponses = [ + { + responseModels: { 'application/json': models.createResponseModel }, + statusCode: '200' + } + ]; + } + collectionResource.addMethod('POST', context.integration, createMethodOptions); + // GET /collection/{id} - Get specific item + itemResource.addMethod( + 'GET', + context.integration, + DeploymentRestApiHelper.createMethodOptions(context, `Get${operationPrefix}`) + ); + + // PATCH /collection/{id} - Update item + const updateMethodOptions = DeploymentRestApiHelper.createMethodOptionsWithModels( + context, + `Update${operationPrefix}`, + models?.updateRequestModel, + models?.updateResponseModel + ); + itemResource.addMethod('PATCH', context.integration, updateMethodOptions); + // DELETE /collection/{id} + const deleteParams = { 'method.request.querystring.permanent': false }; + + itemResource.addMethod( + 'DELETE', + context.integration, + DeploymentRestApiHelper.createMethodOptions(context, `Delete${operationPrefix}`, deleteParams) + ); + + return [collectionResource, itemResource]; + } + + /** + * Adds a custom endpoint to any resource (collection or item) + * @param context The deployment API context + * @param parentResource Resource to add endpoint to (can be collection or item resource) + * @param httpMethod HTTP method (e.g., 'POST', 'GET') + * @param operationName Operation name for the method + * @param customPath Optional custom path segment (e.g., 'targets', 'upload-schemas'). If not provided, adds method directly to parentResource + * @param additionalParams Additional request parameters + * @returns The resource where the method was added (either new custom resource or the parent resource) + */ + static addCustomEndpoint( + context: DeploymentApiContext, + parentResource: api.Resource, + httpMethod: string, + operationName: string, + customPath?: string, + additionalParams?: Record + ): api.Resource { + // If customPath is provided, create a new sub-resource; otherwise use parentResource directly + const targetResource = customPath ? parentResource.addResource(customPath) : parentResource; + + DeploymentRestApiHelper.configureCors(targetResource, [httpMethod, 'OPTIONS']); + + targetResource.addMethod( + httpMethod, + context.integration, + DeploymentRestApiHelper.createMethodOptions(context, operationName, additionalParams) + ); + + return targetResource; + } + + /** + * Collects all resource paths from a set of resources for CDK-nag security suppressions + * @param resources Array of API Gateway resources + * @returns Array of resource paths (e.g., ['deployments', 'deployments/{useCaseId}', 'deployments/mcp/{mcp-id}']) + */ + static collectResourcePaths(resources: api.Resource[]): string[] { + return resources.map((resource) => resource.path.replace(/^\//, '')); // Remove leading slash + } +} diff --git a/source/infrastructure/lib/api/deployment-platform-rest-endpoint.ts b/source/infrastructure/lib/api/deployment-platform-rest-endpoint.ts index c56a102c..cc51a9ab 100644 --- a/source/infrastructure/lib/api/deployment-platform-rest-endpoint.ts +++ b/source/infrastructure/lib/api/deployment-platform-rest-endpoint.ts @@ -15,11 +15,11 @@ import { API_GATEWAY_THROTTLING_RATE_LIMIT, LOG_RETENTION_PERIOD } from '../utils/constants'; -import { deployUseCaseBodySchema } from './model-schema/deploy-usecase-body'; -import { deployUseCaseResponseSchema } from './model-schema/deploy-usecase-response'; -import { updateUseCaseBodySchema } from './model-schema/update-usecase-body'; -import { updateUseCaseResponseSchema } from './model-schema/update-usecase-response'; +import { UseCaseDeploymentSchemas } from './model-schema'; +import { uploadMcpSchemaBodySchema } from './model-schema/deployments/mcp/upload-schema-body'; +import { uploadMcpSchemaResponseSchema } from './model-schema/deployments/mcp/upload-schema-response'; import { BaseRestEndpoint, BaseRestEndpointProps } from './base-rest-endpoint'; +import { DeploymentRestApiHelper, DeploymentApiContext } from './deployment-platform-rest-api-helper'; export interface DeploymentPlatformRestEndpointProps extends BaseRestEndpointProps { /** @@ -32,10 +32,26 @@ export interface DeploymentPlatformRestEndpointProps extends BaseRestEndpointPro */ modelInfoApiLambda: lambda.Function; + /** + * The lambda function for MCP server management + */ + mcpManagementAPILambda: lambda.Function; + + /** + * The lambda function for agent management + */ + agentManagementAPILambda: lambda.Function; + + /** + * The lambda function for workflow management + */ + workflowManagementAPILambda: lambda.Function; + /** * The custom authorizer to allow admin users to access the use case management API. */ deploymentPlatformAuthorizer: api.RequestAuthorizer; + } export class DeploymentPlatformRestEndpoint extends BaseRestEndpoint { @@ -49,6 +65,11 @@ export class DeploymentPlatformRestEndpoint extends BaseRestEndpoint { */ public readonly requestValidator: api.RequestValidator; + /** + * Collection of all API resources created - stored for CDK-nag AwsSolutions-COG4 security suppressions + */ + private readonly createdResources: api.Resource[] = []; + constructor(scope: Construct, id: string, props: DeploymentPlatformRestEndpointProps) { super(scope, id, props); @@ -89,7 +110,7 @@ export class DeploymentPlatformRestEndpoint extends BaseRestEndpoint { retention: LOG_RETENTION_PERIOD } }); - restApi.apiGateway.node.tryRemoveChild("Endpoint"); + restApi.apiGateway.node.tryRemoveChild('Endpoint'); return restApi; } @@ -105,137 +126,220 @@ export class DeploymentPlatformRestEndpoint extends BaseRestEndpoint { /** * Creates all API resources and methods for the use case management API * @param props - * @param apiRoot * @param restApi */ private createUseCaseManagementApi(props: DeploymentPlatformRestEndpointProps, restApi: api.IRestApi) { + // Paths for the deployment api + const deploymentsResource = restApi.root.addResource('deployments'); + + this.createBaseDeploymentsAPI(deploymentsResource, props, restApi); + + // Create MCP deployments API + this.createMCPPathAPI(deploymentsResource, props, restApi); + + // Create Agents API + this.createAgentsPathAPI(deploymentsResource, props, restApi); + + // Create Workflows API + this.createWorkflowsPathAPI(deploymentsResource, props, restApi); + } + + /** + * Creates the base deployments API using helper methods + * @param deploymentsResource + * @param props + * @param restApi + * @returns The deploymentResource ({useCaseId}) for reuse by other APIs + */ + private createBaseDeploymentsAPI( + deploymentsResource: api.Resource, + props: DeploymentPlatformRestEndpointProps, + restApi: api.IRestApi + ): api.Resource { const useCaseManagementAPILambdaIntegration = new api.LambdaIntegration(props.useCaseManagementAPILambda, { passthroughBehavior: api.PassthroughBehavior.NEVER }); - // Paths for the deployment api - const deploymentsResource = restApi.root.addResource('deployments'); // for getting and creating deployments - const deploymentResource = deploymentsResource.addResource('{useCaseId}'); // for updating/deleting specific a specific deployment + // Create /deployments and /deployments/{useCaseId} structure + const deploymentResource = deploymentsResource.addResource('{useCaseId}'); - deploymentsResource.addCorsPreflight({ - allowOrigins: ['*'], - allowHeaders: ['Content-Type, Access-Control-Allow-Headers, X-Requested-With, Authorization'], - allowMethods: ['POST', 'GET', 'OPTIONS'] - }); + // Configure CORS + DeploymentRestApiHelper.configureCors(deploymentsResource, ['POST', 'GET', 'OPTIONS']); + DeploymentRestApiHelper.configureCors(deploymentResource, ['GET', 'PATCH', 'DELETE', 'OPTIONS']); - // Listing info about existing use cases - deploymentsResource.addMethod('GET', useCaseManagementAPILambdaIntegration, { - operationName: 'GetUseCases', - authorizer: props.deploymentPlatformAuthorizer, - authorizationType: api.AuthorizationType.CUSTOM, + const baseApiContext: DeploymentApiContext = { + scope: this, requestValidator: this.requestValidator, - requestParameters: { - 'method.request.querystring.pageNumber': true, - 'method.request.querystring.searchFilter': false, - 'method.request.header.authorization': true - } - }); - - // Deploying a new use case - deploymentsResource.addMethod('POST', useCaseManagementAPILambdaIntegration, { - operationName: 'DeployUseCase', authorizer: props.deploymentPlatformAuthorizer, - authorizationType: api.AuthorizationType.CUSTOM, - requestValidator: this.requestValidator, - requestParameters: { - 'method.request.header.authorization': true - }, - requestModels: { - 'application/json': new api.Model(this, 'DeployUseCaseApiBodyModel', { - restApi: restApi, - contentType: 'application/json', - description: 'Defines the required JSON structure of the POST request to deploy a use case', - modelName: 'DeployUseCaseApiBodyModel', - schema: deployUseCaseBodySchema - }) - }, - methodResponses: [ - { - responseModels: { - 'application/json': new api.Model(this, 'DeployUseCaseResponseModel', { - restApi: restApi, - contentType: 'application/json', - description: 'Response model to describe response of deploying a use case', - modelName: 'DeployUseCaseResponseModel', - schema: deployUseCaseResponseSchema - }) - }, - statusCode: '200' - } - ] - }); + integration: useCaseManagementAPILambdaIntegration + }; + + // Creates CRUD API endpoints: GET/POST /deployments and GET/PATCH/DELETE /deployments/{useCaseId} + // Generates models for request/response validation and returns created resources + const crudResources = DeploymentRestApiHelper.addCrudOperations( + baseApiContext, + deploymentsResource, + deploymentResource, + 'UseCase', + restApi, + UseCaseDeploymentSchemas.base + ); + + this.createdResources.push(...crudResources); + return deploymentResource; + } - deploymentResource.addCorsPreflight({ - allowOrigins: ['*'], - allowHeaders: ['Content-Type, Access-Control-Allow-Headers, X-Requested-With, Authorization'], - allowMethods: ['GET', 'PATCH', 'DELETE', 'OPTIONS'] + /** + * Creates /deployments/mcp API for MCP server management + */ + private createMCPPathAPI( + deploymentsResource: api.Resource, + props: DeploymentPlatformRestEndpointProps, + restApi: api.IRestApi + ): void { + const mcpIntegration = new api.LambdaIntegration(props.mcpManagementAPILambda, { + passthroughBehavior: api.PassthroughBehavior.NEVER }); - // Updating an existing use case deployment (i.e. changing its configuration) - deploymentResource.addMethod('PATCH', useCaseManagementAPILambdaIntegration, { - operationName: 'UpdateUseCase', - authorizer: props.deploymentPlatformAuthorizer, - authorizationType: api.AuthorizationType.CUSTOM, + // Create /deployments/mcp and /deployments/mcp/{useCaseId} + const { collectionResource, itemResource } = DeploymentRestApiHelper.createResourceStructure( + deploymentsResource, + 'mcp', + 'useCaseId' + ); + + const mcpApiContext: DeploymentApiContext = { + scope: this, requestValidator: this.requestValidator, - requestParameters: { - 'method.request.header.authorization': true - }, - requestModels: { - 'application/json': new api.Model(this, 'UpdateUseCaseApiBodyModel', { - restApi: restApi, - contentType: 'application/json', - description: 'Defines the required JSON structure of the PUT request to update a use case', - modelName: 'UpdateUseCaseApiBodyModel', - schema: updateUseCaseBodySchema - }) - }, - methodResponses: [ - { - responseModels: { - 'application/json': new api.Model(this, 'UpdateUseCaseResponseModel', { - restApi: restApi, - contentType: 'application/json', - description: 'Response model to describe response of updating a use case', - modelName: 'UpdateUseCaseResponseModel', - schema: updateUseCaseResponseSchema - }) - }, - statusCode: '200' - } - ] + authorizer: props.deploymentPlatformAuthorizer, + integration: mcpIntegration + }; + + // Add CRUD operations for MCP and collect resources + const crudResources = DeploymentRestApiHelper.addCrudOperations( + mcpApiContext, + collectionResource, + itemResource, + 'MCP', + restApi, + UseCaseDeploymentSchemas.mcp + ); + + this.createdResources.push(...crudResources); + + // Add collection-level custom endpoints and collect resources + const uploadSchemasRequestModel = DeploymentRestApiHelper.createModel( + mcpApiContext, + this.restApi, + 'UploadMCPSchemasApiRequest', + 'Defines the required JSON structure for uploading MCP schemas', + uploadMcpSchemaBodySchema + ); + + const uploadSchemasResponseModel = DeploymentRestApiHelper.createModel( + mcpApiContext, + this.restApi, + 'UploadMCPSchemasResponse', + 'Defines the response structure for MCP schema upload requests', + uploadMcpSchemaResponseSchema + ); + + // Create upload schemas resource with helper method for consistent validation + const uploadSchemasResource = collectionResource.addResource('upload-schemas'); + DeploymentRestApiHelper.configureCors(uploadSchemasResource, ['POST', 'OPTIONS']); + + const uploadMethodOptions = DeploymentRestApiHelper.createMethodOptionsWithModels( + mcpApiContext, + 'UploadMCPSchemas', + uploadSchemasRequestModel, + uploadSchemasResponseModel + ); + + uploadSchemasResource.addMethod('POST', mcpApiContext.integration, uploadMethodOptions); + + this.createdResources.push(uploadSchemasResource); + } + + /** + * Creates /deployments/agents API for agent management + */ + private createAgentsPathAPI( + deploymentsResource: api.Resource, + props: DeploymentPlatformRestEndpointProps, + restApi: api.IRestApi + ): void { + const agentIntegration = new api.LambdaIntegration(props.agentManagementAPILambda, { + passthroughBehavior: api.PassthroughBehavior.NEVER }); - // deleting (destroying) a deployed use case - deploymentResource.addMethod('DELETE', useCaseManagementAPILambdaIntegration, { - operationName: 'DeleteUseCase', - authorizer: props.deploymentPlatformAuthorizer, + // Create /deployments/agents and /deployments/agents/{agent-id} + const { collectionResource, itemResource } = DeploymentRestApiHelper.createResourceStructure( + deploymentsResource, + 'agents', + 'useCaseId' + ); + + const agentApiContext: DeploymentApiContext = { + scope: this, requestValidator: this.requestValidator, - authorizationType: api.AuthorizationType.CUSTOM, - requestParameters: { - 'method.request.querystring.permanent': false, - 'method.request.header.authorization': true - } + authorizer: props.deploymentPlatformAuthorizer, + integration: agentIntegration + }; + + // Add CRUD operations for Agents and collect resources + const crudResources = DeploymentRestApiHelper.addCrudOperations( + agentApiContext, + collectionResource, + itemResource, + 'Agent', + restApi, + UseCaseDeploymentSchemas.agent + ); + + this.createdResources.push(...crudResources); + } + + /** + * Creates /deployments/workflows API for workflow management + */ + private createWorkflowsPathAPI( + deploymentsResource: api.Resource, + props: DeploymentPlatformRestEndpointProps, + restApi: api.IRestApi + ): void { + const workflowIntegration = new api.LambdaIntegration(props.workflowManagementAPILambda, { + passthroughBehavior: api.PassthroughBehavior.NEVER }); - // Getting information on a deployed use case - deploymentResource.addMethod('GET', useCaseManagementAPILambdaIntegration, { - operationName: 'GetUseCase', - authorizer: props.deploymentPlatformAuthorizer, + // Create /deployments/workflows and /deployments/workflows/{workflow-id} + const { collectionResource, itemResource } = DeploymentRestApiHelper.createResourceStructure( + deploymentsResource, + 'workflows', + 'useCaseId' + ); + + const workflowApiContext: DeploymentApiContext = { + scope: this, requestValidator: this.requestValidator, - authorizationType: api.AuthorizationType.CUSTOM, - requestParameters: { - 'method.request.header.authorization': true - } - }); + authorizer: props.deploymentPlatformAuthorizer, + integration: workflowIntegration + }; + + // Add CRUD operations for Workflows and collect resources + const crudResources = DeploymentRestApiHelper.addCrudOperations( + workflowApiContext, + collectionResource, + itemResource, + 'Workflow', + restApi, + UseCaseDeploymentSchemas.workflow + ); + + this.createdResources.push(...crudResources); } /** - * Creates all API resources and methods for the use case management API + * Creates all API resources and methods for the model info API * @param props * @param restApi */ @@ -243,84 +347,47 @@ export class DeploymentPlatformRestEndpoint extends BaseRestEndpoint { const modelInfoLambdaIntegration = new api.LambdaIntegration(props.modelInfoApiLambda, { passthroughBehavior: api.PassthroughBehavior.NEVER }); + + const modelInfoApiContext: DeploymentApiContext = { + scope: this, + requestValidator: this.requestValidator, + authorizer: props.deploymentPlatformAuthorizer, + integration: modelInfoLambdaIntegration + }; + const modelInfoResource = restApi.root.addResource('model-info'); // Listing the available use case types const useCaseTypesResource = modelInfoResource.addResource('use-case-types'); - - useCaseTypesResource.addCorsPreflight({ - allowOrigins: ['*'], - allowHeaders: ['Content-Type, Access-Control-Allow-Headers, X-Requested-With, Authorization'], - allowMethods: ['GET', 'OPTIONS'] - }); - - useCaseTypesResource.addMethod('GET', modelInfoLambdaIntegration, { - operationName: 'GetUseCaseTypes', - authorizer: props.deploymentPlatformAuthorizer, - authorizationType: api.AuthorizationType.CUSTOM, - requestValidator: this.requestValidator, - requestParameters: { - 'method.request.header.authorization': true - } - }); + DeploymentRestApiHelper.addCustomEndpoint(modelInfoApiContext, useCaseTypesResource, 'GET', 'GetUseCaseTypes'); // Listing available model providers for a given use case const modelInfoByUseCaseResource = modelInfoResource.addResource('{useCaseType}'); const providersResource = modelInfoByUseCaseResource.addResource('providers'); - - providersResource.addCorsPreflight({ - allowOrigins: ['*'], - allowHeaders: ['Content-Type, Access-Control-Allow-Headers, X-Requested-With, Authorization'], - allowMethods: ['GET', 'OPTIONS'] - }); - - providersResource.addMethod('GET', modelInfoLambdaIntegration, { - operationName: 'GetModelProviders', - authorizer: props.deploymentPlatformAuthorizer, - authorizationType: api.AuthorizationType.CUSTOM, - requestValidator: this.requestValidator, - requestParameters: { - 'method.request.header.authorization': true - } - }); + DeploymentRestApiHelper.addCustomEndpoint(modelInfoApiContext, providersResource, 'GET', 'GetModelProviders'); // Getting available models for a given provider/use case const modelsResource = modelInfoByUseCaseResource.addResource('{providerName}'); - - modelsResource.addCorsPreflight({ - allowOrigins: ['*'], - allowHeaders: ['Content-Type, Access-Control-Allow-Headers, X-Requested-With, Authorization'], - allowMethods: ['GET', 'OPTIONS'] - }); - - modelsResource.addMethod('GET', modelInfoLambdaIntegration, { - operationName: 'GetModels', - authorizer: props.deploymentPlatformAuthorizer, - authorizationType: api.AuthorizationType.CUSTOM, - requestValidator: this.requestValidator, - requestParameters: { - 'method.request.header.authorization': true - } - }); + DeploymentRestApiHelper.addCustomEndpoint(modelInfoApiContext, modelsResource, 'GET', 'GetModels'); // Getting model info for a given use case/provider/model const specificModelInfoResource = modelsResource.addResource('{modelId}'); - - specificModelInfoResource.addCorsPreflight({ - allowOrigins: ['*'], - allowHeaders: ['Content-Type, Access-Control-Allow-Headers, X-Requested-With, Authorization'], - allowMethods: ['GET', 'OPTIONS'] - }); - - specificModelInfoResource.addMethod('GET', modelInfoLambdaIntegration, { - operationName: 'GetModelInfo', - authorizer: props.deploymentPlatformAuthorizer, - authorizationType: api.AuthorizationType.CUSTOM, - requestValidator: this.requestValidator, - requestParameters: { - 'method.request.header.authorization': true - } - }); + DeploymentRestApiHelper.addCustomEndpoint( + modelInfoApiContext, + specificModelInfoResource, + 'GET', + 'GetModelInfo' + ); + + // Collect model info resources for suppressions + this.createdResources.push( + modelInfoResource, + useCaseTypesResource, + modelInfoByUseCaseResource, + providersResource, + modelsResource, + specificModelInfoResource + ); } protected addSuppressions(): void { @@ -339,15 +406,9 @@ export class DeploymentPlatformRestEndpoint extends BaseRestEndpoint { } ]); - const resourcePathsToSuppress = [ - 'deployments', - 'deployments/{useCaseId}', - 'model-info', - 'model-info/use-case-types', - 'model-info/{useCaseType}/providers', - 'model-info/{useCaseType}/{providerName}', - 'model-info/{useCaseType}/{providerName}/{modelId}' - ]; + // Extract resource paths from all created resources + const resourcePathsToSuppress = DeploymentRestApiHelper.collectResourcePaths(this.createdResources); + const operationsToSuppress = ['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']; resourcePathsToSuppress.forEach((_path) => { operationsToSuppress.forEach((_operation) => { @@ -358,12 +419,14 @@ export class DeploymentPlatformRestEndpoint extends BaseRestEndpoint { [ { id: 'AwsSolutions-COG4', - reason: 'A Custom authorizer must be used in order to authenticate using Cognito user groups' + reason: 'The API uses a custom authorizer instead of Cognito user pool authorizer for authentication' } ], false ); - } catch (error) {} + } catch (error) { + // Ignore if resource doesn't exist + } }); }); diff --git a/source/infrastructure/lib/api/model-schema/deploy-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deploy-usecase-body.ts deleted file mode 100644 index 8c029f59..00000000 --- a/source/infrastructure/lib/api/model-schema/deploy-usecase-body.ts +++ /dev/null @@ -1,739 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; -import { - AUTHENTICATION_PROVIDERS, - BEDROCK_INFERENCE_TYPES, - CHAT_PROVIDERS, - DEFAULT_CONVERSATION_MEMORY_TYPE, - DEFAULT_ENABLE_RBAC, - DEFAULT_KENDRA_EDITION, - DEFAULT_KENDRA_NUMBER_OF_DOCS, - DEFAULT_KENDRA_QUERY_CAPACITY_UNITS, - DEFAULT_KENDRA_STORAGE_CAPACITY_UNITS, - DEFAULT_RETURN_SOURCE_DOCS, - DEFAULT_SCORE_THRESHOLD, - KENDRA_EDITIONS, - KNOWLEDGE_BASE_TYPES, - MAX_KENDRA_NUMBER_OF_DOCS, - MAX_KENDRA_QUERY_CAPACITY_UNITS, - MAX_KENDRA_STORAGE_CAPACITY_UNITS, - MAX_SCORE_THRESHOLD, - MIN_KENDRA_NUMBER_OF_DOCS, - MIN_SCORE_THRESHOLD, - MODEL_PARAM_TYPES, - SUPPORTED_AGENT_TYPES, - SUPPORTED_AUTHENTICATION_PROVIDERS, - SUPPORTED_BEDROCK_INFERENCE_TYPES, - SUPPORTED_CHAT_PROVIDERS, - SUPPORTED_CONVERSATION_MEMORY_TYPES, - SUPPORTED_KNOWLEDGE_BASE_TYPES, - USE_CASE_TYPES -} from '../../utils/constants'; - -export const deployUseCaseBodySchema: JsonSchema = { - schema: JsonSchemaVersion.DRAFT7, - type: JsonSchemaType.OBJECT, - properties: { - UseCaseType: { - type: JsonSchemaType.STRING, - description: 'Type of the use case to be deployed. Either "Text" or "Agent".', - enum: [USE_CASE_TYPES.TEXT, USE_CASE_TYPES.AGENT] - }, - UseCaseName: { - type: JsonSchemaType.STRING, - description: 'Friendly name of the use case to be deployed. For display purposes.' - }, - UseCaseDescription: { - type: JsonSchemaType.STRING, - description: 'Description of the use case to be deployed. For display purposes' - }, - DefaultUserEmail: { - type: JsonSchemaType.STRING, - description: 'Email address of the user who will be created with permissions to use the deployed use-case', - format: 'email' - }, - DeployUI: { - type: JsonSchemaType.BOOLEAN, - description: 'Deploy the CloudFront based UI for the use case', - default: true - }, - FeedbackParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters for the feedback capability for the use case.', - properties: { - FeedbackEnabled: { - type: JsonSchemaType.BOOLEAN, - description: 'Allow the feedback capability for the use case.', - default: false - } - }, - required: ['FeedbackEnabled'], - additionalProperties: false - }, - ExistingRestApiId: { - type: JsonSchemaType.STRING, - description: 'Rest API ID which will be used to invoke UseCaseDetails (and Feedback, if enabled).' - }, - VpcParams: { - type: JsonSchemaType.OBJECT, - description: - 'Parameters for the use case VPC. VPC can be either created for you, or provided by the user depending on the parameters provided.', - properties: { - VpcEnabled: { - type: JsonSchemaType.BOOLEAN, - description: 'Should the use case stacks resources be deployed within a VPC', - default: false - }, - CreateNewVpc: { - type: JsonSchemaType.BOOLEAN, - description: 'If true, a new VPC will be created for the use case.', - default: false - }, - ExistingVpcId: { - type: JsonSchemaType.STRING, - description: - 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed within the specified VPC.', - pattern: '^vpc-\\w{8}(\\w{9})?$' - }, - ExistingPrivateSubnetIds: { - type: JsonSchemaType.ARRAY, - description: - 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed using the specified subnets.', - items: { - type: JsonSchemaType.STRING, - pattern: '^subnet-\\w{8}(\\w{9})?$' - }, - maxItems: 16, - uniqueItems: true - }, - ExistingSecurityGroupIds: { - type: JsonSchemaType.ARRAY, - description: - 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed using the specified security groups.', - items: { - type: JsonSchemaType.STRING, - pattern: '^sg-\\w{8}(\\w{9})?$' - }, - maxItems: 5, - uniqueItems: true - } - }, - oneOf: [ - // if using an existing VPC, info about existing VPC resources is required - { - properties: { - VpcEnabled: { - type: JsonSchemaType.BOOLEAN, - enum: [true] - }, - CreateNewVpc: { - type: JsonSchemaType.BOOLEAN, - enum: [false] - } - }, - required: ['ExistingVpcId', 'ExistingPrivateSubnetIds', 'ExistingSecurityGroupIds'] - }, - // if creating a new VPC, not allowed to provide existing VPC resource info - { - properties: { - VpcEnabled: { - type: JsonSchemaType.BOOLEAN, - enum: [true] - }, - CreateNewVpc: { - type: JsonSchemaType.BOOLEAN, - enum: [true] - }, - ExistingVpcId: { - not: {} - }, - ExistingPrivateSubnetIds: { - not: {} - }, - ExistingSecurityGroupIds: { - not: {} - } - } - }, - // if VPC is disabled, not allowed to provide existing VPC resource info or request deployment of new VPC - { - properties: { - VpcEnabled: { - type: JsonSchemaType.BOOLEAN, - enum: [false] - }, - CreateNewVpc: { - not: {} - }, - ExistingVpcId: { - not: {} - }, - ExistingPrivateSubnetIds: { - not: {} - }, - ExistingSecurityGroupIds: { - not: {} - } - } - } - ], - required: ['VpcEnabled'], - additionalProperties: false - }, - ConversationMemoryParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters related to storing and using the chat history', - properties: { - ConversationMemoryType: { - type: JsonSchemaType.STRING, - default: DEFAULT_CONVERSATION_MEMORY_TYPE, - enum: SUPPORTED_CONVERSATION_MEMORY_TYPES - }, - HumanPrefix: { - type: JsonSchemaType.STRING, - description: 'Prefix used in the history when storing messages sent by the user' - }, - AiPrefix: { - type: JsonSchemaType.STRING, - description: 'Prefix used in the history when storing responses from the LLM' - }, - ChatHistoryLength: { - type: JsonSchemaType.INTEGER, - description: 'Number of messages to store in the history', - minimum: 0 - } - }, - additionalProperties: false - }, - KnowledgeBaseParams: { - type: JsonSchemaType.OBJECT, - description: - 'Parameters related to the knowledge base. Based on KnowledgeBaseType, different nested parameters are required.', - properties: { - KnowledgeBaseType: { - type: JsonSchemaType.STRING, - description: 'The type of knowledge base to use. Required.', - default: KNOWLEDGE_BASE_TYPES.KENDRA, - enum: SUPPORTED_KNOWLEDGE_BASE_TYPES - }, - NoDocsFoundResponse: { - type: JsonSchemaType.STRING, - description: 'Response text message to use when the knowledge base does not return any documents', - minLength: 1 - }, - KendraKnowledgeBaseParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters specific to Kendra', - properties: { - ExistingKendraIndexId: { - type: JsonSchemaType.STRING, - description: - 'Index ID of an existing Kendra index to be used for the use case. Required if KendraIndexName is not provided.', - pattern: '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$' - }, - KendraIndexName: { - type: JsonSchemaType.STRING, - description: - 'Name of the new Kendra index to be created, if provided. Required if ExistingKendraIndexId is not provided.', - pattern: '^[0-9a-zA-Z-]{1,64}$' - }, - QueryCapacityUnits: { - type: JsonSchemaType.INTEGER, - description: - 'Number of additional query capacity units to provision for the new Kendra index to be created. Can only be provided if if ExistingKendraIndexId is not provided.', - default: DEFAULT_KENDRA_QUERY_CAPACITY_UNITS, - minimum: 0, - maximum: MAX_KENDRA_QUERY_CAPACITY_UNITS - }, - StorageCapacityUnits: { - type: JsonSchemaType.INTEGER, - description: - 'Number of additional storage capacity units to provision for the new Kendra index to be created. Can only be provided if if ExistingKendraIndexId is not provided.', - default: DEFAULT_KENDRA_STORAGE_CAPACITY_UNITS, - minimum: 0, - maximum: MAX_KENDRA_STORAGE_CAPACITY_UNITS - }, - KendraIndexEdition: { - type: JsonSchemaType.STRING, - description: - 'Edition of the Kendra index to be created. Can only be provided if if ExistingKendraIndexId is not provided.', - enum: KENDRA_EDITIONS, - default: DEFAULT_KENDRA_EDITION - }, - AttributeFilter: { - type: JsonSchemaType.OBJECT, - description: - 'Filter to apply when querying the Kendra index. See: https://docs.aws.amazon.com/kendra/latest/APIReference/API_AttributeFilter.html' - }, - RoleBasedAccessControlEnabled: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether role-based access control is enabled on the Kendra index, used to restrict Kendra queries to documents accessible by user group and id.', - default: DEFAULT_ENABLE_RBAC - } - }, - // If providing KendraKnowledgeBaseParams, either we provide only a Kendra index id or we provide the parameters to create one. - oneOf: [ - { - required: ['ExistingKendraIndexId'], - properties: { - KendraIndexName: { - not: {} - }, - QueryCapacityUnits: { - not: {} - }, - StorageCapacityUnits: { - not: {} - }, - KendraIndexEdition: { - not: {} - } - } - }, - { - required: ['KendraIndexName'], - properties: { - ExistingKendraIndexId: { - not: {} - } - } - } - ], - additionalProperties: false - }, - BedrockKnowledgeBaseParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters specific to Bedrock Knowledge Bases', - properties: { - BedrockKnowledgeBaseId: { - type: JsonSchemaType.STRING, - description: - 'ID of the Bedrock knowledge base to use in a RAG use case. Required if KnowledgeBaseType is Bedrock.', - pattern: '^[0-9a-zA-Z]{1,10}$' - }, - RetrievalFilter: { - type: JsonSchemaType.OBJECT, - description: - 'Filter to apply when querying the Bedrock knowledge base. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_RetrievalFilter.html' - }, - OverrideSearchType: { - type: JsonSchemaType.STRING, - description: - "If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID search using both vector embeddings and raw text, or SEMANTIC search using only vector embeddings. By default (if this is not provided), Amazon Bedrock will choose for you. For other vector store types, passing this parameter will result in a validation error during retrieval. For more information, see https://docs.aws.amazon.com/bedrock/latest/userguide/kb-test-config.html", - enum: ['HYBRID', 'SEMANTIC', 'NONE'], - default: 'NONE' - } - }, - required: ['BedrockKnowledgeBaseId'], - additionalProperties: false - }, - NumberOfDocs: { - type: JsonSchemaType.INTEGER, - description: - 'The number of documents returned from the knowledge base which will be used as context to be sent to the LLM', - default: DEFAULT_KENDRA_NUMBER_OF_DOCS, - minimum: MIN_KENDRA_NUMBER_OF_DOCS, - maximum: MAX_KENDRA_NUMBER_OF_DOCS - }, - ScoreThreshold: { - type: JsonSchemaType.NUMBER, - description: 'The minimum score a document must have to be returned from the knowledge base', - default: DEFAULT_SCORE_THRESHOLD, - minimum: MIN_SCORE_THRESHOLD, - maximum: MAX_SCORE_THRESHOLD - }, - ReturnSourceDocs: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether to return information about the source of documents returned from the knowledge base', - default: DEFAULT_RETURN_SOURCE_DOCS - } - }, - // Only the parameters for the selected KnowledgeBaseType can be provided - oneOf: [ - { - properties: { - KnowledgeBaseType: { enum: [KNOWLEDGE_BASE_TYPES.KENDRA] }, - BedrockKnowledgeBaseParams: { - not: {} - } - }, - required: ['KendraKnowledgeBaseParams'] - }, - { - properties: { - KnowledgeBaseType: { enum: [KNOWLEDGE_BASE_TYPES.BEDROCK] }, - KendraKnowledgeBaseParams: { - not: {} - } - }, - required: ['BedrockKnowledgeBaseParams'] - } - ], - required: ['KnowledgeBaseType'], - additionalProperties: false - }, - AuthenticationParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters related to the Authentication.', - properties: { - AuthenticationProvider: { - type: JsonSchemaType.STRING, - description: 'Supported authentication provider.', - enum: SUPPORTED_AUTHENTICATION_PROVIDERS - }, - CognitoParams: { - type: JsonSchemaType.OBJECT, - description: 'Cognito user pool related parameters.', - properties: { - ExistingUserPoolId: { - type: JsonSchemaType.STRING, - description: 'Existing Cognito User Pool Id.', - pattern: '^[\\w-]+_[0-9a-zA-Z]+$', - minLength: 1, - maxLength: 55 - }, - ExistingUserPoolClientId: { - type: JsonSchemaType.STRING, - description: 'Existing Cognito User Pool Client Id.', - pattern: '^[\\w+]+$', - minLength: 1, - maxLength: 128 - } - }, - required: ['ExistingUserPoolId'] - } - }, - anyOf: [ - { - properties: { - AuthenticationProvider: { enum: [AUTHENTICATION_PROVIDERS.COGNITO] } - }, - required: ['CognitoParams'] - } - ], - required: ['AuthenticationProvider'] - }, - LlmParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters related to the LLM performing inferences.', - properties: { - ModelProvider: { - type: JsonSchemaType.STRING, - description: 'Name of the LLM provider which the use case will use', - enum: SUPPORTED_CHAT_PROVIDERS - }, - BedrockLlmParams: { - type: JsonSchemaType.OBJECT, - description: `Parameters specific to use cases using Bedrock as an LLM provider. Can only be provided when "ModelProvider" is ${CHAT_PROVIDERS.BEDROCK}`, - properties: { - ModelId: { - type: JsonSchemaType.STRING, - description: - 'Depending on whether ModelArn is provided, this will either be used to select the on-demand model to invoke or be used to specify the base model that the selected provisioned/custom model is based on.', - pattern: - '^([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$' - }, - ModelArn: { - type: JsonSchemaType.STRING, - description: - 'ARN of the provisioned/custom model to use from Amazon Bedrock. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax', - pattern: - '^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-:]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))$' - }, - InferenceProfileId: { - type: JsonSchemaType.STRING, - description: - 'The identifier of the Bedrock inference profile to use when invoking the model. When provided, a ModelId and ModelArn should not be provided. All inference requests will be mapped to the specified inference profile, which can be configured in the Bedrock console. This enables cross region model invocation. See: https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-use.html', - pattern: '^[a-zA-Z0-9-:.]+$' - }, - GuardrailIdentifier: { - type: JsonSchemaType.STRING, - description: - "The unique identifier of the Bedrock guardrail that you want to be applied to all LLM invocations. If you don't provide a value, no guardrail is applied to the invocation. If provided, you must also provide a GuardrailVersion. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax", - pattern: - '^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$' - }, - GuardrailVersion: { - type: JsonSchemaType.STRING, - description: - 'Version of the guardrail to be used. Must be provided if GuardrailIdentifier is provided. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax', - pattern: '^(([1-9][0-9]{0,7})|(DRAFT))$' - }, - BedrockInferenceType: { - type: JsonSchemaType.STRING, - description: 'The type of Bedrock inference to use. Required for Bedrock LLM params.', - default: BEDROCK_INFERENCE_TYPES.QUICK_START, - enum: SUPPORTED_BEDROCK_INFERENCE_TYPES - }, - }, - required: ['BedrockInferenceType'], - allOf: [ - // Conditional requirements based on BedrockInferenceType - { - oneOf: [ - { - properties: { - BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.QUICK_START, BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION] }, - InferenceProfileId: { - not: {} - } - }, - required: ['ModelId'] - }, - { - properties: { - BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE] }, - ModelId: { - not: {} - } - }, - required: ['InferenceProfileId'] - }, - { - properties: { - BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.PROVISIONED] }, - }, - required: ['ModelArn'] - } - ] - }, - // either provide both guardrail params or neither - { - oneOf: [ - { - required: ['GuardrailIdentifier', 'GuardrailVersion'] - }, - { - properties: { - GuardrailIdentifier: { - not: {} - }, - GuardrailVersion: { - not: {} - } - } - } - ] - } - ], - additionalProperties: false - }, - SageMakerLlmParams: { - type: JsonSchemaType.OBJECT, - description: `Parameters specific to use cases using a SageMaker model as an LLM provider. Can only be provided when "ModelProvider" is ${CHAT_PROVIDERS.SAGEMAKER}`, - properties: { - EndpointName: { - type: JsonSchemaType.STRING, - description: 'Endpoint for the deployed model to use from SageMaker', - pattern: '^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$' - }, - ModelInputPayloadSchema: { - type: JsonSchemaType.OBJECT, - description: - 'An object defining the schema to be used to populate model params for SageMaker endpoint models' - }, - ModelOutputJSONPath: { - type: JsonSchemaType.STRING, - description: - 'JSON path where the response should be retrieved from the model output payload. Applicable only to SageMaker endpoints.', - pattern: '^\\$[\\w\\.\\,\\[\\]:\\\'\\"\\-\\(\\)\\*\\?\\@]*$' - } - }, - required: ['EndpointName', 'ModelInputPayloadSchema', 'ModelOutputJSONPath'], - additionalProperties: false - }, - ModelParams: { - type: JsonSchemaType.OBJECT, - description: - 'Additional model params to be passed to the model, whose keys are as defined in the LLM documentation', - additionalProperties: { - type: JsonSchemaType.OBJECT, - properties: { - Value: { - type: JsonSchemaType.STRING, - description: 'Value of the param' - }, - Type: { - type: JsonSchemaType.STRING, - enum: MODEL_PARAM_TYPES, - description: - 'Python type of the param, as a string. Will be cast to this type before being fed to LLM.' - } - }, - required: ['Value', 'Type'] - } - }, - PromptParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters related to the prompt(s) used by the use case', - properties: { - PromptTemplate: { - type: JsonSchemaType.STRING, - description: - 'Default prompt template which will be fed to the LLM, barring any overrides by users' - }, - UserPromptEditingEnabled: { - type: JsonSchemaType.BOOLEAN, - description: 'Whether to allow the user of the use case to edit their own prompt', - default: true - }, - MaxPromptTemplateLength: { - type: JsonSchemaType.INTEGER, - description: - 'Maximum length (in characters) of the system prompt template that a user can use in the use case', - minimum: 0 - }, - MaxInputTextLength: { - type: JsonSchemaType.INTEGER, - description: - 'Maximum length (in characters) of the input text that can be sent to the LLM.', - minimum: 1 - }, - RephraseQuestion: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether to use the disambiguated query instead of the original user input in the final prompt. Only appluies when using RAG.', - default: true - }, - DisambiguationPromptTemplate: { - type: JsonSchemaType.STRING, - description: - 'Prompt which will be internally used to disambiguate new queries in combination with the chat history. Only applies when using RAG.' - }, - DisambiguationEnabled: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether to perform disambiguation for the use case. Only applies when using RAG.', - default: true - } - }, - additionalProperties: false - }, - Temperature: { - type: JsonSchemaType.NUMBER, - description: - 'Temperature value which will be fed to the LLM. Scale should be chosen based on the supported range of the model provider.', - default: 0, - minimum: 0, - maximum: 100 - }, - Streaming: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether to stream the LLM responses back to the user or not. Note some providers do not support streaming.' - }, - RAGEnabled: { - type: JsonSchemaType.BOOLEAN, - description: - 'If true, the use case will reference a knowledge base when responding to the user. Otherwise provides chat with the LLM directly.', - default: true - }, - Verbose: { - type: JsonSchemaType.BOOLEAN, - description: 'Whether to print out debug messages to the console', - default: false - } - }, - oneOf: [ - { - properties: { - ModelProvider: { enum: [CHAT_PROVIDERS.BEDROCK] } - }, - required: ['BedrockLlmParams'] - }, - { - properties: { - ModelProvider: { enum: [CHAT_PROVIDERS.SAGEMAKER] } - }, - required: ['SageMakerLlmParams'] - } - ], - additionalProperties: false - }, - AgentParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters for Bedrock agent invocation workflow.', - properties: { - AgentType: { - type: JsonSchemaType.STRING, - description: 'The type of agent to use. Required.', - enum: SUPPORTED_AGENT_TYPES - }, - BedrockAgentParams: { - type: JsonSchemaType.OBJECT, - properties: { - AgentId: { - type: JsonSchemaType.STRING, - description: 'ID of the Bedrock agent to be invoked.', - pattern: '^[0-9a-zA-Z]+$', - maxLength: 10 - }, - AgentAliasId: { - type: JsonSchemaType.STRING, - description: 'Alias ID of the Bedrock agent to be invoked.', - pattern: '^[0-9a-zA-Z]+$', - maxLength: 10 - }, - EnableTrace: { - type: JsonSchemaType.BOOLEAN, - description: 'Whether to enable tracing for the agent invocation.', - default: false - } - }, - required: ['AgentId', 'AgentAliasId', 'EnableTrace'], - additionalProperties: false - } - }, - required: ['AgentType'], - additionalProperties: false - } - }, - required: ['UseCaseType', 'UseCaseName'], - oneOf: [ - { - // Text-based use case - properties: { - UseCaseType: { enum: [USE_CASE_TYPES.TEXT] } - }, - required: ['LlmParams'], - oneOf: [ - { - // Non-RAG case - properties: { - LlmParams: { - properties: { - RAGEnabled: { enum: [false] } - } - }, - KnowledgeBaseParams: { 'not': {} } - } - }, - { - // RAG-enabled case - properties: { - LlmParams: { - properties: { - RAGEnabled: { enum: [true] } - } - } - }, - required: ['KnowledgeBaseParams'] - } - ] - }, - { - // Agent-based use case - properties: { - UseCaseType: { enum: [USE_CASE_TYPES.AGENT] }, - KnowledgeBaseParams: { 'not': {} }, - LlmParams: { 'not': {} } - }, - required: ['AgentParams'] - } - ], - additionalProperties: false -}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/agents/deploy-agent-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deployments/agents/deploy-agent-usecase-body.ts new file mode 100644 index 00000000..eea9aeaa --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/agents/deploy-agent-usecase-body.ts @@ -0,0 +1,25 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { USE_CASE_TYPES } from '../../../../utils/constants'; +import { deployUseCaseProperties } from '../base-usecase-schema'; +import { llmParamsSchema } from '../../shared/llm-params'; +import { agentCoreParams } from './params/agent-core-params'; + +export const deployAgentUseCaseBodySchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Schema for deploying an agent use case', + properties: { + ...deployUseCaseProperties, + UseCaseType: { + type: JsonSchemaType.STRING, + description: 'Type of the use case to be deployed. Must be "AgentBuilder" for agent deployments.', + enum: [USE_CASE_TYPES.AGENT_BUILDER] + }, + LlmParams: llmParamsSchema, + AgentParams: agentCoreParams + }, + required: ['UseCaseName', 'UseCaseType', 'LlmParams', 'AgentParams'], + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/agents/deploy-agent-usecase-response.ts b/source/infrastructure/lib/api/model-schema/deployments/agents/deploy-agent-usecase-response.ts new file mode 100644 index 00000000..f48cfb10 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/agents/deploy-agent-usecase-response.ts @@ -0,0 +1,14 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +export const deployAgentUseCaseResponseSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + properties: { + useCaseId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the deployed agent use case' + } + } +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/deployments/agents/params/agent-core-params.ts b/source/infrastructure/lib/api/model-schema/deployments/agents/params/agent-core-params.ts new file mode 100644 index 00000000..53fb6321 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/agents/params/agent-core-params.ts @@ -0,0 +1,77 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH } from '../../../../../utils/constants'; +import { agentMemoryParams } from '../../../shared/agent-memory-params'; + +export const agentCoreParams: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Core agent configuration parameters', + properties: { + SystemPrompt: { + type: JsonSchemaType.STRING, + description: 'System prompt template for the agent', + minLength: 1, + maxLength: AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + }, + MCPServers: { + type: JsonSchemaType.ARRAY, + description: 'MCP servers to integrate with the agent (no AWS service limits)', + items: { + type: JsonSchemaType.OBJECT, + properties: { + UseCaseId: { + type: JsonSchemaType.STRING, + description: 'MCP server deployment use case ID', + minLength: 1 + }, + UseCaseName: { + type: JsonSchemaType.STRING, + description: 'Human-readable name of the MCP server', + minLength: 1 + }, + Url: { + type: JsonSchemaType.STRING, + description: 'MCP server endpoint URL', + minLength: 1 + }, + Type: { + type: JsonSchemaType.STRING, + description: 'MCP server type', + enum: ['gateway', 'runtime'] + } + }, + required: ['UseCaseId', 'UseCaseName', 'Url', 'Type'], + additionalProperties: false + } + }, + Tools: { + type: JsonSchemaType.ARRAY, + description: 'Built-in Strands tools for the agent (no AWS service limits)', + items: { + type: JsonSchemaType.OBJECT, + properties: { + ToolId: { + type: JsonSchemaType.STRING, + description: 'Tool identifier', + minLength: 1 + } + }, + required: ['ToolId'], + additionalProperties: false + } + }, + MemoryConfig: agentMemoryParams + }, + required: ['SystemPrompt'], + additionalProperties: false +}; + +//exclude `required` property at top level to allowa for partial patch updates +export const agentCoreParamsUpdateSchema: JsonSchema = { + type: agentCoreParams.type, + description: agentCoreParams.description, + properties: agentCoreParams.properties, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/agents/update-agent-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deployments/agents/update-agent-usecase-body.ts new file mode 100644 index 00000000..0d36b74a --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/agents/update-agent-usecase-body.ts @@ -0,0 +1,24 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { updateUseCaseProperties } from '../base-usecase-schema'; +import { llmParamsUpdateSchema } from '../../shared/llm-params'; +import { agentCoreParamsUpdateSchema } from './params/agent-core-params'; +import { USE_CASE_TYPES } from '../../../../utils/constants'; + +export const updateAgentUseCaseBodySchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Schema for updating an agent use case', + properties: { + ...updateUseCaseProperties, + UseCaseType: { + type: JsonSchemaType.STRING, + description: 'Type of the use case to be deployed. Must be "AgentBuilder" for agent deployments.', + enum: [USE_CASE_TYPES.AGENT_BUILDER] + }, + LlmParams: llmParamsUpdateSchema, + AgentParams: agentCoreParamsUpdateSchema + }, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/agents/update-agent-usecase-response.ts b/source/infrastructure/lib/api/model-schema/deployments/agents/update-agent-usecase-response.ts new file mode 100644 index 00000000..0e39a464 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/agents/update-agent-usecase-response.ts @@ -0,0 +1,14 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +export const updateAgentUseCaseResponseSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + properties: { + useCaseId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the updated agent use case' + } + } +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/deployments/base-usecase-schema.ts b/source/infrastructure/lib/api/model-schema/deployments/base-usecase-schema.ts new file mode 100644 index 00000000..0e4f6689 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/base-usecase-schema.ts @@ -0,0 +1,70 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { vpcParamsCreateSchema, vpcParamsUpdateSchema } from '../shared/vpc-params'; +import { authenticationParamsSchema } from '../shared/auth-params'; + +/** + * Base schema definitions for use case deployment and update operations. + * This file contains common properties shared between deploy and update schemas. + */ + +// Common properties shared between deploy and update use case operations +export const commonUseCaseProperties = { + UseCaseDescription: { + type: JsonSchemaType.STRING, + description: 'Description of the use case to be deployed. For display purposes' + }, + DefaultUserEmail: { + type: JsonSchemaType.STRING, + description: 'Email address of the user who will be created with permissions to use the deployed use-case', + format: 'email' + }, + DeployUI: { + type: JsonSchemaType.BOOLEAN, + description: 'Deploy the CloudFront based UI for the use case', + default: true + }, + FeedbackParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters for the feedback capability for the use case.', + properties: { + FeedbackEnabled: { + type: JsonSchemaType.BOOLEAN, + description: 'Allow the feedback capability for the use case.', + default: false + } + }, + required: ['FeedbackEnabled'], + additionalProperties: false + }, + ExistingRestApiId: { + type: JsonSchemaType.STRING, + description: 'Rest API ID which will be used to invoke UseCaseDetails (and Feedback, if enabled).' + }, + ProvisionedConcurrencyValue: { + type: JsonSchemaType.INTEGER, + description: 'Number of execution environments to keep warm. Set to 0 to disable provisioned concurrency, or 1-5 to enable.', + minimum: 0, + maximum: 5, + default: 0 + }, + AuthenticationParams: authenticationParamsSchema +} + +// Properties specific to deploying a new use case (includes UseCaseName which is required for new deployments) +export const deployUseCaseProperties = { + ...commonUseCaseProperties, + UseCaseName: { + type: JsonSchemaType.STRING, + description: 'Friendly name of the use case to be deployed. For display purposes.' + }, + VpcParams: vpcParamsCreateSchema +}; + +// Properties for updating an existing use case (excludes UseCaseName since it cannot be changed) +export const updateUseCaseProperties = { + ...commonUseCaseProperties, + VpcParams: vpcParamsUpdateSchema +} diff --git a/source/infrastructure/lib/api/model-schema/deployments/deploy-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deployments/deploy-usecase-body.ts new file mode 100644 index 00000000..28409f4f --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/deploy-usecase-body.ts @@ -0,0 +1,139 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { + DEFAULT_CONVERSATION_MEMORY_TYPE, + SUPPORTED_AGENT_TYPES, + SUPPORTED_CONVERSATION_MEMORY_TYPES, + USE_CASE_TYPES +} from '../../../utils/constants'; +import { deployUseCaseProperties } from './base-usecase-schema'; +import { llmParamsSchema } from '../shared/llm-params'; +import { knowledgeBaseParamsSchema } from '../shared/knowledge-base-params'; + +/** + * JSON Schema for deploying a new use case via the REST API. + * This schema validates the request body for POST /deployments operations. + * Supports both Text-based and Agent-based use cases with conditional validation. + */ +export const deployUseCaseBodySchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT7, + type: JsonSchemaType.OBJECT, + properties: { + ...deployUseCaseProperties, + UseCaseType: { + type: JsonSchemaType.STRING, + description: 'Type of the use case to be deployed. Either "Text" or "Agent".', + enum: [USE_CASE_TYPES.TEXT, USE_CASE_TYPES.AGENT] + }, + ConversationMemoryParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters related to storing and using the chat history', + properties: { + ConversationMemoryType: { + type: JsonSchemaType.STRING, + default: DEFAULT_CONVERSATION_MEMORY_TYPE, + enum: SUPPORTED_CONVERSATION_MEMORY_TYPES + }, + HumanPrefix: { + type: JsonSchemaType.STRING, + description: 'Prefix used in the history when storing messages sent by the user' + }, + AiPrefix: { + type: JsonSchemaType.STRING, + description: 'Prefix used in the history when storing responses from the LLM' + }, + ChatHistoryLength: { + type: JsonSchemaType.INTEGER, + description: 'Number of messages to store in the history', + minimum: 0 + } + }, + additionalProperties: false + }, + KnowledgeBaseParams: knowledgeBaseParamsSchema, + LlmParams: llmParamsSchema, + AgentParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters for Bedrock agent invocation workflow.', + properties: { + AgentType: { + type: JsonSchemaType.STRING, + description: 'The type of agent to use. Required.', + enum: SUPPORTED_AGENT_TYPES + }, + BedrockAgentParams: { + type: JsonSchemaType.OBJECT, + properties: { + AgentId: { + type: JsonSchemaType.STRING, + description: 'ID of the Bedrock agent to be invoked.', + pattern: '^[0-9a-zA-Z]+$', + maxLength: 10 + }, + AgentAliasId: { + type: JsonSchemaType.STRING, + description: 'Alias ID of the Bedrock agent to be invoked.', + pattern: '^[0-9a-zA-Z]+$', + maxLength: 10 + }, + EnableTrace: { + type: JsonSchemaType.BOOLEAN, + description: 'Whether to enable tracing for the agent invocation.', + default: false + } + }, + required: ['AgentId', 'AgentAliasId', 'EnableTrace'], + additionalProperties: false + } + }, + required: ['AgentType'], + additionalProperties: false + } + }, + oneOf: [ + { + // Text-based use case + properties: { + UseCaseType: { enum: [USE_CASE_TYPES.TEXT] } + }, + required: ['LlmParams'], + oneOf: [ + { + // Non-RAG case + properties: { + LlmParams: { + properties: { + RAGEnabled: { enum: [false] } + } + }, + KnowledgeBaseParams: { 'not': {} } + } + }, + { + // RAG-enabled case + properties: { + LlmParams: { + properties: { + RAGEnabled: { enum: [true] } + } + } + }, + required: ['KnowledgeBaseParams'] + } + ] + }, + { + // Agent-based use case + properties: { + UseCaseType: { enum: [USE_CASE_TYPES.AGENT] }, + KnowledgeBaseParams: { 'not': {} }, + LlmParams: { 'not': {} } + }, + required: ['AgentParams'] + } + ], + required: ['UseCaseType', 'UseCaseName'], + additionalProperties: false +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/deploy-usecase-response.ts b/source/infrastructure/lib/api/model-schema/deployments/deploy-usecase-response.ts similarity index 72% rename from source/infrastructure/lib/api/model-schema/deploy-usecase-response.ts rename to source/infrastructure/lib/api/model-schema/deployments/deploy-usecase-response.ts index 4ea2d49e..d25cb192 100644 --- a/source/infrastructure/lib/api/model-schema/deploy-usecase-response.ts +++ b/source/infrastructure/lib/api/model-schema/deployments/deploy-usecase-response.ts @@ -3,6 +3,10 @@ import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +/** + * JSON Schema for the response returned after successfully deploying a new use case. + * Contains the unique identifier for the newly created use case. + */ export const deployUseCaseResponseSchema: JsonSchema = { type: JsonSchemaType.OBJECT, properties: { @@ -11,4 +15,4 @@ export const deployUseCaseResponseSchema: JsonSchema = { description: 'ID of the use case that was created' } } -}; +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-body.ts new file mode 100644 index 00000000..ff52261a --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-body.ts @@ -0,0 +1,85 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { USE_CASE_TYPES, MCP_RUNTIME_ENV_VARS_MAX_COUNT, ECR_URI_PATTERN } from '../../../../utils/constants'; +import { gatewayParams } from './params/mcp-gateway-params'; +import { deployUseCaseProperties } from '../base-usecase-schema'; + +/** + * JSON Schema for deploying a new MCP use case via the REST API. + * This schema validates the request body for POST /deployments operations for MCP servers. + * Supports both Gateway-type and Runtime-type MCP servers with conditional validation. + */ +export const deployMcpUseCaseBodySchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT7, + type: JsonSchemaType.OBJECT, + properties: { + ...deployUseCaseProperties, + UseCaseType: { + type: JsonSchemaType.STRING, + description: 'Type of the use case to be deployed. Must be "MCPServer" for MCP deployments.', + enum: [USE_CASE_TYPES.MCP_SERVER] + }, + MCPParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters for MCP server configuration.', + properties: { + GatewayParams: gatewayParams, + RuntimeParams: { + type: JsonSchemaType.OBJECT, + description: 'Runtime-specific configuration (required for RUNTIME type)', + properties: { + EcrUri: { + type: JsonSchemaType.STRING, + description: 'ECR Docker image URI for the MCP server runtime', + pattern: ECR_URI_PATTERN + }, + EnvironmentVariables: { + type: JsonSchemaType.OBJECT, + description: 'Environment variables to pass to the MCP server runtime container', + maxProperties: MCP_RUNTIME_ENV_VARS_MAX_COUNT, + patternProperties: { + '^[a-zA-Z_][a-zA-Z0-9_]*$': { + type: JsonSchemaType.STRING, + description: 'Environment variable value' + } + }, + additionalProperties: false + } + }, + required: ['EcrUri'], + additionalProperties: false + } + }, + additionalProperties: false + } + }, + // Conditional validation based on MCP server type + oneOf: [ + { + // Gateway-type MCP server + properties: { + MCPParams: { + properties: { + RuntimeParams: { not: {} } + }, + required: ['GatewayParams'] + } + } + }, + { + // Runtime-type MCP server + properties: { + MCPParams: { + properties: { + GatewayParams: { not: {} } + }, + required: ['RuntimeParams'] + } + } + } + ], + required: ['UseCaseName', 'UseCaseType', 'MCPParams'], + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-response.ts b/source/infrastructure/lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-response.ts new file mode 100644 index 00000000..9183502d --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-response.ts @@ -0,0 +1,20 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +/** + * JSON Schema for the response returned after successfully deploying a new MCP use case. + * Contains the unique identifier for the newly created MCP use case. + */ +export const deployMcpUseCaseResponseSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + properties: { + useCaseId: { + type: JsonSchemaType.STRING, + description: 'ID of the MCP use case that was created' + } + }, + required: ['useCaseId'], + additionalProperties: false +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/deployments/mcp/params/mcp-gateway-params.ts b/source/infrastructure/lib/api/model-schema/deployments/mcp/params/mcp-gateway-params.ts new file mode 100644 index 00000000..590f2d0c --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/mcp/params/mcp-gateway-params.ts @@ -0,0 +1,250 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { + MCP_GATEWAY_MAX_TARGETS_PER_GATEWAY, + MCP_GATEWAY_TARGET_NAME_MAX_LENGTH, + MCP_GATEWAY_TARGET_DESCRIPTION_MAX_LENGTH, + MCP_GATEWAY_TARGET_NAME_PATTERN, + MCP_GATEWAY_AUTH_TYPES, + MCP_GATEWAY_TARGET_TYPES, + LAMBDA_ARN_PATTERN, + BEDROCK_AGENTCORE_OAUTH_ARN_PATTERN, + BEDROCK_AGENTCORE_API_KEY_ARN_PATTERN, + MCP_SCHEMA_KEY_PATTERN, + OAUTH_SCOPE_MAX_LENGTH, + OAUTH_SCOPES_MAX_COUNT, + OAUTH_CUSTOM_PARAM_KEY_MAX_LENGTH, + OAUTH_CUSTOM_PARAM_VALUE_MAX_LENGTH, + OAUTH_CUSTOM_PARAMS_MAX_COUNT, + API_KEY_PARAM_NAME_MAX_LENGTH, + API_KEY_PREFIX_MAX_LENGTH +} from '../../../../../utils/constants'; + +/** + * MCP Gateway parameter schemas for use case deployments and updates. + * Supports Gateway-type MCP servers with Lambda, OpenAPI, and Smithy targets. + */ + +// Additional config parameters schema for OAuth and API Key authentication +const additionalConfigParamsSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Additional configuration parameters for authentication', + properties: { + OAuthAdditionalConfig: { + type: JsonSchemaType.OBJECT, + description: 'Additional OAuth configuration', + properties: { + scopes: { + type: JsonSchemaType.ARRAY, + description: 'OAuth scopes', + maxItems: OAUTH_SCOPES_MAX_COUNT, + items: { + type: JsonSchemaType.STRING, + maxLength: OAUTH_SCOPE_MAX_LENGTH + } + }, + customParameters: { + type: JsonSchemaType.ARRAY, + description: 'Custom OAuth parameters', + maxItems: OAUTH_CUSTOM_PARAMS_MAX_COUNT, + items: { + type: JsonSchemaType.OBJECT, + properties: { + key: { + type: JsonSchemaType.STRING, + description: 'Parameter key', + maxLength: OAUTH_CUSTOM_PARAM_KEY_MAX_LENGTH + }, + value: { + type: JsonSchemaType.STRING, + description: 'Parameter value', + maxLength: OAUTH_CUSTOM_PARAM_VALUE_MAX_LENGTH + } + }, + additionalProperties: false + } + } + }, + additionalProperties: false + }, + ApiKeyAdditionalConfig: { + type: JsonSchemaType.OBJECT, + description: 'Additional API Key configuration', + properties: { + location: { + type: JsonSchemaType.STRING, + description: 'Location of the API key', + enum: ['HEADER', 'QUERY_PARAMETER'] + }, + parameterName: { + type: JsonSchemaType.STRING, + description: 'Name of the parameter containing the API key', + maxLength: API_KEY_PARAM_NAME_MAX_LENGTH + }, + prefix: { + type: JsonSchemaType.STRING, + description: 'Prefix for the API key value', + maxLength: API_KEY_PREFIX_MAX_LENGTH + } + }, + additionalProperties: false + } + }, + additionalProperties: false +}; + +// Outbound authentication parameters schema +const outboundAuthParamsSchema: JsonSchema = { + oneOf: [ + { + type: JsonSchemaType.OBJECT, + description: 'OAuth authentication configuration', + properties: { + OutboundAuthProviderType: { + type: JsonSchemaType.STRING, + enum: ['OAUTH'] + }, + OutboundAuthProviderArn: { + type: JsonSchemaType.STRING, + description: 'ARN of the OAuth authentication provider', + pattern: BEDROCK_AGENTCORE_OAUTH_ARN_PATTERN, + minLength: 1 + }, + AdditionalConfigParams: additionalConfigParamsSchema + }, + required: ['OutboundAuthProviderArn', 'OutboundAuthProviderType'], + additionalProperties: false + }, + { + type: JsonSchemaType.OBJECT, + description: 'API Key authentication configuration', + properties: { + OutboundAuthProviderType: { + type: JsonSchemaType.STRING, + enum: ['API_KEY'] + }, + OutboundAuthProviderArn: { + type: JsonSchemaType.STRING, + description: 'ARN of the API Key authentication provider', + pattern: BEDROCK_AGENTCORE_API_KEY_ARN_PATTERN, + minLength: 1 + }, + AdditionalConfigParams: additionalConfigParamsSchema + }, + required: ['OutboundAuthProviderArn', 'OutboundAuthProviderType'], + additionalProperties: false + } + ] +}; + +/** + * MCP Gateway parameter schemas for use case deployments and updates. + * Supports Gateway-type MCP servers with Lambda, OpenAPI, and Smithy targets. + */ + +// Common Gateway target schema used in both create and update operations +const gatewayTargetSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Configuration for a Gateway target (Lambda, OpenAPI, or Smithy)', + properties: { + TargetName: { + type: JsonSchemaType.STRING, + description: 'Unique name for the Gateway target', + pattern: MCP_GATEWAY_TARGET_NAME_PATTERN, + minLength: 1, + maxLength: MCP_GATEWAY_TARGET_NAME_MAX_LENGTH + }, + TargetDescription: { + type: JsonSchemaType.STRING, + description: 'Description of the Gateway target', + maxLength: MCP_GATEWAY_TARGET_DESCRIPTION_MAX_LENGTH + }, + TargetType: { + type: JsonSchemaType.STRING, + description: 'Type of the Gateway target', + enum: MCP_GATEWAY_TARGET_TYPES + }, + TargetId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the Gateway target (10 uppercase characters)', + pattern: '^[A-Z0-9]{10}$', + minLength: 10, + maxLength: 10 + }, + LambdaArn: { + type: JsonSchemaType.STRING, + description: 'ARN of the Lambda function (required for lambda target type)', + pattern: LAMBDA_ARN_PATTERN + }, + SchemaUri: { + type: JsonSchemaType.STRING, + description: 'MCP schema key path for the target configuration', + pattern: MCP_SCHEMA_KEY_PATTERN, + minLength: 1 + }, + OutboundAuthParams: outboundAuthParamsSchema + }, + required: ['TargetName', 'TargetType', 'SchemaUri'], + // Conditional validation based on target type + oneOf: [ + { + // Lambda target validation - requires ARN and schema + properties: { + TargetType: { enum: ['lambda'] } + }, + required: ['LambdaArn'] + }, + { + // OpenAPI target validation - requires schema and outbound auth + properties: { + TargetType: { enum: ['openApiSchema'] } + }, + required: ['OutboundAuthParams'] + }, + { + // Smithy target validation - only requires schema (already in base required) + properties: { + TargetType: { enum: ['smithyModel'] } + } + } + ], + additionalProperties: false +}; + +// Gateway configuration schema for both creating and updating MCP servers +export const gatewayParams: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Gateway configuration parameters for MCP servers', + properties: { + GatewayArn: { + type: JsonSchemaType.STRING, + description: 'ARN of the MCP Gateway', + pattern: '^arn:aws:bedrock-agentcore:[a-z0-9-]+:[0-9]{12}:gateway/[a-zA-Z0-9-]+$' + }, + GatewayUrl: { + type: JsonSchemaType.STRING, + description: 'URL of the MCP Gateway', + pattern: '^https://[a-zA-Z0-9-]+\\.gateway\\.bedrock-agentcore\\.[a-z0-9-]+\\.amazonaws\\.com/mcp$' + }, + GatewayName: { + type: JsonSchemaType.STRING, + description: 'Name of the MCP Gateway' + }, + GatewayId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the MCP Gateway', + pattern: '^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)+$' + }, + TargetParams: { + type: JsonSchemaType.ARRAY, + description: 'Array of Gateway targets (Lambda, OpenAPI, Smithy)', + items: gatewayTargetSchema, + minItems: 1, + maxItems: MCP_GATEWAY_MAX_TARGETS_PER_GATEWAY + } + }, + required: ['TargetParams'], + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/mcp/update-mcp-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deployments/mcp/update-mcp-usecase-body.ts new file mode 100644 index 00000000..45393c2d --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/mcp/update-mcp-usecase-body.ts @@ -0,0 +1,84 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { gatewayParams } from './params/mcp-gateway-params'; +import { updateUseCaseProperties } from '../base-usecase-schema'; +import { USE_CASE_TYPES, MCP_RUNTIME_ENV_VARS_MAX_COUNT, ECR_URI_PATTERN } from '../../../../utils/constants'; + +/** + * JSON Schema for updating an existing MCP use case via the REST API. + * This schema validates the request body for PUT /deployments/{useCaseId} operations for MCP servers. + * Supports partial updates for both Gateway-type and Runtime-type MCP servers. + */ +export const updateMcpUseCaseBodySchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT7, + type: JsonSchemaType.OBJECT, + properties: { + ...updateUseCaseProperties, + UseCaseType: { + type: JsonSchemaType.STRING, + description: 'Type of the use case to be uploaded. Must be "MCPServer" for MCP deployments.', + enum: [USE_CASE_TYPES.MCP_SERVER] + }, + MCPParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters for MCP server configuration updates.', + properties: { + GatewayParams: gatewayParams, + RuntimeParams: { + type: JsonSchemaType.OBJECT, + description: 'Runtime-specific configuration updates (only for RUNTIME type)', + properties: { + EcrUri: { + type: JsonSchemaType.STRING, + description: 'ECR Docker image URI for the MCP server runtime', + pattern: ECR_URI_PATTERN + }, + EnvironmentVariables: { + type: JsonSchemaType.OBJECT, + description: 'Environment variables to pass to the MCP server runtime container', + maxProperties: MCP_RUNTIME_ENV_VARS_MAX_COUNT, + patternProperties: { + '^[a-zA-Z_][a-zA-Z0-9_]*$': { + type: JsonSchemaType.STRING, + description: 'Environment variable value' + } + }, + additionalProperties: false + } + }, + additionalProperties: false + } + }, + // Ensure at least one field is provided for update to prevent empty update requests + anyOf: [{ required: ['GatewayParams'] }, { required: ['RuntimeParams'] }], + additionalProperties: false + } + }, + // Conditional validation based on MCP server type - inferred from which params are present + oneOf: [ + { + // Gateway-type MCP server updates + properties: { + MCPParams: { + properties: { + RuntimeParams: { not: {} } + } + } + } + }, + { + // Runtime-type MCP server updates + properties: { + MCPParams: { + properties: { + GatewayParams: { not: {} } + } + } + } + } + ], + required: ['MCPParams'], + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/mcp/update-mcp-usecase-response.ts b/source/infrastructure/lib/api/model-schema/deployments/mcp/update-mcp-usecase-response.ts new file mode 100644 index 00000000..21265c09 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/mcp/update-mcp-usecase-response.ts @@ -0,0 +1,20 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +/** + * JSON Schema for the response returned after successfully updating an existing MCP use case. + * Contains the unique identifier of the updated MCP use case for confirmation. + */ +export const updateMcpUseCaseResponseSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + properties: { + useCaseId: { + type: JsonSchemaType.STRING, + description: 'ID of the MCP use case that was updated' + } + }, + required: ['useCaseId'], + additionalProperties: false +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/deployments/mcp/upload-schema-body.ts b/source/infrastructure/lib/api/model-schema/deployments/mcp/upload-schema-body.ts new file mode 100644 index 00000000..86ea70fd --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/mcp/upload-schema-body.ts @@ -0,0 +1,53 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { + MCP_GATEWAY_TARGET_TYPES, + MCP_SCHEMA_FILE_NAME_PATTERN, + UPLOADED_FILE_NAME_MIN_LENGTH, + UPLOADED_FILE_NAME_MAX_LENGTH, + MCP_GATEWAY_MAX_TARGETS_PER_GATEWAY +} from '../../../../utils/constants'; + +/** + * JSON Schema for uploading MCP schemas via the REST API. + * This schema validates the request body for POST /deployments/mcp/upload-schema operations. + * Validates the API Gateway event body structure expected by the mcp-handler. + * The handler expects a 'files' array containing file upload information. + */ +export const uploadMcpSchemaBodySchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT7, + type: JsonSchemaType.OBJECT, + properties: { + files: { + type: JsonSchemaType.ARRAY, + description: 'Array of files to be uploaded for MCP schema processing', + minItems: 1, + maxItems: MCP_GATEWAY_MAX_TARGETS_PER_GATEWAY, + items: { + type: JsonSchemaType.OBJECT, + properties: { + schemaType: { + type: JsonSchemaType.STRING, + description: + 'Type of schema being uploaded. Determines allowed file extensions and validation rules.', + enum: MCP_GATEWAY_TARGET_TYPES + }, + fileName: { + type: JsonSchemaType.STRING, + description: + 'Name of the file being uploaded. Must have appropriate extension for the schema type. Detailed validation is performed at the Lambda level.', + pattern: MCP_SCHEMA_FILE_NAME_PATTERN, + minLength: UPLOADED_FILE_NAME_MIN_LENGTH, + maxLength: UPLOADED_FILE_NAME_MAX_LENGTH + } + }, + required: ['schemaType', 'fileName'], + additionalProperties: false + } + } + }, + required: ['files'], + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/mcp/upload-schema-response.ts b/source/infrastructure/lib/api/model-schema/deployments/mcp/upload-schema-response.ts new file mode 100644 index 00000000..4f09db29 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/mcp/upload-schema-response.ts @@ -0,0 +1,54 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; + +/** + * JSON Schema for the response returned after successfully requesting MCP schema uploads. + * Contains an array of presigned POST URLs and associated metadata for uploading schema files to S3. + */ +export const uploadMcpSchemaResponseSchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT7, + type: JsonSchemaType.OBJECT, + properties: { + uploads: { + type: JsonSchemaType.ARRAY, + description: 'Array of presigned POST responses for uploading schema files', + items: { + type: JsonSchemaType.OBJECT, + properties: { + uploadUrl: { + type: JsonSchemaType.STRING, + description: 'The S3 presigned POST URL for uploading the file', + format: 'uri' + }, + formFields: { + type: JsonSchemaType.OBJECT, + description: 'Form fields required for the S3 presigned POST request', + additionalProperties: { + type: JsonSchemaType.STRING + } + }, + fileName: { + type: JsonSchemaType.STRING, + description: 'Original name of the file to be uploaded' + }, + expiresIn: { + type: JsonSchemaType.INTEGER, + description: 'Number of seconds until the presigned URL expires', + minimum: 1 + }, + createdAt: { + type: JsonSchemaType.STRING, + description: 'ISO 8601 timestamp when the presigned URL was created', + format: 'date-time' + } + }, + required: ['uploadUrl', 'formFields', 'fileName', 'expiresIn', 'createdAt'], + additionalProperties: false + } + } + }, + required: ['uploads'], + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/update-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deployments/update-usecase-body.ts new file mode 100644 index 00000000..ff226249 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/update-usecase-body.ts @@ -0,0 +1,108 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { + DEFAULT_CONVERSATION_MEMORY_TYPE, + SUPPORTED_AGENT_TYPES, + SUPPORTED_CONVERSATION_MEMORY_TYPES, + USE_CASE_TYPES +} from '../../../utils/constants'; +import { updateUseCaseProperties } from './base-usecase-schema'; +import { llmParamsUpdateSchema } from '../shared/llm-params'; +import { knowledgeBaseParamsUpdateSchema } from '../shared/knowledge-base-params'; + +/** + * JSON Schema for updating an existing use case via the REST API. + * This schema validates the request body for PUT /deployments/{useCaseId} operations. + * Requires at least one field to be updated to ensure meaningful changes. + */ +export const updateUseCaseBodySchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT7, + type: JsonSchemaType.OBJECT, + properties: { + ...updateUseCaseProperties, + UseCaseType: { + type: JsonSchemaType.STRING, + description: 'Type of the use case to be deployed. Either "Text" or "Agent".', + enum: [USE_CASE_TYPES.TEXT, USE_CASE_TYPES.AGENT] + }, + ConversationMemoryParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters related to storing and using the chat history', + properties: { + ConversationMemoryType: { + type: JsonSchemaType.STRING, + default: DEFAULT_CONVERSATION_MEMORY_TYPE, + enum: SUPPORTED_CONVERSATION_MEMORY_TYPES + }, + HumanPrefix: { + type: JsonSchemaType.STRING, + description: 'Prefix used in the history when storing messages sent by the user' + }, + AiPrefix: { + type: JsonSchemaType.STRING, + description: 'Prefix used in the history when storing responses from the LLM' + }, + ChatHistoryLength: { + type: JsonSchemaType.INTEGER, + description: 'Number of messages to store in the history', + minimum: 0 + } + }, + additionalProperties: false + }, + KnowledgeBaseParams: knowledgeBaseParamsUpdateSchema, + LlmParams: llmParamsUpdateSchema, + AgentParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters for Bedrock agent invocation workflow.', + properties: { + AgentType: { + type: JsonSchemaType.STRING, + description: 'The type of agent to use. Required.', + enum: SUPPORTED_AGENT_TYPES + }, + BedrockAgentParams: { + type: JsonSchemaType.OBJECT, + properties: { + AgentId: { + type: JsonSchemaType.STRING, + description: 'ID of the Bedrock agent to be invoked.', + pattern: '^[0-9a-zA-Z]+$', + maxLength: 10 + }, + AgentAliasId: { + type: JsonSchemaType.STRING, + description: 'Alias ID of the Bedrock agent to be invoked.', + pattern: '^[0-9a-zA-Z]+$', + maxLength: 10 + }, + EnableTrace: { + type: JsonSchemaType.BOOLEAN, + description: 'Whether to enable tracing for the agent invocation.', + default: false + } + }, + required: ['AgentId', 'AgentAliasId'], + additionalProperties: false + } + }, + additionalProperties: false + } + }, + // Ensure at least one field is provided for update to prevent empty update requests + anyOf: [ + { required: ['UseCaseDescription'] }, + { required: ['DefaultUserEmail'] }, + { required: ['VpcParams'] }, + { required: ['ConversationMemoryParams'] }, + { required: ['KnowledgeBaseParams'] }, + { required: ['LlmParams'] }, + { required: ['AgentParams'] }, + { required: ['AuthenticationParams'] }, + { required: ['ProvisionedConcurrencyValue'] } + ], + required: ['UseCaseType'], + additionalProperties: false +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/update-usecase-response.ts b/source/infrastructure/lib/api/model-schema/deployments/update-usecase-response.ts similarity index 70% rename from source/infrastructure/lib/api/model-schema/update-usecase-response.ts rename to source/infrastructure/lib/api/model-schema/deployments/update-usecase-response.ts index a99791c6..6909e0ce 100644 --- a/source/infrastructure/lib/api/model-schema/update-usecase-response.ts +++ b/source/infrastructure/lib/api/model-schema/deployments/update-usecase-response.ts @@ -3,7 +3,10 @@ import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; -// note: we could add more to the response here. Stack ID? Status? what would be useful? +/** + * JSON Schema for the response returned after successfully updating an existing use case. + * Contains the unique identifier of the updated use case for confirmation. + */ export const updateUseCaseResponseSchema: JsonSchema = { type: JsonSchemaType.OBJECT, properties: { @@ -12,4 +15,4 @@ export const updateUseCaseResponseSchema: JsonSchema = { description: 'ID of the use case that was updated' } } -}; +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-body.ts new file mode 100644 index 00000000..8fdc5d96 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-body.ts @@ -0,0 +1,31 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +import { deployUseCaseProperties } from '../base-usecase-schema'; +import { llmParamsSchema } from '../../shared/llm-params'; +import { workflowCoreParams } from './params/workflow-core-params'; +import { USE_CASE_TYPES } from '../../../../utils/constants'; + +/** + * JSON Schema for deploying a new workflow use case via the REST API. + * This schema validates the request body for POST /deployments/workflows operations. + * Supports workflow configuration with system prompt, orchestration pattern, and selected agents. + */ +export const deployWorkflowUseCaseBodySchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Schema for deploying a workflow use case', + properties: { + ...deployUseCaseProperties, + UseCaseType: { + type: JsonSchemaType.STRING, + description: 'Type of the use case to be deployed. Must be "Workflow" for workflow deployments.', + enum: [USE_CASE_TYPES.WORKFLOW] + }, + LlmParams: llmParamsSchema, + WorkflowParams: workflowCoreParams + }, + required: ['UseCaseName', 'UseCaseType', 'LlmParams', 'WorkflowParams'], + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-response.ts b/source/infrastructure/lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-response.ts new file mode 100644 index 00000000..f9ca1b25 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-response.ts @@ -0,0 +1,20 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +/** + * JSON Schema for workflow deployment response. + * This schema defines the structure of the response returned after successfully + * deploying a workflow use case via POST /deployments/workflows. + */ +export const deployWorkflowUseCaseResponseSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Response schema for workflow deployment', + properties: { + useCaseId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the deployed workflow use case' + }, + } +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/deployments/workflows/params/workflow-core-params.ts b/source/infrastructure/lib/api/model-schema/deployments/workflows/params/workflow-core-params.ts new file mode 100644 index 00000000..cca784e8 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/workflows/params/workflow-core-params.ts @@ -0,0 +1,95 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { + AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH, + WORKFLOW_MAX_SELECTED_AGENTS, + WORKFLOW_ORCHESTRATION_PATTERNS, + SUPPORTED_WORKFLOW_ORCHESTRATION_PATTERNS, + USE_CASE_TYPES +} from '../../../../../utils/constants'; +import { agentMemoryParams } from '../../../shared/agent-memory-params'; +import { llmParamsSchema } from '../../../shared/llm-params'; +import { agentCoreParams } from '../../agents/params/agent-core-params'; +import { commonUseCaseProperties, deployUseCaseProperties } from '../../base-usecase-schema'; + +/** + * JSON Schema for workflow core parameters. + * Defines the structure for workflow-specific configuration including system prompt, + * orchestration pattern, and selected agents. + */ +export const workflowCoreParams: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Core workflow configuration parameters', + properties: { + SystemPrompt: { + type: JsonSchemaType.STRING, + description: 'System prompt template for the client agent that orchestrates specialized agents', + minLength: 1, + maxLength: AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + }, + OrchestrationPattern: { + type: JsonSchemaType.STRING, + description: 'Orchestration pattern used for multi-agent coordination', + enum: SUPPORTED_WORKFLOW_ORCHESTRATION_PATTERNS + }, + AgentsAsToolsParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters related to the orchestration pattern Agents as Tools', + properties: { + Agents: { + type: JsonSchemaType.ARRAY, + description: 'List of agents to include in this workflow', + items: { + type: JsonSchemaType.OBJECT, + properties: { + UseCaseId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the selected use case', + pattern: '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$' + }, + UseCaseType: { + type: JsonSchemaType.STRING, + description: `Underlying use case type of this Agent. Supported types are ["${USE_CASE_TYPES.AGENT_BUILDER}"]`, + enum: [USE_CASE_TYPES.AGENT_BUILDER] + }, + UseCaseName: deployUseCaseProperties.UseCaseName, + UseCaseDescription: commonUseCaseProperties.UseCaseDescription, + LlmParams: llmParamsSchema, + AgentBuilderParams: agentCoreParams + }, + required: ['UseCaseId', 'UseCaseType', 'UseCaseName', 'LlmParams', 'AgentBuilderParams'], + additionalProperties: false + }, + minItems: 1, + maxItems: WORKFLOW_MAX_SELECTED_AGENTS + } + }, + required: ['Agents'], + additionalProperties: false + }, + MemoryConfig: agentMemoryParams + }, + oneOf: [ + { + properties: { + OrchestrationPattern: { enum: [WORKFLOW_ORCHESTRATION_PATTERNS.AGENT_AS_TOOLS] } + }, + required: ['AgentsAsToolsParams'] + } + ], + required: ['SystemPrompt', 'OrchestrationPattern'], + additionalProperties: false +}; + +/** + * JSON Schema for updating workflow core parameters. + * Uses expansion of the base schema with modified required fields. + */ +export const workflowCoreParamsUpdateSchema: JsonSchema = { + type: workflowCoreParams.type, + description: workflowCoreParams.description, + properties: workflowCoreParams.properties, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/workflows/update-workflow-usecase-body.ts b/source/infrastructure/lib/api/model-schema/deployments/workflows/update-workflow-usecase-body.ts new file mode 100644 index 00000000..d9be6c60 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/workflows/update-workflow-usecase-body.ts @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { updateUseCaseProperties } from '../base-usecase-schema'; +import { llmParamsUpdateSchema } from '../../shared/llm-params'; +import { workflowCoreParamsUpdateSchema } from './params/workflow-core-params'; +import { USE_CASE_TYPES } from '../../../../utils/constants'; + +/** + * JSON Schema for updating an existing workflow use case via the REST API. + * This schema validates the request body for PATCH /deployments/workflows/{id} operations. + * All workflow parameters are optional to support partial updates. + */ +export const updateWorkflowUseCaseBodySchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Schema for updating a workflow use case', + properties: { + ...updateUseCaseProperties, + UseCaseType: { + type: JsonSchemaType.STRING, + description: 'Type of the use case to be deployed. Must be "Workflow" for workflow deployments.', + enum: [USE_CASE_TYPES.WORKFLOW] + }, + LlmParams: llmParamsUpdateSchema, + WorkflowParams: workflowCoreParamsUpdateSchema + }, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/deployments/workflows/update-workflow-usecase-response.ts b/source/infrastructure/lib/api/model-schema/deployments/workflows/update-workflow-usecase-response.ts new file mode 100644 index 00000000..f9ab4c6b --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/deployments/workflows/update-workflow-usecase-response.ts @@ -0,0 +1,20 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +/** + * JSON Schema for workflow update response. + * This schema defines the structure of the response returned after successfully + * updating a workflow use case via PATCH /deployments/workflows/{id}. + */ +export const updateWorkflowUseCaseResponseSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Response schema for workflow updates', + properties: { + useCaseId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the updated workflow use case' + } + } +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/feedback-body.ts b/source/infrastructure/lib/api/model-schema/feedback/feedback-body.ts similarity index 90% rename from source/infrastructure/lib/api/model-schema/feedback-body.ts rename to source/infrastructure/lib/api/model-schema/feedback/feedback-body.ts index b7bce916..1d51dc9d 100644 --- a/source/infrastructure/lib/api/model-schema/feedback-body.ts +++ b/source/infrastructure/lib/api/model-schema/feedback/feedback-body.ts @@ -7,9 +7,13 @@ import { MAX_REPHRASED_QUERY_LENGTH, MAX_COMMENT_LENGTH, FEEDBACK_VALUES -} from '../../utils/constants'; +} from '../../../utils/constants'; -// Define the feedback request schema +/** + * JSON Schema for user feedback submissions via the REST API. + * This schema validates feedback requests for LLM responses, including ratings, + * reasons, and optional comments to improve model performance. + */ export const feedbackRequestSchema: JsonSchema = { schema: JsonSchemaVersion.DRAFT4, type: JsonSchemaType.OBJECT, @@ -63,4 +67,4 @@ export const feedbackRequestSchema: JsonSchema = { } }, additionalProperties: false -}; +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/index.ts b/source/infrastructure/lib/api/model-schema/index.ts new file mode 100644 index 00000000..6f449cdb --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/index.ts @@ -0,0 +1,66 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { deployUseCaseBodySchema } from './deployments/deploy-usecase-body'; +import { deployUseCaseResponseSchema } from './deployments/deploy-usecase-response'; +import { updateUseCaseBodySchema } from './deployments/update-usecase-body'; +import { updateUseCaseResponseSchema } from './deployments/update-usecase-response'; +import { deployMcpUseCaseBodySchema } from './deployments/mcp/deploy-mcp-usecase-body'; +import { deployMcpUseCaseResponseSchema } from './deployments/mcp/deploy-mcp-usecase-response'; +import { updateMcpUseCaseBodySchema } from './deployments/mcp/update-mcp-usecase-body'; +import { updateMcpUseCaseResponseSchema } from './deployments/mcp/update-mcp-usecase-response'; +import { deployAgentUseCaseBodySchema } from './deployments/agents/deploy-agent-usecase-body'; +import { deployAgentUseCaseResponseSchema } from './deployments/agents/deploy-agent-usecase-response'; +import { updateAgentUseCaseBodySchema } from './deployments/agents/update-agent-usecase-body'; +import { updateAgentUseCaseResponseSchema } from './deployments/agents/update-agent-usecase-response'; +import { deployWorkflowUseCaseBodySchema } from './deployments/workflows/deploy-workflow-usecase-body'; +import { deployWorkflowUseCaseResponseSchema } from './deployments/workflows/deploy-workflow-usecase-response'; +import { updateWorkflowUseCaseBodySchema } from './deployments/workflows/update-workflow-usecase-body'; +import { updateWorkflowUseCaseResponseSchema } from './deployments/workflows/update-workflow-usecase-response'; +import { filesUploadRequestSchema } from './multimodal/files-upload-request-body'; +import { filesUploadResponseSchema } from './multimodal/files-upload-response-body'; +import { filesDeleteRequestSchema } from './multimodal/files-delete-request-body'; +import { filesDeleteResponseSchema } from './multimodal/files-delete-response-body'; +import { filesGetResponseSchema } from './multimodal/files-get-response-body'; + +export const UseCaseDeploymentSchemas = { + base: { + deploy: deployUseCaseBodySchema, + deployResponse: deployUseCaseResponseSchema, + update: updateUseCaseBodySchema, + updateResponse: updateUseCaseResponseSchema + }, + mcp: { + deploy: deployMcpUseCaseBodySchema, + deployResponse: deployMcpUseCaseResponseSchema, + update: updateMcpUseCaseBodySchema, + updateResponse: updateMcpUseCaseResponseSchema + }, + agent: { + deploy: deployAgentUseCaseBodySchema, + deployResponse: deployAgentUseCaseResponseSchema, + update: updateAgentUseCaseBodySchema, + updateResponse: updateAgentUseCaseResponseSchema + }, + workflow: { + deploy: deployWorkflowUseCaseBodySchema, + deployResponse: deployWorkflowUseCaseResponseSchema, + update: updateWorkflowUseCaseBodySchema, + updateResponse: updateWorkflowUseCaseResponseSchema + } +} as const; + +export const FileOperationSchemas = { + upload: { + request: filesUploadRequestSchema, + response: filesUploadResponseSchema + }, + delete: { + request: filesDeleteRequestSchema, + response: filesDeleteResponseSchema + }, + get: { + // Note: GET requests use query parameters, not request body schema + response: filesGetResponseSchema + } +} as const; diff --git a/source/infrastructure/lib/api/model-schema/multimodal/files-delete-request-body.ts b/source/infrastructure/lib/api/model-schema/multimodal/files-delete-request-body.ts new file mode 100644 index 00000000..437320cf --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/multimodal/files-delete-request-body.ts @@ -0,0 +1,47 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { + UPLOADED_FILE_NAME_MIN_LENGTH, + UPLOADED_FILE_NAME_MAX_LENGTH, + MAX_FILE_DELETES_PER_BATCH, + UUID_PATTERN, + MULTIMODAL_FILENAME_PATTERN, + SUPPORTED_MULTIMODAL_FILE_EXTENSIONS +} from '../../../utils/constants'; + +/** + * JSON Schema for file deletion requests via the DELETE /files REST API. + */ +export const filesDeleteRequestSchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT4, + type: JsonSchemaType.OBJECT, + required: ['fileNames', 'conversationId', 'messageId'], + properties: { + fileNames: { + type: JsonSchemaType.ARRAY, + description: 'Array of filenames to delete', + minItems: 1, + maxItems: MAX_FILE_DELETES_PER_BATCH, + items: { + type: JsonSchemaType.STRING, + description: `Filename to delete with supported extension (${SUPPORTED_MULTIMODAL_FILE_EXTENSIONS.join('|')})`, + minLength: UPLOADED_FILE_NAME_MIN_LENGTH, + maxLength: UPLOADED_FILE_NAME_MAX_LENGTH, + pattern: MULTIMODAL_FILENAME_PATTERN + } + }, + conversationId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the conversation', + pattern: UUID_PATTERN + }, + messageId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the message', + pattern: UUID_PATTERN + } + }, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/multimodal/files-delete-response-body.ts b/source/infrastructure/lib/api/model-schema/multimodal/files-delete-response-body.ts new file mode 100644 index 00000000..70b6bf1e --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/multimodal/files-delete-response-body.ts @@ -0,0 +1,60 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { UPLOADED_FILE_NAME_MIN_LENGTH, MULTIMODAL_FILENAME_PATTERN } from '../../../utils/constants'; + +/** + * JSON Schema for file deletion response from the DELETE /files REST API. + */ +export const filesDeleteResponseSchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT4, + type: JsonSchemaType.OBJECT, + required: ['deletions', 'allSuccessful', 'failureCount'], + properties: { + deletions: { + type: JsonSchemaType.ARRAY, + description: 'Array of deletion results for each file', + minItems: 1, + items: { + type: JsonSchemaType.OBJECT, + required: ['success', 'fileName'], + properties: { + success: { + type: JsonSchemaType.BOOLEAN, + description: 'Whether the deletion was successful' + }, + fileName: { + type: JsonSchemaType.STRING, + description: 'Filename that was processed', + pattern: MULTIMODAL_FILENAME_PATTERN, + minLength: UPLOADED_FILE_NAME_MIN_LENGTH + }, + error: { + anyOf: [ + { + type: JsonSchemaType.STRING, + minLength: 1 + }, + { + type: JsonSchemaType.NULL + } + ], + description: 'Error message if deletion failed (null if no error details available)' + } + }, + additionalProperties: false + } + }, + allSuccessful: { + type: JsonSchemaType.BOOLEAN, + description: 'Whether all deletions were successful' + }, + failureCount: { + type: JsonSchemaType.INTEGER, + description: 'Number of failed deletions', + minimum: 0 + } + }, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/multimodal/files-get-response-body.ts b/source/infrastructure/lib/api/model-schema/multimodal/files-get-response-body.ts new file mode 100644 index 00000000..f3b81969 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/multimodal/files-get-response-body.ts @@ -0,0 +1,23 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { UPLOADED_FILE_NAME_MIN_LENGTH, MULTIMODAL_FILENAME_PATTERN } from '../../../utils/constants'; + +/** + * JSON Schema for file retrieval response from the GET /files REST API. + */ +export const filesGetResponseSchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT4, + type: JsonSchemaType.OBJECT, + required: ['downloadUrl'], + properties: { + downloadUrl: { + type: JsonSchemaType.STRING, + description: 'Presigned URL for file download from S3', + format: 'uri', + minLength: UPLOADED_FILE_NAME_MIN_LENGTH + } + }, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/multimodal/files-upload-request-body.ts b/source/infrastructure/lib/api/model-schema/multimodal/files-upload-request-body.ts new file mode 100644 index 00000000..f68ae7dd --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/multimodal/files-upload-request-body.ts @@ -0,0 +1,47 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { + UPLOADED_FILE_NAME_MIN_LENGTH, + UPLOADED_FILE_NAME_MAX_LENGTH, + MAX_FILE_UPLOADS_PER_BATCH, + UUID_PATTERN, + MULTIMODAL_FILENAME_PATTERN, + SUPPORTED_MULTIMODAL_FILE_EXTENSIONS +} from '../../../utils/constants'; + +/** + * JSON Schema for file upload requests via the POST /files REST API. + */ +export const filesUploadRequestSchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT4, + type: JsonSchemaType.OBJECT, + required: ['fileNames', 'conversationId', 'messageId'], + properties: { + fileNames: { + type: JsonSchemaType.ARRAY, + description: 'Array of file names to upload', + minItems: 1, + maxItems: MAX_FILE_UPLOADS_PER_BATCH, + items: { + type: JsonSchemaType.STRING, + description: `Filename with supported extension (${SUPPORTED_MULTIMODAL_FILE_EXTENSIONS.join('|')})`, + minLength: UPLOADED_FILE_NAME_MIN_LENGTH, + maxLength: UPLOADED_FILE_NAME_MAX_LENGTH, + pattern: MULTIMODAL_FILENAME_PATTERN + } + }, + conversationId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the conversation', + pattern: UUID_PATTERN + }, + messageId: { + type: JsonSchemaType.STRING, + description: 'Unique identifier for the message', + pattern: UUID_PATTERN + } + }, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/multimodal/files-upload-response-body.ts b/source/infrastructure/lib/api/model-schema/multimodal/files-upload-response-body.ts new file mode 100644 index 00000000..94b50dca --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/multimodal/files-upload-response-body.ts @@ -0,0 +1,74 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; +import { UPLOADED_FILE_NAME_MIN_LENGTH } from '../../../utils/constants'; + +/** + * JSON Schema for file upload response through the POST /files REST API. + */ +export const filesUploadResponseSchema: JsonSchema = { + schema: JsonSchemaVersion.DRAFT4, + type: JsonSchemaType.OBJECT, + required: ['uploads'], + properties: { + uploads: { + type: JsonSchemaType.ARRAY, + description: 'Array of upload information for each file', + items: { + type: JsonSchemaType.OBJECT, + required: ['uploadUrl', 'formFields', 'fileName', 'fileKey', 'expiresIn', 'createdAt'], + properties: { + uploadUrl: { + type: JsonSchemaType.STRING, + description: 'The S3 presigned POST URL for uploading the file', + format: 'uri', + minLength: UPLOADED_FILE_NAME_MIN_LENGTH + }, + formFields: { + type: JsonSchemaType.OBJECT, + description: 'Form fields required for the S3 presigned POST request', + minProperties: 1, + additionalProperties: { + type: JsonSchemaType.STRING + } + }, + fileName: { + type: JsonSchemaType.STRING, + description: 'Original name of the file to be uploaded', + minLength: UPLOADED_FILE_NAME_MIN_LENGTH + }, + fileKey: { + type: JsonSchemaType.STRING, + description: 'Unique file key for tracking: user-uuid/conversation-uuid/message-uuid/file-uuid', + minLength: UPLOADED_FILE_NAME_MIN_LENGTH + }, + expiresIn: { + type: JsonSchemaType.INTEGER, + description: 'Number of seconds until the presigned URL expires', + minimum: 1 + }, + createdAt: { + type: JsonSchemaType.STRING, + description: 'ISO 8601 timestamp when the presigned URL was created', + format: 'date-time' + }, + error: { + anyOf: [ + { + type: JsonSchemaType.STRING, + minLength: 1 + }, + { + type: JsonSchemaType.NULL + } + ], + description: 'Error message if deletion failed (null if no error details available)' + } + }, + additionalProperties: false + } + } + }, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/shared/agent-memory-params.ts b/source/infrastructure/lib/api/model-schema/shared/agent-memory-params.ts new file mode 100644 index 00000000..2e2af854 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/shared/agent-memory-params.ts @@ -0,0 +1,17 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +export const agentMemoryParams: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Agent memory configuration parameters', + properties: { + LongTermEnabled: { + type: JsonSchemaType.BOOLEAN, + description: 'Enable long-term memory for the agent', + default: false + } + }, + additionalProperties: false +}; diff --git a/source/infrastructure/lib/api/model-schema/shared/auth-params.ts b/source/infrastructure/lib/api/model-schema/shared/auth-params.ts new file mode 100644 index 00000000..3ebbcf9f --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/shared/auth-params.ts @@ -0,0 +1,51 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { AUTHENTICATION_PROVIDERS, SUPPORTED_AUTHENTICATION_PROVIDERS } from '../../../utils/constants'; + +/** + * Authentication parameter schema for use case deployments. + * Currently supports Cognito User Pool authentication with optional existing pool configuration. + */ +export const authenticationParamsSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Parameters related to the Authentication.', + properties: { + AuthenticationProvider: { + type: JsonSchemaType.STRING, + description: 'Supported authentication provider.', + enum: SUPPORTED_AUTHENTICATION_PROVIDERS + }, + CognitoParams: { + type: JsonSchemaType.OBJECT, + description: 'Cognito user pool related parameters.', + properties: { + ExistingUserPoolId: { + type: JsonSchemaType.STRING, + description: 'Existing Cognito User Pool Id.', + pattern: '^[\\w-]+_[0-9a-zA-Z]+$', + minLength: 1, + maxLength: 55 + }, + ExistingUserPoolClientId: { + type: JsonSchemaType.STRING, + description: 'Existing Cognito User Pool Client Id.', + pattern: '^[\\w+]+$', + minLength: 1, + maxLength: 128 + } + }, + required: ['ExistingUserPoolId'] + } + }, + anyOf: [ + { + properties: { + AuthenticationProvider: { enum: [AUTHENTICATION_PROVIDERS.COGNITO] } + }, + required: ['CognitoParams'] + } + ], + required: ['AuthenticationProvider'] +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/shared/knowledge-base-params.ts b/source/infrastructure/lib/api/model-schema/shared/knowledge-base-params.ts new file mode 100644 index 00000000..f4d229ab --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/shared/knowledge-base-params.ts @@ -0,0 +1,321 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { + DEFAULT_ENABLE_RBAC, + DEFAULT_KENDRA_EDITION, + DEFAULT_KENDRA_NUMBER_OF_DOCS, + DEFAULT_KENDRA_QUERY_CAPACITY_UNITS, + DEFAULT_KENDRA_STORAGE_CAPACITY_UNITS, + DEFAULT_RETURN_SOURCE_DOCS, + DEFAULT_SCORE_THRESHOLD, + KENDRA_EDITIONS, + KNOWLEDGE_BASE_TYPES, + MAX_KENDRA_NUMBER_OF_DOCS, + MAX_KENDRA_QUERY_CAPACITY_UNITS, + MAX_KENDRA_STORAGE_CAPACITY_UNITS, + MAX_SCORE_THRESHOLD, + MIN_KENDRA_NUMBER_OF_DOCS, + MIN_SCORE_THRESHOLD, + SUPPORTED_KNOWLEDGE_BASE_TYPES +} from '../../../utils/constants'; + +/** + * Knowledge base parameter schemas for RAG-enabled use cases. + * Supports both Amazon Kendra and Amazon Bedrock Knowledge Bases with conditional validation. + */ + +export const knowledgeBaseParamsSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: + 'Parameters related to the knowledge base. Based on KnowledgeBaseType, different nested parameters are required.', + properties: { + KnowledgeBaseType: { + type: JsonSchemaType.STRING, + description: 'The type of knowledge base to use. Required.', + default: KNOWLEDGE_BASE_TYPES.KENDRA, + enum: SUPPORTED_KNOWLEDGE_BASE_TYPES + }, + NoDocsFoundResponse: { + type: JsonSchemaType.STRING, + description: 'Response text message to use when the knowledge base does not return any documents', + minLength: 1 + }, + KendraKnowledgeBaseParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters specific to Kendra', + properties: { + ExistingKendraIndexId: { + type: JsonSchemaType.STRING, + description: + 'Index ID of an existing Kendra index to be used for the use case. Required if KendraIndexName is not provided.', + pattern: '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$' + }, + KendraIndexName: { + type: JsonSchemaType.STRING, + description: + 'Name of the new Kendra index to be created, if provided. Required if ExistingKendraIndexId is not provided.', + pattern: '^[0-9a-zA-Z-]{1,64}$' + }, + QueryCapacityUnits: { + type: JsonSchemaType.INTEGER, + description: + 'Number of additional query capacity units to provision for the new Kendra index to be created. Can only be provided if if ExistingKendraIndexId is not provided.', + default: DEFAULT_KENDRA_QUERY_CAPACITY_UNITS, + minimum: 0, + maximum: MAX_KENDRA_QUERY_CAPACITY_UNITS + }, + StorageCapacityUnits: { + type: JsonSchemaType.INTEGER, + description: + 'Number of additional storage capacity units to provision for the new Kendra index to be created. Can only be provided if if ExistingKendraIndexId is not provided.', + default: DEFAULT_KENDRA_STORAGE_CAPACITY_UNITS, + minimum: 0, + maximum: MAX_KENDRA_STORAGE_CAPACITY_UNITS + }, + KendraIndexEdition: { + type: JsonSchemaType.STRING, + description: + 'Edition of the Kendra index to be created. Can only be provided if if ExistingKendraIndexId is not provided.', + enum: KENDRA_EDITIONS, + default: DEFAULT_KENDRA_EDITION + }, + AttributeFilter: { + type: JsonSchemaType.OBJECT, + description: + 'Filter to apply when querying the Kendra index. See: https://docs.aws.amazon.com/kendra/latest/APIReference/API_AttributeFilter.html' + }, + RoleBasedAccessControlEnabled: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether role-based access control is enabled on the Kendra index, used to restrict Kendra queries to documents accessible by user group and id.', + default: DEFAULT_ENABLE_RBAC + } + }, + // Either use existing Kendra index or provide parameters to create a new one + oneOf: [ + { + required: ['ExistingKendraIndexId'], + properties: { + KendraIndexName: { + not: {} + }, + QueryCapacityUnits: { + not: {} + }, + StorageCapacityUnits: { + not: {} + }, + KendraIndexEdition: { + not: {} + } + } + }, + { + required: ['KendraIndexName'], + properties: { + ExistingKendraIndexId: { + not: {} + } + } + } + ], + additionalProperties: false + }, + BedrockKnowledgeBaseParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters specific to Bedrock Knowledge Bases', + properties: { + BedrockKnowledgeBaseId: { + type: JsonSchemaType.STRING, + description: + 'ID of the Bedrock knowledge base to use in a RAG use case. Required if KnowledgeBaseType is Bedrock.', + pattern: '^[0-9a-zA-Z]{1,10}$' + }, + RetrievalFilter: { + type: JsonSchemaType.OBJECT, + description: + 'Filter to apply when querying the Bedrock knowledge base. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_RetrievalFilter.html' + }, + OverrideSearchType: { + type: JsonSchemaType.STRING, + description: + "If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID search using both vector embeddings and raw text, or SEMANTIC search using only vector embeddings. By default (if this is not provided), Amazon Bedrock will choose for you. For other vector store types, passing this parameter will result in a validation error during retrieval. For more information, see https://docs.aws.amazon.com/bedrock/latest/userguide/kb-test-config.html", + enum: ['HYBRID', 'SEMANTIC', 'NONE'], + default: 'NONE' + } + }, + required: ['BedrockKnowledgeBaseId'], + additionalProperties: false + }, + NumberOfDocs: { + type: JsonSchemaType.INTEGER, + description: + 'The number of documents returned from the knowledge base which will be used as context to be sent to the LLM', + default: DEFAULT_KENDRA_NUMBER_OF_DOCS, + minimum: MIN_KENDRA_NUMBER_OF_DOCS, + maximum: MAX_KENDRA_NUMBER_OF_DOCS + }, + ScoreThreshold: { + type: JsonSchemaType.NUMBER, + description: 'The minimum score a document must have to be returned from the knowledge base', + default: DEFAULT_SCORE_THRESHOLD, + minimum: MIN_SCORE_THRESHOLD, + maximum: MAX_SCORE_THRESHOLD + }, + ReturnSourceDocs: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether to return information about the source of documents returned from the knowledge base', + default: DEFAULT_RETURN_SOURCE_DOCS + } + }, + // Ensure only parameters for the selected knowledge base type are provided + oneOf: [ + { + properties: { + KnowledgeBaseType: { enum: [KNOWLEDGE_BASE_TYPES.KENDRA] }, + BedrockKnowledgeBaseParams: { + not: {} + } + }, + required: ['KendraKnowledgeBaseParams'] + }, + { + properties: { + KnowledgeBaseType: { enum: [KNOWLEDGE_BASE_TYPES.BEDROCK] }, + KendraKnowledgeBaseParams: { + not: {} + } + }, + required: ['BedrockKnowledgeBaseParams'] + } + ], + required: ['KnowledgeBaseType'], + additionalProperties: false +}; + +// Schema for knowledge base parameters when updating an existing use case +export const knowledgeBaseParamsUpdateSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: + 'Parameters related to the knowledge base. Based on KnowledgeBaseType, different nested parameters are required.', + properties: { + KnowledgeBaseType: { + type: JsonSchemaType.STRING, + description: 'The type of knowledge base to use. Required.', + default: KNOWLEDGE_BASE_TYPES.KENDRA, + enum: SUPPORTED_KNOWLEDGE_BASE_TYPES + }, + NoDocsFoundResponse: { + type: JsonSchemaType.STRING, + description: 'Response text message to use when the knowledge base does not return any documents', + minLength: 1 + }, + KendraKnowledgeBaseParams: { + type: JsonSchemaType.OBJECT, + description: + 'Parameters specific to Kendra. Note on update we can only reference an existing Kendra index, creating a new one is not supported currently.', + properties: { + ExistingKendraIndexId: { + type: JsonSchemaType.STRING, + description: + 'Index ID of an existing Kendra index to be used for the use case. Required if KendraIndexName is not provided.', + pattern: '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$' + }, + AttributeFilter: { + type: JsonSchemaType.OBJECT, + description: + 'Filter to apply when querying the Kendra index. See: https://docs.aws.amazon.com/kendra/latest/APIReference/API_AttributeFilter.html' + }, + RoleBasedAccessControlEnabled: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether role-based access control is enabled on the Kendra index, used to restrict Kendra queries to documents accessible by user group and id.', + default: DEFAULT_ENABLE_RBAC + } + }, + minProperties: 1, + additionalProperties: false + }, + BedrockKnowledgeBaseParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters specific to Bedrock Knowledge Bases', + properties: { + BedrockKnowledgeBaseId: { + type: JsonSchemaType.STRING, + description: + 'ID of the Bedrock knowledge base to use in a RAG use case. Required if KnowledgeBaseType is Bedrock.', + pattern: '^[0-9a-zA-Z]{1,10}$' + }, + RetrievalFilter: { + type: JsonSchemaType.OBJECT, + description: + 'Filter to apply when querying the Bedrock knowledge base. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_RetrievalFilter.html' + }, + + OverrideSearchType: { + type: JsonSchemaType.STRING, + description: + "If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID search using both vector embeddings and raw text, or SEMANTIC search using only vector embeddings. By default (if this is not provided), Amazon Bedrock will choose for you. For other vector store types, passing this parameter will result in a validation error during retrieval. For more information, see https://docs.aws.amazon.com/bedrock/latest/userguide/kb-test-config.html", + enum: ['HYBRID', 'SEMANTIC', 'NONE'], + default: 'NONE' + } + }, + additionalProperties: false + }, + NumberOfDocs: { + type: JsonSchemaType.INTEGER, + description: + 'The number of documents returned from the knowledge base which will be used as context to be sent to the LLM', + default: DEFAULT_KENDRA_NUMBER_OF_DOCS, + minimum: MIN_KENDRA_NUMBER_OF_DOCS, + maximum: MAX_KENDRA_NUMBER_OF_DOCS + }, + ScoreThreshold: { + type: JsonSchemaType.NUMBER, + description: 'The minimum score a document must have to be returned from the knowledge base', + default: DEFAULT_SCORE_THRESHOLD, + minimum: MIN_SCORE_THRESHOLD, + maximum: MAX_SCORE_THRESHOLD + }, + ReturnSourceDocs: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether to return information about the source of documents returned from the knowledge base', + default: DEFAULT_RETURN_SOURCE_DOCS + } + }, + oneOf: [ + // Case 1: Knowledge base type unchanged - no additional validation needed + { + properties: { + KnowledgeBaseType: { + not: {} + } + } + }, + // Case 2: Switching to Kendra - Bedrock params not allowed + { + properties: { + KnowledgeBaseType: { enum: [KNOWLEDGE_BASE_TYPES.KENDRA] }, + BedrockKnowledgeBaseParams: { + not: {} + } + }, + required: ['KendraKnowledgeBaseParams', 'KnowledgeBaseType'] + }, + // Case 3: Switching to Bedrock Knowledge Base - Kendra params not allowed + { + properties: { + KnowledgeBaseType: { enum: [KNOWLEDGE_BASE_TYPES.BEDROCK] }, + KendraKnowledgeBaseParams: { + not: {} + } + }, + required: ['BedrockKnowledgeBaseParams', 'KnowledgeBaseType'] + } + ], + additionalProperties: false +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/shared/llm-params.ts b/source/infrastructure/lib/api/model-schema/shared/llm-params.ts new file mode 100644 index 00000000..39d7eee3 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/shared/llm-params.ts @@ -0,0 +1,506 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; +import { + BEDROCK_INFERENCE_TYPES, + CHAT_PROVIDERS, + MODEL_PARAM_TYPES, + SUPPORTED_BEDROCK_INFERENCE_TYPES, + SUPPORTED_CHAT_PROVIDERS +} from '../../../utils/constants'; + +/** + * LLM parameter schemas for use case deployments and updates. + * Supports both Amazon Bedrock and SageMaker model providers with comprehensive configuration options. + */ +export const llmParamsSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: 'Parameters related to the LLM performing inferences.', + properties: { + ModelProvider: { + type: JsonSchemaType.STRING, + description: 'Name of the LLM provider which the use case will use', + enum: SUPPORTED_CHAT_PROVIDERS + }, + BedrockLlmParams: { + type: JsonSchemaType.OBJECT, + description: `Parameters specific to use cases using Bedrock as an LLM provider. Can only be provided when "ModelProvider" is ${CHAT_PROVIDERS.BEDROCK}`, + properties: { + ModelId: { + type: JsonSchemaType.STRING, + description: + 'Depending on whether ModelArn is provided, this will either be used to select the on-demand model to invoke or be used to specify the base model that the selected provisioned/custom model is based on.', + pattern: + '^([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$' + }, + ModelArn: { + type: JsonSchemaType.STRING, + description: + 'ARN of the provisioned/custom model to use from Amazon Bedrock. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax', + pattern: + '^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-:]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))$' + }, + InferenceProfileId: { + type: JsonSchemaType.STRING, + description: + 'The identifier of the Bedrock inference profile to use when invoking the model. When provided, a ModelId and ModelArn should not be provided. All inference requests will be mapped to the specified inference profile, which can be configured in the Bedrock console. This enables cross region model invocation. See: https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-use.html', + pattern: '^[a-zA-Z0-9-:.]+$' + }, + GuardrailIdentifier: { + type: JsonSchemaType.STRING, + description: + "The unique identifier of the Bedrock guardrail that you want to be applied to all LLM invocations. If you don't provide a value, no guardrail is applied to the invocation. If provided, you must also provide a GuardrailVersion. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax", + pattern: + '^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$' + }, + GuardrailVersion: { + type: JsonSchemaType.STRING, + description: + 'Version of the guardrail to be used. Must be provided if GuardrailIdentifier is provided. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax', + pattern: '^(([1-9][0-9]{0,7})|(DRAFT))$' + }, + BedrockInferenceType: { + type: JsonSchemaType.STRING, + description: 'The type of Bedrock inference to use. Required for Bedrock LLM params.', + default: BEDROCK_INFERENCE_TYPES.QUICK_START, + enum: SUPPORTED_BEDROCK_INFERENCE_TYPES + }, + }, + required: ['BedrockInferenceType'], + allOf: [ + // Validate required fields based on selected Bedrock inference type + { + oneOf: [ + { + properties: { + BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.QUICK_START, BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION] }, + InferenceProfileId: { + not: {} + } + }, + required: ['ModelId'] + }, + { + properties: { + BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE] }, + ModelId: { + not: {} + } + }, + required: ['InferenceProfileId'] + }, + { + properties: { + BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.PROVISIONED] }, + }, + required: ['ModelArn'] + } + ] + }, + // Guardrail parameters must be provided together or not at all + { + oneOf: [ + { + required: ['GuardrailIdentifier', 'GuardrailVersion'] + }, + { + properties: { + GuardrailIdentifier: { + not: {} + }, + GuardrailVersion: { + not: {} + } + } + } + ] + } + ], + additionalProperties: false + }, + SageMakerLlmParams: { + type: JsonSchemaType.OBJECT, + description: `Parameters specific to use cases using a SageMaker model as an LLM provider. Can only be provided when "ModelProvider" is ${CHAT_PROVIDERS.SAGEMAKER}`, + properties: { + EndpointName: { + type: JsonSchemaType.STRING, + description: 'Endpoint for the deployed model to use from SageMaker', + pattern: '^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$' + }, + ModelInputPayloadSchema: { + type: JsonSchemaType.OBJECT, + description: + 'An object defining the schema to be used to populate model params for SageMaker endpoint models' + }, + ModelOutputJSONPath: { + type: JsonSchemaType.STRING, + description: + 'JSON path where the response should be retrieved from the model output payload. Applicable only to SageMaker endpoints.', + pattern: '^\\$[\\w\\.\\,\\[\\]:\\\'\\"\\-\\(\\)\\*\\?\\@]*$' + } + }, + required: ['EndpointName', 'ModelInputPayloadSchema', 'ModelOutputJSONPath'], + additionalProperties: false + }, + ModelParams: { + type: JsonSchemaType.OBJECT, + description: + 'Additional model params to be passed to the model, whose keys are as defined in the LLM documentation', + additionalProperties: { + type: JsonSchemaType.OBJECT, + properties: { + Value: { + type: JsonSchemaType.STRING, + description: 'Value of the param' + }, + Type: { + type: JsonSchemaType.STRING, + enum: MODEL_PARAM_TYPES, + description: + 'Python type of the param, as a string. Will be cast to this type before being fed to LLM.' + } + }, + required: ['Value', 'Type'] + } + }, + PromptParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters related to the prompt(s) used by the use case', + properties: { + PromptTemplate: { + type: JsonSchemaType.STRING, + description: + 'Default prompt template which will be fed to the LLM, barring any overrides by users' + }, + UserPromptEditingEnabled: { + type: JsonSchemaType.BOOLEAN, + description: 'Whether to allow the user of the use case to edit their own prompt', + default: true + }, + MaxPromptTemplateLength: { + type: JsonSchemaType.INTEGER, + description: + 'Maximum length (in characters) of the system prompt template that a user can use in the use case', + minimum: 0 + }, + MaxInputTextLength: { + type: JsonSchemaType.INTEGER, + description: + 'Maximum length (in characters) of the input text that can be sent to the LLM.', + minimum: 1 + }, + RephraseQuestion: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether to use the disambiguated query instead of the original user input in the final prompt. Only appluies when using RAG.', + default: true + }, + DisambiguationPromptTemplate: { + type: JsonSchemaType.STRING, + description: + 'Prompt which will be internally used to disambiguate new queries in combination with the chat history. Only applies when using RAG.' + }, + DisambiguationEnabled: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether to perform disambiguation for the use case. Only applies when using RAG.', + default: true + } + }, + additionalProperties: false + }, + Temperature: { + type: JsonSchemaType.NUMBER, + description: + 'Temperature value which will be fed to the LLM. Scale should be chosen based on the supported range of the model provider.', + default: 0, + minimum: 0, + maximum: 100 + }, + Streaming: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether to stream the LLM responses back to the user or not. Note some providers do not support streaming.' + }, + RAGEnabled: { + type: JsonSchemaType.BOOLEAN, + description: + 'If true, the use case will reference a knowledge base when responding to the user. Otherwise provides chat with the LLM directly.', + default: true + }, + Verbose: { + type: JsonSchemaType.BOOLEAN, + description: 'Whether to print out debug messages to the console', + default: false + }, + MultimodalParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters for the multimodal capability for the LLM.', + properties: { + MultimodalEnabled: { + type: JsonSchemaType.BOOLEAN, + description: 'Allow the multimodal input capability for the LLM.', + default: false + } + }, + required: ['MultimodalEnabled'], + additionalProperties: false + } + }, + oneOf: [ + { + properties: { + ModelProvider: { enum: [CHAT_PROVIDERS.BEDROCK] } + }, + required: ['BedrockLlmParams'] + }, + { + properties: { + ModelProvider: { enum: [CHAT_PROVIDERS.SAGEMAKER] } + }, + required: ['SageMakerLlmParams'] + } + ], + additionalProperties: false +}; + +// Schema for LLM parameters when updating an existing use case (allows partial updates) +export const llmParamsUpdateSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + properties: { + ModelProvider: { + type: JsonSchemaType.STRING, + description: 'Name of the LLM provider which the use case will use', + enum: SUPPORTED_CHAT_PROVIDERS + }, + BedrockLlmParams: { + type: JsonSchemaType.OBJECT, + description: `Parameters specific to use cases using Bedrock as an LLM provider. Can only be provided when "ModelProvider" is ${CHAT_PROVIDERS.BEDROCK}`, + properties: { + ModelId: { + type: JsonSchemaType.STRING, + description: + 'Depending on whether ModelArn is provided, this will either be used to select the on-demand model to invoke or be used to specify the base model that the selected provisioned/custom model is based on.', + pattern: + '^([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$' + }, + ModelArn: { + type: JsonSchemaType.STRING, + description: + 'ARN of the provisioned/custom model to use from Amazon Bedrock. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax', + pattern: + '^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-:]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))$' + }, + InferenceProfileId: { + type: JsonSchemaType.STRING, + description: + 'The identifier of the Bedrock inference profile to use when invoking the model. When provided, a ModelId and ModelArn should not be provided. All inference requests will be mapped to the specified inference profile, which can be configured in the Bedrock console. This enables cross region model invocation. See: https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-use.html', + pattern: '^[a-zA-Z0-9-:.]+$' + }, + GuardrailIdentifier: { + type: [JsonSchemaType.STRING, JsonSchemaType.NULL], + description: + "The unique identifier of the Bedrock guardrail that you want to be applied to all LLM invocations. If you don't provide a value, no guardrail is applied to the invocation. If provided, you must also provide a GuardrailVersion. To remove a guardrail set this value to 'null'. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax.", + pattern: + '^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$' + }, + GuardrailVersion: { + type: [JsonSchemaType.STRING, JsonSchemaType.NULL], + description: + 'Version of the guardrail to be used. Must be provided if GuardrailIdentifier is provided. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax', + pattern: '^(([1-9][0-9]{0,7})|(DRAFT))$' + }, + BedrockInferenceType: { + type: JsonSchemaType.STRING, + description: 'The type of Bedrock inference to use. Required for Bedrock LLM params.', + default: BEDROCK_INFERENCE_TYPES.QUICK_START, + enum: SUPPORTED_BEDROCK_INFERENCE_TYPES + } + }, + required: ['BedrockInferenceType'], + // Validate model selection based on inference type (allows partial updates) + oneOf: [ + { + required: ['ModelId'], + properties: { + BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.QUICK_START, BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION] }, + InferenceProfileId: { + not: {} + } + } + }, + { + required: ['InferenceProfileId'], + properties: { + BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE] }, + ModelId: { + not: {} + } + } + }, + { + properties: { + BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.PROVISIONED] }, + }, + required: ['ModelArn'] + }, + { + properties: { + ModelId: { + not: {} + }, + InferenceProfileId: { + not: {} + }, + ModelArn: { + not: {} + } + } + } + ], + additionalProperties: false + }, + SageMakerLlmParams: { + type: JsonSchemaType.OBJECT, + description: `Parameters specific to use cases using a SageMaker model as an LLM provider. Can only be provided when "ModelProvider" is ${CHAT_PROVIDERS.SAGEMAKER}`, + properties: { + EndpointName: { + type: JsonSchemaType.STRING, + description: 'Endpoint for the deployed model to use from SageMaker', + pattern: '^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$' + }, + ModelInputPayloadSchema: { + type: JsonSchemaType.OBJECT, + description: + 'An object defining the schema to be used to populate model params for SageMaker endpoint models' + }, + ModelOutputJSONPath: { + type: JsonSchemaType.STRING, + description: + 'JSON path where the response should be retrieved from the model output payload. Applicable only to SageMaker endpoints.', + pattern: '^\\$[\\w\\.\\,\\[\\]:\\\'\\"\\-\\(\\)\\*\\?\\@]*$' + } + }, + additionalProperties: false + }, + ModelParams: { + type: JsonSchemaType.OBJECT, + description: + 'Additional model params to be passed to the model, whose keys are as defined in the LLM documentation', + additionalProperties: { + type: JsonSchemaType.OBJECT, + properties: { + Value: { + type: JsonSchemaType.STRING, + description: 'Value of the param' + }, + Type: { + type: JsonSchemaType.STRING, + enum: MODEL_PARAM_TYPES, + description: + 'Python type of the param, as a string. Will be cast to this type before being fed to LLM.' + } + }, + required: ['Value', 'Type'] + } + }, + PromptParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters related to the prompt(s) used by the use case', + properties: { + PromptTemplate: { + type: JsonSchemaType.STRING, + description: + 'Default prompt template which will be fed to the LLM, barring any overrides by users' + }, + UserPromptEditingEnabled: { + type: JsonSchemaType.BOOLEAN, + description: 'Whether to allow the user of the use case to edit their own prompt' + }, + MaxPromptTemplateLength: { + type: JsonSchemaType.INTEGER, + description: + 'Maximum length (in characters) of the prompt template that a user can use in the use case' + }, + MaxInputTextLength: { + type: JsonSchemaType.INTEGER, + description: 'Maximum length (in characters) of the input text that can be sent to the LLM.' + }, + RephraseQuestion: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether to use the disambiguated query instead of the original user input in the final prompt. Only appluies when using RAG.', + default: true + }, + DisambiguationPromptTemplate: { + type: JsonSchemaType.STRING, + description: + 'Prompt which will be internally used to disambiguate new queries in combination with the chat history. Only applies when using RAG.' + }, + DisambiguationEnabled: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether to disable disambiguation for the use case. Only applies when using RAG.' + } + }, + additionalProperties: false + }, + Temperature: { + type: JsonSchemaType.NUMBER, + description: + 'Temperature value which will be fed to the LLM. Scale should be chosen based on the supported range of the model provider.', + default: 0, + minimum: 0, + maximum: 100 + }, + + Streaming: { + type: JsonSchemaType.BOOLEAN, + description: + 'Whether to stream the LLM responses back to the user or not. Note some providers do not support streaming.' + }, + RAGEnabled: { + type: JsonSchemaType.BOOLEAN, + description: + 'If true, the use case will reference a knowledge base when responding to the user. Otherwise provides chat with the LLM directly.' + }, + Verbose: { + type: JsonSchemaType.BOOLEAN, + description: 'Whether to print out debug messages to the console' + }, + MultimodalParams: { + type: JsonSchemaType.OBJECT, + description: 'Parameters for the multimodal capability for the LLM.', + properties: { + MultimodalEnabled: { + type: JsonSchemaType.BOOLEAN, + description: 'Allow the multimodal input capability for the LLM.', + default: false + } + }, + required: ['MultimodalEnabled'], + additionalProperties: false + } + }, + anyOf: [ + { + properties: { + ModelProvider: { enum: [CHAT_PROVIDERS.BEDROCK] } + }, + required: ['BedrockLlmParams'] + }, + { + properties: { + ModelProvider: { enum: [CHAT_PROVIDERS.SAGEMAKER] } + }, + required: ['SageMakerLlmParams'] + }, + { + properties: { + ModelProvider: { not: {} } + } + } + ], + additionalProperties: false +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/shared/vpc-params.ts b/source/infrastructure/lib/api/model-schema/shared/vpc-params.ts new file mode 100644 index 00000000..c7efdd48 --- /dev/null +++ b/source/infrastructure/lib/api/model-schema/shared/vpc-params.ts @@ -0,0 +1,147 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { JsonSchema, JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +/** + * VPC parameter schemas for use case deployment and updates. + * Handles both new VPC creation and existing VPC configuration scenarios. + */ +export const vpcParamsCreateSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: + 'Parameters for the use case VPC. VPC can be either created for you, or provided by the user depending on the parameters provided.', + properties: { + VpcEnabled: { + type: JsonSchemaType.BOOLEAN, + description: 'Should the use case stacks resources be deployed within a VPC', + default: false + }, + CreateNewVpc: { + type: JsonSchemaType.BOOLEAN, + description: 'If true, a new VPC will be created for the use case.', + default: false + }, + ExistingVpcId: { + type: JsonSchemaType.STRING, + description: + 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed within the specified VPC.', + pattern: '^vpc-\\w{8}(\\w{9})?$' + }, + ExistingPrivateSubnetIds: { + type: JsonSchemaType.ARRAY, + description: + 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed using the specified subnets.', + items: { + type: JsonSchemaType.STRING, + pattern: '^subnet-\\w{8}(\\w{9})?$' + }, + maxItems: 16, + uniqueItems: true + }, + ExistingSecurityGroupIds: { + type: JsonSchemaType.ARRAY, + description: + 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed using the specified security groups.', + items: { + type: JsonSchemaType.STRING, + pattern: '^sg-\\w{8}(\\w{9})?$' + }, + maxItems: 5, + uniqueItems: true + } + }, + oneOf: [ + // Case 1: Using an existing VPC - requires VPC ID, subnets, and security groups + { + properties: { + VpcEnabled: { + type: JsonSchemaType.BOOLEAN, + enum: [true] + }, + CreateNewVpc: { + type: JsonSchemaType.BOOLEAN, + enum: [false] + } + }, + required: ['ExistingVpcId', 'ExistingPrivateSubnetIds', 'ExistingSecurityGroupIds'] + }, + // Case 2: Creating a new VPC - existing VPC resource info not allowed + { + properties: { + VpcEnabled: { + type: JsonSchemaType.BOOLEAN, + enum: [true] + }, + CreateNewVpc: { + type: JsonSchemaType.BOOLEAN, + enum: [true] + }, + ExistingVpcId: { + not: {} + }, + ExistingPrivateSubnetIds: { + not: {} + }, + ExistingSecurityGroupIds: { + not: {} + } + } + }, + // Case 3: VPC disabled - no VPC-related parameters allowed + { + properties: { + VpcEnabled: { + type: JsonSchemaType.BOOLEAN, + enum: [false] + }, + CreateNewVpc: { + not: {} + }, + ExistingVpcId: { + not: {} + }, + ExistingPrivateSubnetIds: { + not: {} + }, + ExistingSecurityGroupIds: { + not: {} + } + } + } + ], + required: ['VpcEnabled'], + additionalProperties: false +}; + +// Schema for VPC parameters when updating an existing use case (limited to subnets and security groups) +export const vpcParamsUpdateSchema: JsonSchema = { + type: JsonSchemaType.OBJECT, + description: + 'Parameters for the use case VPC. Note on updates it is only possible to change private subnets and security groups. It is not possible to switch the VPC a use case is deployed in, or move an existing non-VPC use case into a VPC.', + properties: { + ExistingPrivateSubnetIds: { + type: JsonSchemaType.ARRAY, + items: { + type: JsonSchemaType.STRING, + pattern: '^subnet-\\w{8}(\\w{9})?$' + }, + maxItems: 16, + uniqueItems: true, + description: + 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed using the specified subnets.' + }, + ExistingSecurityGroupIds: { + type: JsonSchemaType.ARRAY, + items: { + type: JsonSchemaType.STRING, + pattern: '^sg-\\w{8}(\\w{9})?$' + }, + maxItems: 5, + uniqueItems: true, + description: + 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed using the specified security groups.' + } + }, + additionalProperties: false +}; \ No newline at end of file diff --git a/source/infrastructure/lib/api/model-schema/update-usecase-body.ts b/source/infrastructure/lib/api/model-schema/update-usecase-body.ts deleted file mode 100644 index d2408d2d..00000000 --- a/source/infrastructure/lib/api/model-schema/update-usecase-body.ts +++ /dev/null @@ -1,578 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import { JsonSchema, JsonSchemaType, JsonSchemaVersion } from 'aws-cdk-lib/aws-apigateway'; -import { - AUTHENTICATION_PROVIDERS, - BEDROCK_INFERENCE_TYPES, - CHAT_PROVIDERS, - DEFAULT_CONVERSATION_MEMORY_TYPE, - DEFAULT_ENABLE_RBAC, - DEFAULT_KENDRA_NUMBER_OF_DOCS, - DEFAULT_RETURN_SOURCE_DOCS, - DEFAULT_SCORE_THRESHOLD, - KNOWLEDGE_BASE_TYPES, - MAX_KENDRA_NUMBER_OF_DOCS, - MAX_SCORE_THRESHOLD, - MIN_KENDRA_NUMBER_OF_DOCS, - MIN_SCORE_THRESHOLD, - MODEL_PARAM_TYPES, - SUPPORTED_AGENT_TYPES, - SUPPORTED_AUTHENTICATION_PROVIDERS, - SUPPORTED_BEDROCK_INFERENCE_TYPES, - SUPPORTED_CHAT_PROVIDERS, - SUPPORTED_CONVERSATION_MEMORY_TYPES, - SUPPORTED_KNOWLEDGE_BASE_TYPES, - USE_CASE_TYPES -} from '../../utils/constants'; - -export const updateUseCaseBodySchema: JsonSchema = { - schema: JsonSchemaVersion.DRAFT7, - type: JsonSchemaType.OBJECT, - properties: { - UseCaseType: { - type: JsonSchemaType.STRING, - description: 'Type of the use case to be deployed. Either "Text" or "Agent".', - enum: [USE_CASE_TYPES.TEXT, USE_CASE_TYPES.AGENT] - }, - UseCaseDescription: { - type: JsonSchemaType.STRING, - description: 'Description of the use case to be deployed. For display purposes' - }, - DefaultUserEmail: { - type: JsonSchemaType.STRING, - description: 'Email address of the user who will be created with permissions to use the deployed use-case', - format: 'email' - }, - DeployUI: { - type: JsonSchemaType.BOOLEAN, - description: 'Deploy the CloudFront based UI for the use case', - default: true - }, - FeedbackParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters for the feedback capability for the use case.', - properties: { - FeedbackEnabled: { - type: JsonSchemaType.BOOLEAN, - description: 'Allow the feedback capability for the use case.', - default: false - } - }, - required: ['FeedbackEnabled'], - additionalProperties: false - }, - ExistingRestApiId: { - type: JsonSchemaType.STRING, - description: 'Rest API ID which will be used to invoke UseCaseDetails (and Feedback, if enabled).' - }, - VpcParams: { - type: JsonSchemaType.OBJECT, - description: - 'Parameters for the use case VPC. Note on updates it is only possible to change private subnets and security groups. It is not possible to switch the VPC a use case is deployed in, or move an existing non-VPC use case into a VPC.', - properties: { - ExistingPrivateSubnetIds: { - type: JsonSchemaType.ARRAY, - items: { - type: JsonSchemaType.STRING, - pattern: '^subnet-\\w{8}(\\w{9})?$' - }, - maxItems: 16, - uniqueItems: true, - description: - 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed using the specified subnets.' - }, - ExistingSecurityGroupIds: { - type: JsonSchemaType.ARRAY, - items: { - type: JsonSchemaType.STRING, - pattern: '^sg-\\w{8}(\\w{9})?$' - }, - maxItems: 5, - uniqueItems: true, - description: - 'If VpcEnabled is true and CreateNewVpc is false, the use case will be deployed using the specified security groups.' - } - }, - additionalProperties: false - }, - ConversationMemoryParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters related to storing and using the chat history', - properties: { - ConversationMemoryType: { - type: JsonSchemaType.STRING, - default: DEFAULT_CONVERSATION_MEMORY_TYPE, - enum: SUPPORTED_CONVERSATION_MEMORY_TYPES - }, - HumanPrefix: { - type: JsonSchemaType.STRING, - description: 'Prefix used in the history when storing messages sent by the user' - }, - AiPrefix: { - type: JsonSchemaType.STRING, - description: 'Prefix used in the history when storing responses from the LLM' - }, - ChatHistoryLength: { - type: JsonSchemaType.INTEGER, - description: 'Number of messages to store in the history', - minimum: 0 - } - }, - additionalProperties: false - }, - KnowledgeBaseParams: { - type: JsonSchemaType.OBJECT, - description: - 'Parameters related to the knowledge base. Based on KnowledgeBaseType, different nested parameters are required.', - properties: { - KnowledgeBaseType: { - type: JsonSchemaType.STRING, - description: 'The type of knowledge base to use. Required.', - default: KNOWLEDGE_BASE_TYPES.KENDRA, - enum: SUPPORTED_KNOWLEDGE_BASE_TYPES - }, - NoDocsFoundResponse: { - type: JsonSchemaType.STRING, - description: 'Response text message to use when the knowledge base does not return any documents', - minLength: 1 - }, - KendraKnowledgeBaseParams: { - type: JsonSchemaType.OBJECT, - description: - 'Parameters specific to Kendra. Note on update we can only reference an existing Kendra index, creating a new one is not supported currently.', - properties: { - ExistingKendraIndexId: { - type: JsonSchemaType.STRING, - description: - 'Index ID of an existing Kendra index to be used for the use case. Required if KendraIndexName is not provided.', - pattern: '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$' - }, - AttributeFilter: { - type: JsonSchemaType.OBJECT, - description: - 'Filter to apply when querying the Kendra index. See: https://docs.aws.amazon.com/kendra/latest/APIReference/API_AttributeFilter.html' - }, - RoleBasedAccessControlEnabled: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether role-based access control is enabled on the Kendra index, used to restrict Kendra queries to documents accessible by user group and id.', - default: DEFAULT_ENABLE_RBAC - } - }, - minProperties: 1, - additionalProperties: false - }, - BedrockKnowledgeBaseParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters specific to Bedrock Knowledge Bases', - properties: { - BedrockKnowledgeBaseId: { - type: JsonSchemaType.STRING, - description: - 'ID of the Bedrock knowledge base to use in a RAG use case. Required if KnowledgeBaseType is Bedrock.', - pattern: '^[0-9a-zA-Z]{1,10}$' - }, - RetrievalFilter: { - type: JsonSchemaType.OBJECT, - description: - 'Filter to apply when querying the Bedrock knowledge base. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_RetrievalFilter.html' - }, - - OverrideSearchType: { - type: JsonSchemaType.STRING, - description: - "If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID search using both vector embeddings and raw text, or SEMANTIC search using only vector embeddings. By default (if this is not provided), Amazon Bedrock will choose for you. For other vector store types, passing this parameter will result in a validation error during retrieval. For more information, see https://docs.aws.amazon.com/bedrock/latest/userguide/kb-test-config.html", - enum: ['HYBRID', 'SEMANTIC', 'NONE'], - default: 'NONE' - } - }, - additionalProperties: false - }, - NumberOfDocs: { - type: JsonSchemaType.INTEGER, - description: - 'The number of documents returned from the knowledge base which will be used as context to be sent to the LLM', - default: DEFAULT_KENDRA_NUMBER_OF_DOCS, - minimum: MIN_KENDRA_NUMBER_OF_DOCS, - maximum: MAX_KENDRA_NUMBER_OF_DOCS - }, - ScoreThreshold: { - type: JsonSchemaType.NUMBER, - description: 'The minimum score a document must have to be returned from the knowledge base', - default: DEFAULT_SCORE_THRESHOLD, - minimum: MIN_SCORE_THRESHOLD, - maximum: MAX_SCORE_THRESHOLD - }, - ReturnSourceDocs: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether to return information about the source of documents returned from the knowledge base', - default: DEFAULT_RETURN_SOURCE_DOCS - } - }, - oneOf: [ - // case where the knowledge base type is not changed requires no additional conditions. - { - properties: { - KnowledgeBaseType: { - not: {} - } - } - }, - { - properties: { - KnowledgeBaseType: { enum: [KNOWLEDGE_BASE_TYPES.KENDRA] }, - // no bedrock knowledge base params allowed when switching to kendra - BedrockKnowledgeBaseParams: { - not: {} - } - }, - required: ['KendraKnowledgeBaseParams', 'KnowledgeBaseType'] - }, - { - properties: { - KnowledgeBaseType: { enum: [KNOWLEDGE_BASE_TYPES.BEDROCK] }, - // no kendra params allowed when switching to bedrock knowledge base - KendraKnowledgeBaseParams: { - not: {} - } - }, - required: ['BedrockKnowledgeBaseParams', 'KnowledgeBaseType'] - } - ], - additionalProperties: false - }, - AuthenticationParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters related to the Authentication.', - properties: { - AuthenticationProvider: { - type: JsonSchemaType.STRING, - description: 'Supported authentication provider.', - enum: SUPPORTED_AUTHENTICATION_PROVIDERS - }, - CognitoParams: { - type: JsonSchemaType.OBJECT, - description: 'Cognito user pool related parameters.', - properties: { - ExistingUserPoolId: { - type: JsonSchemaType.STRING, - description: 'Existing Cognito User Pool Id.', - pattern: '^[\\w-]+_[0-9a-zA-Z]+$', - minLength: 1, - maxLength: 55 - }, - ExistingUserPoolClientId: { - type: JsonSchemaType.STRING, - description: 'Existing Cognito User Pool Client Id.', - pattern: '^[\\w+]+$', - minLength: 1, - maxLength: 128 - } - }, - required: ['ExistingUserPoolId'] - } - }, - anyOf: [ - { - properties: { - AuthenticationProvider: { enum: [AUTHENTICATION_PROVIDERS.COGNITO] } - }, - required: ['CognitoParams'] - } - ], - required: ['AuthenticationProvider'] - }, - LlmParams: { - type: JsonSchemaType.OBJECT, - properties: { - ModelProvider: { - type: JsonSchemaType.STRING, - description: 'Name of the LLM provider which the use case will use', - enum: SUPPORTED_CHAT_PROVIDERS - }, - BedrockLlmParams: { - type: JsonSchemaType.OBJECT, - description: `Parameters specific to use cases using Bedrock as an LLM provider. Can only be provided when "ModelProvider" is ${CHAT_PROVIDERS.BEDROCK}`, - properties: { - ModelId: { - type: JsonSchemaType.STRING, - description: - 'Depending on whether ModelArn is provided, this will either be used to select the on-demand model to invoke or be used to specify the base model that the selected provisioned/custom model is based on.', - pattern: - '^([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$' - }, - ModelArn: { - type: JsonSchemaType.STRING, - description: - 'ARN of the provisioned/custom model to use from Amazon Bedrock. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax', - pattern: - '^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-:]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))$' - }, - InferenceProfileId: { - type: JsonSchemaType.STRING, - description: - 'The identifier of the Bedrock inference profile to use when invoking the model. When provided, a ModelId and ModelArn should not be provided. All inference requests will be mapped to the specified inference profile, which can be configured in the Bedrock console. This enables cross region model invocation. See: https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-use.html', - pattern: '^[a-zA-Z0-9-:.]+$' - }, - GuardrailIdentifier: { - type: [JsonSchemaType.STRING, JsonSchemaType.NULL], - description: - "The unique identifier of the Bedrock guardrail that you want to be applied to all LLM invocations. If you don't provide a value, no guardrail is applied to the invocation. If provided, you must also provide a GuardrailVersion. To remove a guardrail set this value to 'null'. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax.", - pattern: - '^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$' - }, - GuardrailVersion: { - type: [JsonSchemaType.STRING, JsonSchemaType.NULL], - description: - 'Version of the guardrail to be used. Must be provided if GuardrailIdentifier is provided. See: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_InvokeModel.html#API_runtime_InvokeModel_RequestSyntax', - pattern: '^(([1-9][0-9]{0,7})|(DRAFT))$' - }, - BedrockInferenceType: { - type: JsonSchemaType.STRING, - description: 'The type of Bedrock inference to use. Required for Bedrock LLM params.', - default: BEDROCK_INFERENCE_TYPES.QUICK_START, - enum: SUPPORTED_BEDROCK_INFERENCE_TYPES - } - }, - required: ['BedrockInferenceType'], - // either provide ModelId or InferenceProfileId or neither - oneOf: [ - { - required: ['ModelId'], - properties: { - BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.QUICK_START, BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION] }, - InferenceProfileId: { - not: {} - } - } - }, - { - required: ['InferenceProfileId'], - properties: { - BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE] }, - ModelId: { - not: {} - } - } - }, - { - properties: { - BedrockInferenceType: { enum: [BEDROCK_INFERENCE_TYPES.PROVISIONED] }, - }, - required: ['ModelArn'] - }, - { - properties: { - ModelId: { - not: {} - }, - InferenceProfileId: { - not: {} - }, - ModelArn: { - not: {} - } - } - } - ], - additionalProperties: false - }, - SageMakerLlmParams: { - type: JsonSchemaType.OBJECT, - description: `Parameters specific to use cases using a SageMaker model as an LLM provider. Can only be provided when "ModelProvider" is ${CHAT_PROVIDERS.SAGEMAKER}`, - properties: { - EndpointName: { - type: JsonSchemaType.STRING, - description: 'Endpoint for the deployed model to use from SageMaker', - pattern: '^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$' - }, - ModelInputPayloadSchema: { - type: JsonSchemaType.OBJECT, - description: - 'An object defining the schema to be used to populate model params for SageMaker endpoint models' - }, - ModelOutputJSONPath: { - type: JsonSchemaType.STRING, - description: - 'JSON path where the response should be retrieved from the model output payload. Applicable only to SageMaker endpoints.', - pattern: '^\\$[\\w\\.\\,\\[\\]:\\\'\\"\\-\\(\\)\\*\\?\\@]*$' - } - }, - additionalProperties: false - }, - ModelParams: { - type: JsonSchemaType.OBJECT, - description: - 'Additional model params to be passed to the model, whose keys are as defined in the LLM documentation', - additionalProperties: { - type: JsonSchemaType.OBJECT, - properties: { - Value: { - type: JsonSchemaType.STRING, - description: 'Value of the param' - }, - Type: { - type: JsonSchemaType.STRING, - enum: MODEL_PARAM_TYPES, - description: - 'Python type of the param, as a string. Will be cast to this type before being fed to LLM.' - } - }, - required: ['Value', 'Type'] - } - }, - PromptParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters related to the prompt(s) used by the use case', - properties: { - PromptTemplate: { - type: JsonSchemaType.STRING, - description: - 'Default prompt template which will be fed to the LLM, barring any overrides by users' - }, - UserPromptEditingEnabled: { - type: JsonSchemaType.BOOLEAN, - description: 'Whether to allow the user of the use case to edit their own prompt' - }, - MaxPromptTemplateLength: { - type: JsonSchemaType.INTEGER, - description: - 'Maximum length (in characters) of the prompt template that a user can use in the use case' - }, - MaxInputTextLength: { - type: JsonSchemaType.INTEGER, - description: 'Maximum length (in characters) of the input text that can be sent to the LLM.' - }, - RephraseQuestion: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether to use the disambiguated query instead of the original user input in the final prompt. Only appluies when using RAG.', - default: true - }, - DisambiguationPromptTemplate: { - type: JsonSchemaType.STRING, - description: - 'Prompt which will be internally used to disambiguate new queries in combination with the chat history. Only applies when using RAG.' - }, - DisambiguationEnabled: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether to disable disambiguation for the use case. Only applies when using RAG.' - } - }, - additionalProperties: false - }, - Temperature: { - type: JsonSchemaType.NUMBER, - description: - 'Temperature value which will be fed to the LLM. Scale should be chosen based on the supported range of the model provider.', - default: 0, - minimum: 0, - maximum: 100 - }, - - Streaming: { - type: JsonSchemaType.BOOLEAN, - description: - 'Whether to stream the LLM responses back to the user or not. Note some providers do not support streaming.' - }, - RAGEnabled: { - type: JsonSchemaType.BOOLEAN, - description: - 'If true, the use case will reference a knowledge base when responding to the user. Otherwise provides chat with the LLM directly.' - }, - Verbose: { - type: JsonSchemaType.BOOLEAN, - description: 'Whether to print out debug messages to the console' - } - }, - anyOf: [ - { - properties: { - ModelProvider: { enum: [CHAT_PROVIDERS.BEDROCK] } - }, - required: ['BedrockLlmParams'] - }, - { - properties: { - ModelProvider: { enum: [CHAT_PROVIDERS.SAGEMAKER] } - }, - required: ['SageMakerLlmParams'] - }, - { - properties: { - ModelProvider: { not: {} } - } - } - ], - additionalProperties: false - }, - AgentParams: { - type: JsonSchemaType.OBJECT, - description: 'Parameters for Bedrock agent invocation workflow.', - properties: { - AgentType: { - type: JsonSchemaType.STRING, - description: 'The type of agent to use. Required.', - enum: SUPPORTED_AGENT_TYPES - }, - BedrockAgentParams: { - type: JsonSchemaType.OBJECT, - properties: { - AgentId: { - type: JsonSchemaType.STRING, - description: 'ID of the Bedrock agent to be invoked.', - pattern: '^[0-9a-zA-Z]+$', - maxLength: 10 - }, - AgentAliasId: { - type: JsonSchemaType.STRING, - description: 'Alias ID of the Bedrock agent to be invoked.', - pattern: '^[0-9a-zA-Z]+$', - maxLength: 10 - }, - EnableTrace: { - type: JsonSchemaType.BOOLEAN, - description: 'Whether to enable tracing for the agent invocation.', - default: false - } - }, - required: ['AgentId', 'AgentAliasId'], - additionalProperties: false - } - }, - additionalProperties: false - } - }, - // on update we require at least one of these to be present so an actual update should take place - anyOf: [ - { - required: ['UseCaseDescription'] - }, - { - required: ['DefaultUserEmail'] - }, - { - required: ['VpcParams'] - }, - { - required: ['ConversationMemoryParams'] - }, - { - required: ['KnowledgeBaseParams'] - }, - { - required: ['LlmParams'] - }, - { - required: ['AgentParams'] - }, - { - required: ['AuthenticationParams'] - } - ], - required: ['UseCaseType'], - additionalProperties: false -}; diff --git a/source/infrastructure/lib/api/rest-request-processor.ts b/source/infrastructure/lib/api/rest-request-processor.ts index c0280b08..35dcf8fd 100644 --- a/source/infrastructure/lib/api/rest-request-processor.ts +++ b/source/infrastructure/lib/api/rest-request-processor.ts @@ -34,6 +34,21 @@ export interface RestRequestProcessorProps extends RequestProcessorProps { */ modelInfoAPILambda: lambda.Function; + /** + * The function to back the MCP management API + */ + mcpManagementAPILambda: lambda.Function; + + /** + * The function to back the agent management API + */ + agentManagementAPILambda: lambda.Function; + + /** + * The function to back the workflow management API + */ + workflowManagementAPILambda: lambda.Function; + /** * The ARN of the Lambda function to use for custom resource implementation. */ @@ -149,6 +164,9 @@ export class RestRequestProcessor extends RequestProcessor { this.deploymentRestEndpoint = new DeploymentPlatformRestEndpoint(this, 'DeploymentRestEndpoint', { useCaseManagementAPILambda: props.useCaseManagementAPILambda, modelInfoApiLambda: props.modelInfoAPILambda, + mcpManagementAPILambda: props.mcpManagementAPILambda, + agentManagementAPILambda: props.agentManagementAPILambda, + workflowManagementAPILambda: props.workflowManagementAPILambda, deploymentPlatformAuthorizer: this.requestAuthorizer }); diff --git a/source/infrastructure/lib/api/use-case-rest-endpoint-setup.ts b/source/infrastructure/lib/api/use-case-rest-endpoint-setup.ts index 11c30ec1..38bf26ad 100644 --- a/source/infrastructure/lib/api/use-case-rest-endpoint-setup.ts +++ b/source/infrastructure/lib/api/use-case-rest-endpoint-setup.ts @@ -145,6 +145,16 @@ export class UseCaseRestEndpointSetup extends BaseRestEndpoint { */ public readonly methodOptions: api.MethodOptions; + /** + * The request authorizer for the API + */ + public readonly authorizer: api.IAuthorizer; + + /** + * The request validator for the API + */ + public readonly requestValidator: api.IRequestValidator; + /** * The method used to get the details from our rest API */ @@ -248,15 +258,18 @@ export class UseCaseRestEndpointSetup extends BaseRestEndpoint { ); this.restApi = this.setRestApi(props); - const requestValidator = this.setRequestValidator(props); + this.requestValidator = this.setRequestValidator(props); const authorizerId = this.setRequestAuthorizer(props); + // Create the authorizer object for direct access + this.authorizer = { + authorizerId: authorizerId, + authorizationType: api.AuthorizationType.CUSTOM + } as api.RequestAuthorizer; + this.methodOptions = { - authorizer: { - authorizerId: authorizerId, - authorizationType: api.AuthorizationType.CUSTOM - } as api.RequestAuthorizer, - requestValidator: requestValidator + authorizer: this.authorizer, + requestValidator: this.requestValidator } as api.MethodOptions; this.detailsGETMethod = this.createUseCaseDetailsApi(createApiRoutesCondition); diff --git a/source/infrastructure/lib/api/websocket-endpoint.ts b/source/infrastructure/lib/api/websocket-endpoint.ts index 0181ca16..171b8d94 100644 --- a/source/infrastructure/lib/api/websocket-endpoint.ts +++ b/source/infrastructure/lib/api/websocket-endpoint.ts @@ -40,7 +40,12 @@ export interface WebSocketProps { /** * Lambda mapping with route action */ - lambdaRouteMapping: Map; + lambdaRouteMapping: Map; + + /** + * The underlying lambda function for environment variable configuration + */ + chatLlmProviderLambda: lambda.Function; /** * ID of the use case, used to create an easily readable API name. @@ -106,15 +111,27 @@ export class WebSocketEndpoint extends Construct { this.apiGatewayRole = apiGatewayV2WebSocketToSqs.apiGatewayRole; // this section of the code only creates sqs-lambda configuration for the first default route. - const lambda = props.lambdaRouteMapping.get(firstRouteKey!)!; //NOSONAR - typescript:S4325 - not null assertion required - lambda.addEnvironment('WEBSOCKET_CALLBACK_URL', apiGatewayV2WebSocketToSqs.webSocketStage.callbackUrl); - apiGatewayV2WebSocketToSqs.webSocketApi.grantManageConnections(lambda); - //prettier-ignore - new SqsToLambda(this, `${firstRouteKey}SqsToLambda`, { //NOSONAR - cdk instance creation does not require assignment - existingQueueObj: apiGatewayV2WebSocketToSqs.sqsQueue, - deployDeadLetterQueue: false, - existingLambdaObj: lambda - }); + const lambdaOrAlias = props.lambdaRouteMapping.get(firstRouteKey!)!; //NOSONAR - typescript:S4325 - not null assertion required + props.chatLlmProviderLambda.addEnvironment( + 'WEBSOCKET_CALLBACK_URL', + apiGatewayV2WebSocketToSqs.webSocketStage.callbackUrl + ); + apiGatewayV2WebSocketToSqs.webSocketApi.grantManageConnections(lambdaOrAlias); + + if (lambdaOrAlias instanceof lambda.Alias) { + new lambda.EventSourceMapping(this, `${firstRouteKey}EventSourceMapping`, { + target: lambdaOrAlias, + eventSourceArn: apiGatewayV2WebSocketToSqs.sqsQueue.queueArn + }); + apiGatewayV2WebSocketToSqs.sqsQueue.grantConsumeMessages(lambdaOrAlias); + } else { + //prettier-ignore + new SqsToLambda(this, `${firstRouteKey}SqsToLambda`, { //NOSONAR - cdk instance creation does not require assignment + existingQueueObj: apiGatewayV2WebSocketToSqs.sqsQueue, + deployDeadLetterQueue: false, + existingLambdaObj: lambdaOrAlias + }); + } NagSuppressions.addResourceSuppressions(this.websocketApiStage, [ { diff --git a/source/infrastructure/lib/api/websocket-request-processor.ts b/source/infrastructure/lib/api/websocket-request-processor.ts index 0a648761..a5204f2b 100644 --- a/source/infrastructure/lib/api/websocket-request-processor.ts +++ b/source/infrastructure/lib/api/websocket-request-processor.ts @@ -35,7 +35,12 @@ export interface WebsocketRequestProcessorProps extends RequestProcessorProps { /** * The function to back the LangChain chat LLM model */ - lambdaRouteMapping: Map; + lambdaRouteMapping: Map; + + /** + * The underlying lambda function for environment variable configuration + */ + chatLlmProviderLambda: lambda.Function; /** * Name of table which stores policies for cognito user groups. Required if existingCognitoUserPoolId is provided. @@ -217,7 +222,8 @@ export class WebsocketRequestProcessor extends RequestProcessor { onConnectLambda: this.onConnectLambda, onDisconnectLambda: this.onDisconnectLambda, useCaseUUID: props.useCaseUUID, - lambdaRouteMapping: props.lambdaRouteMapping + lambdaRouteMapping: props.lambdaRouteMapping, + chatLlmProviderLambda: props.chatLlmProviderLambda }); this.webSocketApi = webSocketEndpoint.webSocketApi; @@ -305,14 +311,26 @@ export function addAddtionalRoutes( ) ); - const lambda = props.lambdaRouteMapping.get(routeKey); - lambda!.addEnvironment('WEBSOCKET_CALLBACK_URL', webSocketEndpoint.websocketApiStage.callbackUrl); - webSocketEndpoint.webSocketApi.grantManageConnections(lambda!); - new SqsToLambda(construct, `${routeKey}SqsToLambda`, { - existingQueueObj: queue, - deployDeadLetterQueue: false, - existingLambdaObj: lambda - }); + const lambdaOrAlias = props.lambdaRouteMapping.get(routeKey)!; + props.chatLlmProviderLambda.addEnvironment( + 'WEBSOCKET_CALLBACK_URL', + webSocketEndpoint.websocketApiStage.callbackUrl + ); + webSocketEndpoint.webSocketApi.grantManageConnections(props.chatLlmProviderLambda); + + if (lambdaOrAlias instanceof lambda.Alias) { + new lambda.EventSourceMapping(construct, `${routeKey}EventSourceMapping`, { + target: lambdaOrAlias, + eventSourceArn: queue.queueArn + }); + queue.grantConsumeMessages(lambdaOrAlias); + } else { + new SqsToLambda(construct, `${routeKey}SqsToLambda`, { + existingQueueObj: queue, + deployDeadLetterQueue: false, + existingLambdaObj: lambdaOrAlias + }); + } } const routesListForSuppressions = ['$disconnect-Route']; diff --git a/source/infrastructure/lib/auth/cognito-setup.ts b/source/infrastructure/lib/auth/cognito-setup.ts index d70e4e45..81ed1dfa 100644 --- a/source/infrastructure/lib/auth/cognito-setup.ts +++ b/source/infrastructure/lib/auth/cognito-setup.ts @@ -155,7 +155,6 @@ export class CognitoSetup extends Construct { */ private cognitoGroupPolicyTableExport: dynamodb.ITable; - constructor(scope: Construct, id: string, props: CognitoSetupProps) { super(scope, id); this.stack = cdk.Stack.of(scope); @@ -187,7 +186,7 @@ export class CognitoSetup extends Construct { userPool.userPoolId, props.userPoolProps!.existingCognitoUserPoolId ).toString(); - + // Use CfnOutput for conditional UserPool in nested stack, allowing cross stack access without dependency on underlying condition if (cdk.Stack.of(this).nestedStackResource) { const output = new cdk.CfnOutput(this, 'UserPoolId', { value: userPoolId }); @@ -210,7 +209,9 @@ export class CognitoSetup extends Construct { // Use CfnOutput for conditional GroupPolicyTable in nested stack, allowing cross stack access without dependency on underlying condition if (cdk.Stack.of(this).nestedStackResource) { - const output = new cdk.CfnOutput(this, 'CognitoGroupPolicyTableName', { value: CognitoGroupPolicyTableName }); + const output = new cdk.CfnOutput(this, 'CognitoGroupPolicyTableName', { + value: CognitoGroupPolicyTableName + }); this.cognitoGroupPolicyTableExport = dynamodb.Table.fromTableName( cdk.Stack.of(this).nestedStackParent!, 'CognitoGroupPolicyTable', @@ -410,23 +411,30 @@ export class CognitoSetup extends Construct { return userPool; } - protected createUserAndUserGroup(props: UserPoolProps) { + public createAgentCoreResourceServer() { + return new cognito.UserPoolResourceServer(this, 'AgentCoreResourceServer', { + identifier: 'agentcore', + userPoolResourceServerName: 'agentcore', + userPool: this.userPool, + scopes: [ + { + scopeName: 'componentAccess', + scopeDescription: 'Scope for component authentication' + } + ] + }); + } + protected createUserAndUserGroup(props: UserPoolProps) { const cognitoGroupCondition = new cdk.CfnCondition(this, 'CognitoGroupCondition', { - expression: cdk.Fn.conditionNot( - cdk.Fn.conditionEquals(props.defaultUserEmail, '') - ) + expression: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(props.defaultUserEmail, '')) }); const cognitoUserCondition = new cdk.CfnCondition(this, 'CognitoUserCondition', { - expression: - cdk.Fn.conditionAnd( - cdk.Fn.conditionNot( - cdk.Fn.conditionEquals(props.defaultUserEmail, PLACEHOLDER_EMAIL) - ), - cognitoGroupCondition - ) - + expression: cdk.Fn.conditionAnd( + cdk.Fn.conditionNot(cdk.Fn.conditionEquals(props.defaultUserEmail, PLACEHOLDER_EMAIL)), + cognitoGroupCondition + ) }); const cognitoUser = new CfnUserPoolUser(this, 'DefaultUser', { @@ -613,7 +621,7 @@ export class CognitoSetup extends Construct { * @returns The user pool */ getUserPool(construct: Construct): cognito.IUserPool { - if(cdk.Stack.of(this) == cdk.Stack.of(construct)) { + if (cdk.Stack.of(this) == cdk.Stack.of(construct)) { return this.userPool; } return this.userPoolExport; @@ -627,14 +635,12 @@ export class CognitoSetup extends Construct { * @returns The user pool client */ getUserPoolClient(construct: Construct): cognito.IUserPoolClient { - - if(cdk.Stack.of(this) == cdk.Stack.of(construct)) { + if (cdk.Stack.of(this) == cdk.Stack.of(construct)) { return this.userPoolClient; } return this.userPoolClientExport; } - /** * Method to return the CognitoSetup group policy table. Returns the direct reference (including the conditional), if accessed from the same stack. * When accessed from a different stack, returns the CfnOutput reference to remove dependency on the conditional. @@ -643,7 +649,7 @@ export class CognitoSetup extends Construct { * @returns The cognito group policy table */ getCognitoGroupPolicyTable(construct: Construct): dynamodb.ITable { - if(cdk.Stack.of(this) == cdk.Stack.of(construct)) { + if (cdk.Stack.of(this) == cdk.Stack.of(construct)) { return this.cognitoGroupPolicyTable; } return this.cognitoGroupPolicyTableExport; diff --git a/source/infrastructure/lib/auth/component-cognito-app-client.ts b/source/infrastructure/lib/auth/component-cognito-app-client.ts new file mode 100644 index 00000000..599e2580 --- /dev/null +++ b/source/infrastructure/lib/auth/component-cognito-app-client.ts @@ -0,0 +1,134 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cognito from 'aws-cdk-lib/aws-cognito'; +import { NagSuppressions } from 'cdk-nag'; +import { Construct } from 'constructs'; +import * as cfn_guard from '../utils/cfn-guard-suppressions'; + +/** + * Component types supported by GAAB v4.0.0 + */ +export enum ComponentType { + AGENT = 'AGENT', + WORKFLOW = 'WORKFLOW', + MCP = 'MCP' +} + +/** + * Default token validity periods for component authentication + */ +export enum ComponentTokenValidity { + ACCESS_TOKEN_MINUTES = 60, + REFRESH_TOKEN_HOURS = 24, + AUTH_SESSION_MINUTES = 3 +} + +export interface ComponentCognitoAppClientProps { + /** + * The Cognito User Pool to create the App Client in + */ + userPool: cognito.IUserPool; + + /** + * The short use case ID (8-character UUID) that uniquely identifies this component instance + * This is generated for every use case deployment and used to create unique client names + */ + useCaseShortId: string; + + /** + * The type of component being deployed + */ + componentType: ComponentType; + + /** + * Access token validity in minutes + */ + accessTokenValidityMinutes?: number; + + /** + * Refresh token validity in hours + */ + refreshTokenValidityHours?: number; +} + +/** + * CDK Construct for creating Cognito App Clients specifically for GAAB v4.0.0 component authentication. + * + * Creates App Clients configured for machine-to-machine (M2M) authentication between components + * (Agents, Workflows, MCP Servers) using OAuth 2.0 Client Credentials flow. + * + * The client secret is securely managed by Amazon Bedrock AgentCore Identity service, + * not stored in AWS Secrets Manager. AgentCore Identity handles secure storage and + * token retrieval through the @requires_access_token decorator. + * + * Key differences from web app clients: + * - Generates client secret for secure M2M communication + * - Uses CLIENT_CREDENTIALS_AUTH flow instead of OAuth authorization code flow + * - Shorter token validity periods appropriate for component-to-component calls + * - Client secret managed by AgentCore Identity (not Secrets Manager) + */ +export class ComponentCognitoAppClient extends Construct { + public readonly appClient: cognito.CfnUserPoolClient; + + constructor(scope: Construct, id: string, props: ComponentCognitoAppClientProps) { + super(scope, id); + + this.appClient = new cognito.CfnUserPoolClient(this, 'ComponentAppClient', { + userPoolId: props.userPool.userPoolId, + clientName: this.generateClientName(props.componentType, props.useCaseShortId), + generateSecret: true, + explicitAuthFlows: ['ALLOW_REFRESH_TOKEN_AUTH'], + allowedOAuthFlowsUserPoolClient: true, + allowedOAuthFlows: ['client_credentials'], + allowedOAuthScopes: ['agentcore/componentAccess'], + tokenValidityUnits: { + accessToken: 'minutes', + refreshToken: 'hours' + }, + accessTokenValidity: props.accessTokenValidityMinutes || ComponentTokenValidity.ACCESS_TOKEN_MINUTES, + refreshTokenValidity: props.refreshTokenValidityHours || ComponentTokenValidity.REFRESH_TOKEN_HOURS, + preventUserExistenceErrors: 'ENABLED', + enableTokenRevocation: true, + authSessionValidity: ComponentTokenValidity.AUTH_SESSION_MINUTES, + supportedIdentityProviders: ['COGNITO'] + }); + + NagSuppressions.addResourceSuppressions(this.appClient, [ + { + id: 'AwsSolutions-COG7', + reason: 'This App Client is specifically designed for machine-to-machine authentication and does not require MFA' + } + ]); + + cfn_guard.addCfnSuppressRules(this.appClient, [ + { + id: 'W57', + reason: 'This App Client is designed for machine-to-machine authentication and uses client credentials flow' + } + ]); + } + + /** + * Generate a client name using component type and use case short ID + */ + private generateClientName(componentType: ComponentType, useCaseShortId: string): string { + const componentTypeLower = componentType.toLowerCase(); + return `${componentTypeLower}-${useCaseShortId}-client`; + } + + /** + * Get the client ID for use in stack outputs or other constructs + */ + public getClientId(): string { + return this.appClient.attrClientId; + } + + /** + * Get the client secret for use in OAuth configurations + */ + public getClientSecret(): string { + return this.appClient.attrClientSecret; + } +} diff --git a/source/infrastructure/lib/bedrock-agent-stack.ts b/source/infrastructure/lib/bedrock-agent-stack.ts index e85fce48..901da460 100644 --- a/source/infrastructure/lib/bedrock-agent-stack.ts +++ b/source/infrastructure/lib/bedrock-agent-stack.ts @@ -58,8 +58,8 @@ export class BedrockAgentParameters extends UseCaseParameters { const existingParameterGroups = this.cfnStack.templateOptions.metadata !== undefined && - Object.hasOwn(this.cfnStack.templateOptions.metadata, 'AWS::CloudFormation::Interface') && - this.cfnStack.templateOptions.metadata['AWS::CloudFormation::Interface'].ParameterGroups !== undefined + Object.hasOwn(this.cfnStack.templateOptions.metadata, 'AWS::CloudFormation::Interface') && + this.cfnStack.templateOptions.metadata['AWS::CloudFormation::Interface'].ParameterGroups !== undefined ? this.cfnStack.templateOptions.metadata['AWS::CloudFormation::Interface'].ParameterGroups : []; @@ -67,6 +67,44 @@ export class BedrockAgentParameters extends UseCaseParameters { Label: { default: 'Please provide Bedrock Agent configuration' }, Parameters: [this.bedrockAgentId.logicalId, this.bedrockAgentAliasId.logicalId] }); + + // Multimodal parameters are not supported for Bedrock Agent Use Cases + new cdk.CfnRule(this.cfnStack, 'NoMultimodalEnabledForBedrockAgentRule', { + ruleCondition: cdk.Fn.conditionEquals(this.multimodalEnabled.valueAsString, 'Yes'), + assertions: [ + { + assert: cdk.Fn.conditionEquals('false', 'true'), + assertDescription: + 'Multimodal functionality is not supported for Bedrock Agent Use Cases. Please set MultimodalEnabled to No.' + } + ] + }); + + new cdk.CfnRule(this.cfnStack, 'NoMultimodalBucketForBedrockAgentRule', { + ruleCondition: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.existingMultimodalDataBucket.valueAsString, '') + ), + assertions: [ + { + assert: cdk.Fn.conditionEquals('false', 'true'), + assertDescription: + 'Multimodal data bucket is not supported for Bedrock Agent Use Cases. Please leave ExistingMultimodalDataBucket empty.' + } + ] + }); + + new cdk.CfnRule(this.cfnStack, 'NoMultimodalTableForBedrockAgentRule', { + ruleCondition: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.existingMultimodalDataMetadataTable.valueAsString, '') + ), + assertions: [ + { + assert: cdk.Fn.conditionEquals('false', 'true'), + assertDescription: + 'Multimodal metadata table is not supported for Bedrock Agent Use Cases. Please leave ExistingMultimodalDataMetadataTable empty.' + } + ] + }); } } @@ -79,15 +117,15 @@ export class BedrockAgent extends UseCaseStack { constructor(stack: Construct, id: string, props: BaseStackProps) { super(stack, id, props); this.withAdditionalResourceSetup(props); - this.withAnonymousMetrics(props); + this.withMetrics(props); } /** * setting websocket route for agent stack * @returns */ - protected getWebSocketRoutes(): Map { - return new Map().set('invokeAgent', this.chatLlmProviderLambda); + protected getWebSocketRoutes(): Map { + return new Map().set('invokeAgent', this.chatLlmProviderAlias); } /** @@ -198,11 +236,11 @@ export class BedrockAgent extends UseCaseStack { // connection to the conversation memory // prettier-ignore new LambdaToDynamoDB(this, 'ChatProviderLambdaToConversationTable', { // NOSONAR - construct instantiation - existingLambdaObj: this.chatLlmProviderLambda, - existingTableObj: this.chatStorageSetup.chatStorage.conversationTable, - tablePermissions: 'ReadWrite', - tableEnvironmentVariableName: CONVERSATION_TABLE_NAME_ENV_VAR - }); + existingLambdaObj: this.chatLlmProviderLambda, + existingTableObj: this.chatStorageSetup.chatStorage.conversationTable, + tablePermissions: 'ReadWrite', + tableEnvironmentVariableName: CONVERSATION_TABLE_NAME_ENV_VAR + }); } protected initializeCfnParameters(): void { diff --git a/source/infrastructure/lib/bedrock-chat-stack.ts b/source/infrastructure/lib/bedrock-chat-stack.ts index 96110e36..6b723279 100644 --- a/source/infrastructure/lib/bedrock-chat-stack.ts +++ b/source/infrastructure/lib/bedrock-chat-stack.ts @@ -37,7 +37,7 @@ export class BedrockChat extends TextUseCase { constructor(scope: Construct, id: string, props: BaseStackProps) { super(scope, id, props); this.withAdditionalResourceSetup(props); - this.withAnonymousMetrics(props); + this.withMetrics(props); } protected withAdditionalResourceSetup(props: BaseStackProps): void { diff --git a/source/infrastructure/lib/deployment-platform-stack.ts b/source/infrastructure/lib/deployment-platform-stack.ts index 650d2089..bcb0b774 100644 --- a/source/infrastructure/lib/deployment-platform-stack.ts +++ b/source/infrastructure/lib/deployment-platform-stack.ts @@ -3,10 +3,11 @@ // SPDX-License-Identifier: Apache-2.0 import * as cdk from 'aws-cdk-lib'; +import * as ssm from 'aws-cdk-lib/aws-ssm'; import { Construct } from 'constructs'; import { ApplicationSetup } from './framework/application-setup'; -import { BaseStack, BaseStackProps } from './framework/base-stack'; +import { BaseStack, BaseStackProps, BaseParameters } from './framework/base-stack'; import { DashboardType } from './metrics/custom-dashboard'; import { CopyUIAssets } from './s3web/copy-ui-assets-nested-stack'; import { UIDistribution } from './s3web/ui-distribution-nested-stack'; @@ -18,11 +19,27 @@ import { INTERNAL_EMAIL_DOMAIN, OPTIONAL_EMAIL_REGEX_PATTERN, REST_API_NAME_ENV_VAR, + SHARED_ECR_CACHE_PREFIX_ENV_VAR, UIAssetFolders, USE_CASE_UUID_ENV_VAR, WEB_CONFIG_PREFIX } from './utils/constants'; import { VPCSetup } from './vpc/vpc-setup'; +import { ECRPullThroughCache } from './use-case-stacks/agent-core/components/ecr-pull-through-cache'; + +export class DeploymentPlatformParameters extends BaseParameters { + constructor(stack: cdk.Stack) { + super(stack); + } + + protected setupUseCaseConfigTableParams(stack: cdk.Stack): void { + //override + } + + protected setupUUIDParams(stack: cdk.Stack): void { + // override + } +} /** * The main stack creating the infrastructure @@ -49,13 +66,17 @@ export class DeploymentPlatformStack extends BaseStack { */ public readonly deploymentPlatformStorageSetup: DeploymentPlatformStorageSetup; + /** + * Shared ECR Pull-Through Cache for AgentCore images used by dashboard-deployed use cases + */ + public readonly sharedEcrPullThroughCache: ECRPullThroughCache; + constructor(scope: Construct, id: string, props: BaseStackProps) { super(scope, id, props); new cdk.CfnMapping(this, 'Solution', { mapping: { Data: { - SendAnonymousUsageData: 'Yes', ID: props.solutionID, Version: props.solutionVersion, SolutionName: props.solutionName @@ -73,34 +94,21 @@ export class DeploymentPlatformStack extends BaseStack { const adminUserEmail = new cdk.CfnParameter(this, 'AdminUserEmail', { type: 'String', - description: 'Optional - Email used to create the default cognito user for the admin platform. If empty, the Cognito User, Group and Attachment will not be created.', + description: + 'Optional - Email used to create the default cognito user for the admin platform. If empty, the Cognito User, Group and Attachment will not be created.', allowedPattern: OPTIONAL_EMAIL_REGEX_PATTERN, constraintDescription: 'Please provide a valid email' }); - const existingCognitoUserPoolId = new cdk.CfnParameter(this, 'ExistingCognitoUserPoolId', { - type: 'String', - allowedPattern: '^$|^[0-9a-zA-Z_-]{9,24}$', - maxLength: 24, - description: - 'UserPoolId of an existing cognito user pool which this use case will be authenticated with. Typically will be provided when deploying from the deployment platform, but can be omitted when deploying this use-case stack standalone.', - default: '' - }); - - const existingUserPoolClientId = new cdk.CfnParameter(this, 'ExistingCognitoUserPoolClient', { - type: 'String', - allowedPattern: '^$|^[a-z0-9]{3,128}$', - maxLength: 128, - description: - 'Optional - Provide a User Pool Client (App Client) to use an existing one. If not provided a new User Pool Client will be created. This parameter can only be provided if an existing User Pool Id is provided', - default: '' - }); - new cdk.CfnRule(this, 'CognitoUserPoolAndClientRule', { - ruleCondition: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(existingCognitoUserPoolId.valueAsString, '')), + ruleCondition: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.stackParameters.existingCognitoUserPoolId.valueAsString, '') + ), assertions: [ { - assert: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(existingUserPoolClientId.valueAsString, '')), + assert: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.stackParameters.existingUserPoolClientId.valueAsString, '') + ), assertDescription: 'If an existing User Pool Id is provided, then an existing User Pool Client Id must also be provided.' } @@ -108,7 +116,9 @@ export class DeploymentPlatformStack extends BaseStack { }); new cdk.CfnRule(this, 'CognitoDomainNotProvidedIfPoolIsRule', { - ruleCondition: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(existingCognitoUserPoolId.valueAsString, '')), + ruleCondition: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.stackParameters.existingCognitoUserPoolId.valueAsString, '') + ), assertions: [ { assert: cdk.Fn.conditionEquals(this.stackParameters.cognitoUserPoolClientDomain.valueAsString, ''), @@ -150,13 +160,25 @@ export class DeploymentPlatformStack extends BaseStack { default: 'Optional: Provide existing Cognito UserPool and UserPoolClient IDs if you want to use your own managed resources. If left empty, the solution will manage these resources for you. Note: To prevent the creation of Cognito resources within the user pool (Users/Groups), simply leave the AdminUserEmail parameter empty.' }, - Parameters: [existingCognitoUserPoolId.logicalId, existingUserPoolClientId.logicalId] + Parameters: [ + this.stackParameters.existingCognitoUserPoolId.logicalId, + this.stackParameters.existingUserPoolClientId.logicalId + ] }); // internal users are identified by being of the form "X@amazon.Y" const isInternalUserCondition: cdk.CfnCondition = new cdk.CfnCondition(this, 'IsInternalUserCondition', { expression: cdk.Fn.conditionEquals( - cdk.Fn.select(0, cdk.Fn.split('.', cdk.Fn.select(1, cdk.Fn.split('@', cdk.Fn.join("", [adminUserEmail.valueAsString, "@example.com"]))))), + cdk.Fn.select( + 0, + cdk.Fn.split( + '.', + cdk.Fn.select( + 1, + cdk.Fn.split('@', cdk.Fn.join('', [adminUserEmail.valueAsString, '@example.com'])) + ) + ) + ), INTERNAL_EMAIL_DOMAIN ) }); @@ -188,6 +210,15 @@ export class DeploymentPlatformStack extends BaseStack { ...this.baseStackProps }); + // Create shared ECR Pull-Through Cache for AgentCore images + // This cache will be used by all agent builder and workflow use cases deployed through the dashboard + const solutionVersion = process.env.VERSION ?? this.node.tryGetContext('solution_version'); + this.sharedEcrPullThroughCache = new ECRPullThroughCache(this, 'SharedECRPullThroughCache', { + gaabVersion: solutionVersion, + customResourceLambda: this.applicationSetup.customResourceLambda + // No useCaseShortId provided - will generate from stack name (shared cache) + }); + this.useCaseManagementSetup = new UseCaseManagementSetup(this, 'UseCaseManagementSetup', { defaultUserEmail: adminUserEmail.valueAsString, webConfigSSMKey: webConfigSsmKey, @@ -199,23 +230,93 @@ export class DeploymentPlatformStack extends BaseStack { deployWebApp: this.deployWebApp.valueAsString, deployWebAppCondition: uiInfrastructureBuilder.deployWebAppCondition, accessLoggingBucket: this.applicationSetup.accessLoggingBucket, - existingCognitoUserPoolId: existingCognitoUserPoolId.valueAsString, - existingCognitoUserPoolClientId: existingUserPoolClientId.valueAsString, + existingCognitoUserPoolId: this.stackParameters.existingCognitoUserPoolId.valueAsString, + existingCognitoUserPoolClientId: this.stackParameters.existingUserPoolClientId.valueAsString, llmConfigTable: this.deploymentPlatformStorageSetup.deploymentPlatformStorage.useCaseConfigTable, ...this.baseStackProps }); - this.deploymentPlatformStorageSetup.addLambdaDependencies({ - deploymentApiLambda: this.useCaseManagementSetup.useCaseManagement.useCaseManagementApiLambda, - modelInfoApiLambda: this.useCaseManagementSetup.useCaseManagement.modelInfoApiLambda, - feedbackApiLambda: this.useCaseManagementSetup.feedbackSetupStack.feedbackAPILambda + this.deploymentPlatformStorageSetup.configureDeploymentApiLambda( + this.useCaseManagementSetup.useCaseManagement.useCaseManagementApiLambda + ); + this.deploymentPlatformStorageSetup.configureModelInfoApiLambda( + this.useCaseManagementSetup.useCaseManagement.modelInfoApiLambda + ); + this.deploymentPlatformStorageSetup.configureFeedbackApiLambda( + this.useCaseManagementSetup.feedbackSetupStack.feedbackAPILambda + ); + this.deploymentPlatformStorageSetup.configureUseCaseManagementApiLambda( + this.useCaseManagementSetup.useCaseManagement.mcpManagementApiLambda, + 'MCP' + ); + this.deploymentPlatformStorageSetup.configureUseCaseManagementApiLambda( + this.useCaseManagementSetup.useCaseManagement.agentManagementApiLambda, + 'Agent', + true + ); + this.deploymentPlatformStorageSetup.configureUseCaseManagementApiLambda( + this.useCaseManagementSetup.useCaseManagement.workflowManagementApiLambda, + 'Workflow', + true + ); + this.deploymentPlatformStorageSetup.configureFilesHandlerLambda( + this.useCaseManagementSetup.multimodalSetup.filesHandlerLambda + ); + + // Create SSM parameter for Strands tools configuration + const strandsToolsParameter = new ssm.StringParameter(this, 'StrandsToolsParameter', { + parameterName: `/gaab/${cdk.Aws.STACK_NAME}/strands-tools`, + stringValue: JSON.stringify([ + { + name: 'Calculator', + description: 'Perform mathematical calculations and operations', + value: 'calculator', + category: 'Math', + isDefault: true + }, + { + name: 'Current Time', + description: 'Get current date and time information', + value: 'current_time', + category: 'Utilities', + isDefault: true + }, + { + name: 'Environment', + description: 'Access environment variables and system information', + value: 'environment', + category: 'System', + isDefault: false + } + ]), + description: 'Available Strands SDK tools for Agent Builder and Workflow use cases', + simpleName: false }); + // Grant MCP Management Lambda permission to read Strands tools parameter and set environment variable + strandsToolsParameter.grantRead(this.useCaseManagementSetup.useCaseManagement.mcpManagementApiLambda.role!); + this.useCaseManagementSetup.useCaseManagement.mcpManagementApiLambda.addEnvironment( + 'STRANDS_TOOLS_SSM_PARAM', + strandsToolsParameter.parameterName + ); + this.applicationSetup.scheduledMetricsLambda.addEnvironment( REST_API_NAME_ENV_VAR, `${this.useCaseManagementSetup.useCaseManagement.stackName}-UseCaseManagementAPI` ); + // Add shared ECR cache prefix to agent management lambda + this.useCaseManagementSetup.useCaseManagement.agentManagementApiLambda.addEnvironment( + SHARED_ECR_CACHE_PREFIX_ENV_VAR, + this.sharedEcrPullThroughCache.getRepositoryPrefix() + ); + + // Add shared ECR cache prefix to workflow management lambda + this.useCaseManagementSetup.useCaseManagement.workflowManagementApiLambda.addEnvironment( + SHARED_ECR_CACHE_PREFIX_ENV_VAR, + this.sharedEcrPullThroughCache.getRepositoryPrefix() + ); + const userPoolId = this.useCaseManagementSetup.userPool.userPoolId; const userPoolClientId = this.useCaseManagementSetup.userPoolClient.userPoolClientId; @@ -281,6 +382,11 @@ export class DeploymentPlatformStack extends BaseStack { }); cloudfrontUrlOutput.condition = uiInfrastructureBuilder.deployWebAppCondition; + new cdk.CfnOutput(cdk.Stack.of(this), 'SharedECRCachePrefix', { + value: this.sharedEcrPullThroughCache.getRepositoryPrefix(), + description: 'Shared ECR Pull-Through Cache repository prefix for AgentCore images' + }); + new cdk.CfnOutput(cdk.Stack.of(this), 'CognitoClientId', { value: userPoolClientId }); @@ -301,13 +407,25 @@ export class DeploymentPlatformStack extends BaseStack { value: this.deploymentPlatformStorageSetup.deploymentPlatformStorage.useCasesTable.tableName }); - this.applicationSetup.addAnonymousMetricsCustomLambda(props.solutionID, props.solutionVersion, { + new cdk.CfnOutput(cdk.Stack.of(this), 'MultimodalDataBucketName', { + value: this.useCaseManagementSetup.multimodalSetup.multimodalDataBucket.bucketName, + description: 'S3 bucket for storing multimodal files' + }); + + new cdk.CfnOutput(cdk.Stack.of(this), 'MultimodalDataMetadataTable', { + value: this.useCaseManagementSetup.multimodalSetup.multimodalDataMetadataTable.tableName, + description: 'DynamoDB table for storing multimodal files metadata' + }); + + this.applicationSetup.addMetricsCustomLambda(props.solutionID, props.solutionVersion, { UUID: uuid, VPC_ENABLED: this.vpcEnabled.valueAsString, CREATE_VPC: this.createNewVpc.valueAsString }); } - + protected initializeCfnParameters(): void { + this.stackParameters = new DeploymentPlatformParameters(this); + } protected setupVPC(): VPCSetup { return new VPCSetup(this, 'VPC', { stackType: 'deployment-platform', diff --git a/source/infrastructure/lib/feedback/feedback-setup-stack.ts b/source/infrastructure/lib/feedback/feedback-setup-stack.ts index 92a394a3..a2d1f8fa 100644 --- a/source/infrastructure/lib/feedback/feedback-setup-stack.ts +++ b/source/infrastructure/lib/feedback/feedback-setup-stack.ts @@ -10,7 +10,7 @@ import * as s3 from 'aws-cdk-lib/aws-s3'; import { NagSuppressions } from 'cdk-nag'; import * as cfn_nag from '../utils/cfn-guard-suppressions'; import { COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, LAMBDA_TIMEOUT_MINS, StackDeploymentSource } from '../utils/constants'; -import { feedbackRequestSchema } from './../api/model-schema/feedback-body'; +import { feedbackRequestSchema } from './../api/model-schema/feedback/feedback-body'; import { createCustomResourceForLambdaLogRetention, createDefaultLambdaRole, diff --git a/source/infrastructure/lib/framework/application-setup.ts b/source/infrastructure/lib/framework/application-setup.ts index e5d64d28..9c64f8dd 100644 --- a/source/infrastructure/lib/framework/application-setup.ts +++ b/source/infrastructure/lib/framework/application-setup.ts @@ -138,11 +138,6 @@ export class ApplicationSetup extends Construct { */ public readonly scheduledMetricsLambda: lambda.Function; - /** - * Condition to determine if anonymous metrics should be collected - */ - private sendAnonymousMetricsCondition: cdk.CfnCondition; - /** * This instance is created only after 'createWebConfigStorage' is called. This instance refers * to the CustomResource that writes the web configuration required for the UI project in to @@ -151,7 +146,7 @@ export class ApplicationSetup extends Construct { private webConfigResource: cdk.CustomResource; /** - * This Construct refers to the Anonymous Metrics Solution Helper which is used to send metrics + * This Construct refers to the Metrics Solution Helper which is used to send metrics * at cloudformation events of create, update and delete */ public solutionHelper: Construct; @@ -160,10 +155,6 @@ export class ApplicationSetup extends Construct { super(scope, id); this.scope = scope; - this.sendAnonymousMetricsCondition = new cdk.CfnCondition(cdk.Stack.of(this), 'AnonymousDataAWSCondition', { - expression: cdk.Fn.conditionEquals(cdk.Fn.findInMap('Solution', 'Data', 'SendAnonymousUsageData'), 'Yes') - }); - this.accessLoggingBucket = new s3.Bucket(this, 'AccessLog', { versioned: false, // NOSONAR - bucket versioning is recommended in the IG, but is not enforced encryption: s3.BucketEncryption.S3_MANAGED, @@ -178,7 +169,6 @@ export class ApplicationSetup extends Construct { solutionID: props.solutionID, solutionVersion: props.solutionVersion, useCaseUUID: props.useCaseUUID, - sendAnonymousMetricsCondition: this.sendAnonymousMetricsCondition }); this.customResourceLambda = customInfraSetup.customResourceLambda; this.customResourceRole = customInfraSetup.lambdaServiceRole; @@ -229,12 +219,12 @@ export class ApplicationSetup extends Construct { } /** - * This method adds the Anonymous Metrics lambda function to the solution. + * This method adds the Metrics lambda function to the solution. * * @param solutionId - The solution id for the AWS solution * @param solutionVersion - The solution version for the AWS solution */ - public addAnonymousMetricsCustomLambda( + public addMetricsCustomLambda( solutionId: string, solutionVersion: string, additionalProperties?: { [key: string]: any } @@ -243,8 +233,7 @@ export class ApplicationSetup extends Construct { customResource: this.customResourceLambda, solutionID: solutionId, version: solutionVersion, - resourceProperties: additionalProperties, - sendAnonymousMetricsCondition: this.sendAnonymousMetricsCondition + resourceProperties: additionalProperties }); } diff --git a/source/infrastructure/lib/framework/base-stack.ts b/source/infrastructure/lib/framework/base-stack.ts index c8be5e75..9d594b47 100644 --- a/source/infrastructure/lib/framework/base-stack.ts +++ b/source/infrastructure/lib/framework/base-stack.ts @@ -10,17 +10,111 @@ import { VPCSetup } from '../vpc/vpc-setup'; import { ApplicationSetup } from './application-setup'; export class BaseParameters { + /** + * Unique UUID for this deployed use case within an application. Provided by the deployment platform if in use. + */ + public useCaseUUID: cdk.CfnParameter; + + /** + * First 8 characters of the useCaseUUID. + */ + public useCaseShortId: string; + + /** + * Name of the table that stores the configuration for a use case. + */ + public useCaseConfigTableName: cdk.CfnParameter; + + /** + * Key corresponding of the record containing configurations required by the chat provider lambda at runtime. The record in the table should have a "key" + * attribute matching this value, and a "config" attribute containing the desired config. This record will be populated by the deployment platform if in + * use. For standalone deployments of this use-case, a manually created entry in the table defined in `UseCaseConfigTableName` is required. + * Consult the implementation guide for more details. + */ + public useCaseConfigRecordKey: cdk.CfnParameter; + + /** + * UserPoolId of an existing cognito user pool which this use case will be authenticated with. + * Typically will be provided when deploying from the deployment platform, but can be omitted when deploying this use-case stack standalone. + */ + public existingCognitoUserPoolId: cdk.CfnParameter; + + /** + * Cfn parameter for existing user pool client Id (App Client Id) + */ + public existingUserPoolClientId: cdk.CfnParameter; + /** * Optional parameter to specify domain when deploying the template. If not provided the template will generate * a random domain prefix using a hashing strategy using AWS account number, region, and stack name. */ - protected cognitoUserPoolClientDomain: cdk.CfnParameter; + public cognitoUserPoolClientDomain: cdk.CfnParameter; protected cfnStack: cdk.Stack; - constructor(stack: BaseStack) { + constructor(stack: cdk.Stack) { this.cfnStack = cdk.Stack.of(stack); + this.setupUseCaseConfigTableParams(stack); + this.setupCognitoUserPoolParams(stack); + this.setupUUIDParams(stack); + this.setupCognitoUserPoolClientDomainParams(stack); + } + + protected setupUseCaseConfigTableParams(stack: cdk.Stack): void { + this.useCaseConfigTableName = new cdk.CfnParameter(stack, 'UseCaseConfigTableName', { + type: 'String', + maxLength: 255, + allowedPattern: '^[a-zA-Z0-9_.-]{3,255}$', + description: 'DynamoDB table name for the table which contains the configuration for this use case.', + constraintDescription: + 'This parameter is required. The stack will read the configuration from this table to configure the resources during deployment' + }); + + this.useCaseConfigRecordKey = new cdk.CfnParameter(stack, 'UseCaseConfigRecordKey', { + type: 'String', + maxLength: 2048, + description: + 'Key corresponding of the record containing configurations required by the chat provider lambda at runtime. The record in the table should have a "key" attribute matching this value, and a "config" attribute containing the desired config. This record will be populated by the deployment platform if in use. For standalone deployments of this use-case, a manually created entry in the table defined in `UseCaseConfigTableName` is required. Consult the implementation guide for more details.' + }); + } + + protected setupCognitoUserPoolParams(stack: cdk.Stack): void { + this.existingCognitoUserPoolId = new cdk.CfnParameter(stack, 'ExistingCognitoUserPoolId', { + type: 'String', + allowedPattern: '^$|^[0-9a-zA-Z_-]{9,24}$', + maxLength: 24, + description: + 'Optional - UserPoolId of an existing cognito user pool which this use case will be authenticated with. Typically will be provided when deploying from the deployment platform, but can be omitted when deploying this use-case stack standalone.', + default: '' + }); + + this.existingUserPoolClientId = new cdk.CfnParameter(stack, 'ExistingCognitoUserPoolClient', { + type: 'String', + allowedPattern: '^$|^[a-z0-9]{3,128}$', + maxLength: 128, + description: + 'Optional - Provide a User Pool Client (App Client) to use an existing one. If not provided a new User Pool Client will be created. This parameter can only be provided if an existing User Pool Id is provided', + default: '' + }); + } + protected setupUUIDParams(stack: cdk.Stack): void { + this.useCaseUUID = new cdk.CfnParameter(stack, 'UseCaseUUID', { + type: 'String', + description: + 'UUID to identify this deployed use case within an application. Please provide a 36 character long UUIDv4. If you are editing the stack, do not modify the value (retain the value used during creating the stack). A different UUID when editing the stack will result in new AWS resource created and deleting the old ones', + allowedPattern: + '^[0-9a-fA-F]{8}$|^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$', + minLength: 8, + maxLength: 36, + constraintDescription: + 'Using digits and the letters A through F, please provide a 8 character id or a 36 character long UUIDv4.' + }); + + this.useCaseShortId = cdk.Fn.select(0, cdk.Fn.split('-', this.useCaseUUID.valueAsString)); + } + + protected setupCognitoUserPoolClientDomainParams(stack: cdk.Stack): void { this.cognitoUserPoolClientDomain = new cdk.CfnParameter(stack, 'CognitoDomainPrefix', { type: 'String', description: @@ -68,42 +162,42 @@ export class BaseStack extends cdk.Stack { /** * Construct managing the optional deployment of a VPC in a nested stack. */ - public readonly vpcSetup: VPCSetup; + public vpcSetup: VPCSetup; /** * If set to 'false', the deployed use case stack will not have a VPC */ - public readonly vpcEnabled: cdk.CfnParameter; + public vpcEnabled: cdk.CfnParameter; /** * The parameter to decide if customer will provide an existing VPC or the solution should create a new VPC */ - public readonly createNewVpc: cdk.CfnParameter; + public createNewVpc: cdk.CfnParameter; /** * AWS VPC IPAM Id to use for the VPC CIDR block */ - public readonly iPamPoolId: cdk.CfnParameter; + public iPamPoolId: cdk.CfnParameter; /** * ID of an existing VPC to be used for the use case. If none is provided, a new VPC will be created. */ - public readonly existingVpcId: cdk.CfnParameter; + public existingVpcId: cdk.CfnParameter; /** * ID of an existing Private Subnet to be used for the use case. */ - public readonly existingPrivateSubnetIds: cdk.CfnParameter; + public existingPrivateSubnetIds: cdk.CfnParameter; /** * SecurityGroup ids configured in the VPC */ - public readonly existingSecurityGroupIds: cdk.CfnParameter; + public existingSecurityGroupIds: cdk.CfnParameter; /** * AZs for the VPC */ - public readonly vpcAzs: cdk.CfnParameter; + public vpcAzs: cdk.CfnParameter; /** * condition to deploy VPC for use case stacks @@ -123,32 +217,32 @@ export class BaseStack extends cdk.Stack { /** * condition to deploy WebApp */ - public readonly deployWebApp: cdk.CfnParameter; + public deployWebApp: cdk.CfnParameter; /** * Rule to check for existing VPC if required parameters have been provided as CloudFormation input parameters */ - public readonly checkIfExistingVPCParamsAreProvided: cdk.CfnRule; + public checkIfExistingVPCParamsAreProvided: cdk.CfnRule; /** * Rule to check if existing VPC parameters were provided as CloudFormation input parameters when we are either creating a new VPC or not using VPC */ - public readonly checkIfExistingVPCParamsAreProvidedWhenNotAllowed: cdk.CfnRule; + public checkIfExistingVPCParamsAreProvidedWhenNotAllowed: cdk.CfnRule; /** * Rule to check if existing VPC parameters are empty, if either deployVPC is 'No', or deployVPC is 'Yes' and createNewVpc is 'Yes' */ - public readonly checkIfExistingVPCParamsAreEmpty: cdk.CfnRule; + public checkIfExistingVPCParamsAreEmpty: cdk.CfnRule; /** * The security group Ids finally assigned post resolving the condition */ - public readonly transpiredSecurityGroupIds: string; + public transpiredSecurityGroupIds: string; /** * The private subnet Ids finally assigned post resolving the condition */ - public readonly transpiredPrivateSubnetIds: string; + public transpiredPrivateSubnetIds: string; /** * core properties associated with stack @@ -172,6 +266,18 @@ export class BaseStack extends cdk.Stack { solutionVersion: props.solutionVersion }; + this.initializeCfnParameters(); + this.applicationSetup = this.createApplicationSetup(props); + + // Initialize base stack features if enabled + this.initializeBaseStackParameters(); + this.setupBaseStackResources(props); + } + + /** + * Initialize base stack parameters (VPC, deployment, etc.) + */ + protected initializeBaseStackParameters(): void { const stack = cdk.Stack.of(this); this.vpcEnabled = new cdk.CfnParameter(this, 'VpcEnabled', { @@ -231,8 +337,13 @@ export class BaseStack extends cdk.Stack { ParameterGroups: existingParameterGroups } }; + } - this.initializeCfnParameters(); + /** + * Setup base stack resources (VPC setup, application setup, etc.) + */ + protected setupBaseStackResources(props: BaseStackProps): void { + const stack = cdk.Stack.of(this); const captureExistingVPCParamerters = new ExistingVPCParameters(this); this.existingVpcId = captureExistingVPCParamerters.existingVpcId; @@ -350,7 +461,6 @@ export class BaseStack extends cdk.Stack { ] }); - this.applicationSetup = this.createApplicationSetup(props); this.vpcSetup = this.setupVPC(); // conditionally read subnet Ids and security group Ids from either the vpc provisioned by the solution diff --git a/source/infrastructure/lib/framework/text-use-case-stack.ts b/source/infrastructure/lib/framework/text-use-case-stack.ts index f0b96211..760f1086 100644 --- a/source/infrastructure/lib/framework/text-use-case-stack.ts +++ b/source/infrastructure/lib/framework/text-use-case-stack.ts @@ -272,6 +272,44 @@ export class TextUseCaseParameters extends UseCaseParameters { } ] }); + + // Multimodal parameters are not supported for Text Use Cases + new cdk.CfnRule(this.cfnStack, 'NoMultimodalEnabledForTextUseCaseRule', { + ruleCondition: cdk.Fn.conditionEquals(this.multimodalEnabled.valueAsString, 'Yes'), + assertions: [ + { + assert: cdk.Fn.conditionEquals('false', 'true'), + assertDescription: + 'Multimodal functionality is not supported for Text Use Cases. Please set MultimodalEnabled to No.' + } + ] + }); + + new cdk.CfnRule(this.cfnStack, 'NoMultimodalBucketForTextUseCaseRule', { + ruleCondition: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.existingMultimodalDataBucket.valueAsString, '') + ), + assertions: [ + { + assert: cdk.Fn.conditionEquals('false', 'true'), + assertDescription: + 'Multimodal data bucket is not supported for Text Use Cases. Please leave ExistingMultimodalDataBucket empty.' + } + ] + }); + + new cdk.CfnRule(this.cfnStack, 'NoMultimodalTableForTextUseCaseRule', { + ruleCondition: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.existingMultimodalDataMetadataTable.valueAsString, '') + ), + assertions: [ + { + assert: cdk.Fn.conditionEquals('false', 'true'), + assertDescription: + 'Multimodal metadata table is not supported for Text Use Cases. Please leave ExistingMultimodalDataMetadataTable empty.' + } + ] + }); } } @@ -307,8 +345,8 @@ export abstract class TextUseCase extends UseCaseStack { * * @returns */ - protected getWebSocketRoutes(): Map { - return new Map().set('sendMessage', this.chatLlmProviderLambda); + protected getWebSocketRoutes(): Map { + return new Map().set('sendMessage', this.chatLlmProviderAlias); } /** @@ -427,8 +465,8 @@ export abstract class TextUseCase extends UseCaseStack { this.kendraIndexCreatedCondition = cdk.Fn.conditionIf(deployKendraIndexCondition.logicalId, 'Yes', 'No'); } - protected withAnonymousMetrics(props: BaseStackProps) { - this.applicationSetup.addAnonymousMetricsCustomLambda(props.solutionID, props.solutionVersion, { + protected withMetrics(props: BaseStackProps) { + this.applicationSetup.addMetricsCustomLambda(props.solutionID, props.solutionVersion, { NEW_KENDRA_INDEX_CREATED: this.kendraIndexCreatedCondition, ...(this.stackParameters.newKendraIndexEdition.valueAsString && { KENDRA_EDITION: this.stackParameters.newKendraIndexEdition.valueAsString @@ -442,7 +480,7 @@ export abstract class TextUseCase extends UseCaseStack { }); ( this.applicationSetup.solutionHelper.node - .tryFindChild('AnonymousData') + .tryFindChild('Data') ?.node.tryFindChild('Default') as cdk.CfnResource ).addDependency( this.applicationSetup.webConfigCustomResource.node.tryFindChild('Default') as cdk.CfnCustomResource diff --git a/source/infrastructure/lib/framework/use-case-stack.ts b/source/infrastructure/lib/framework/use-case-stack.ts index 1243074f..f29ef0a5 100644 --- a/source/infrastructure/lib/framework/use-case-stack.ts +++ b/source/infrastructure/lib/framework/use-case-stack.ts @@ -5,6 +5,7 @@ import * as cdk from 'aws-cdk-lib'; import * as iam from 'aws-cdk-lib/aws-iam'; import * as lambda from 'aws-cdk-lib/aws-lambda'; import * as dynamodb from 'aws-cdk-lib/aws-dynamodb'; +import * as s3 from 'aws-cdk-lib/aws-s3'; import { Construct } from 'constructs'; import { FeedbackSetupStack } from '../feedback/feedback-setup-stack'; import * as api from 'aws-cdk-lib/aws-apigateway'; @@ -42,67 +43,56 @@ import { } from '../utils/constants'; import { VPCSetup } from '../vpc/vpc-setup'; import { UseCaseRestEndpointSetup } from '../api/use-case-rest-endpoint-setup'; +import { MultimodalSetup } from '../multimodal/multimodal-setup'; export class UseCaseParameters extends BaseParameters { /** - * Unique UUID for this deployed use case within an application. Provided by the deployment platform if in use. - */ - public useCaseUUID: cdk.CfnParameter; - - /** - * First 8 characters of the useCaseUUID. - */ - public useCaseShortId: string; - - /** - * Name of the table that stores the configuration for a use case. + * Email of the default user for this use case. A cognito user for this email will be created to access the use case. */ - public useCaseConfigTableName: cdk.CfnParameter; + public defaultUserEmail: cdk.CfnParameter; /** - * Key corresponding of the record containing configurations required by the chat provider lambda at runtime. The record in the table should have a "key" - * attribute matching this value, and a "config" attribute containing the desired config. This record will be populated by the deployment platform if in - * use. For standalone deployments of this use-case, a manually created entry in the table defined in `UseCaseConfigTableName` is required. - * Consult the implementation guide for more details. + * ARN of the DynamoDB table containing user group policies, used by the custom authorizer on this use-cases API. + * Typically will be provided when deploying from the deployment platform, but can be omitted when deploying this use-case stack standalone. */ - public useCaseConfigRecordKey: cdk.CfnParameter; + public existingCognitoGroupPolicyTableName: cdk.CfnParameter; /** - * Email of the default user for this use case. A cognito user for this email will be created to access the use case. + * Cfn parameter for existing websocket endpoint */ - public defaultUserEmail: cdk.CfnParameter; + public existingRestApiId: cdk.CfnParameter; /** - * UserPoolId of an existing cognito user pool which this use case will be authenticated with. - * Typically will be provided when deploying from the deployment platform, but can be omitted when deploying this use-case stack standalone. + * Cfn parameter for existing websocket endpoint */ - public existingCognitoUserPoolId: cdk.CfnParameter; + public existingApiRootResourceId: cdk.CfnParameter; /** - * ARN of the DynamoDB table containing user group policies, used by the custom authorizer on this use-cases API. - * Typically will be provided when deploying from the deployment platform, but can be omitted when deploying this use-case stack standalone. + * If set to 'false', the deployed use case stack will not have access to the feedback feature */ - public existingCognitoGroupPolicyTableName: cdk.CfnParameter; + public feedbackEnabled: cdk.CfnParameter; /** - * Cfn parameter for existing user pool client Id (App Client Id) + * Existing multimodal data metadata table name + * Passed by the deployment dashboard's management lambda when deploying use cases */ - public existingUserPoolClientId: cdk.CfnParameter; + public existingMultimodalDataMetadataTable: cdk.CfnParameter; /** - * Cfn parameter for existing websocket endpoint + * Existing multimodal data bucket name + * Passed by the deployment dashboard's management lambda when deploying use cases */ - public existingRestApiId: cdk.CfnParameter; + public existingMultimodalDataBucket: cdk.CfnParameter; /** - * Cfn parameter for existing websocket endpoint + * Enable multimodal functionality for the AgentCore deployment */ - public existingApiRootResourceId: cdk.CfnParameter; + public multimodalEnabled: cdk.CfnParameter; /** - * If set to 'false', the deployed use case stack will not have access to the feedback feature + * Number of execution environments to keep warm for provisioned concurrency */ - public feedbackEnabled: cdk.CfnParameter; + public provisionedConcurrencyValue: cdk.CfnParameter; /** * The source where this code was called from @@ -119,36 +109,6 @@ export class UseCaseParameters extends BaseParameters { * @param stack */ protected withAdditionalCfnParameters(stack: BaseStack) { - this.useCaseUUID = new cdk.CfnParameter(stack, 'UseCaseUUID', { - type: 'String', - description: - 'UUID to identify this deployed use case within an application. Please provide a 36 character long UUIDv4. If you are editing the stack, do not modify the value (retain the value used during creating the stack). A different UUID when editing the stack will result in new AWS resource created and deleting the old ones', - allowedPattern: - '^[0-9a-fA-F]{8}$|^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$', - minLength: 8, - maxLength: 36, - constraintDescription: - 'Using digits and the letters A through F, please provide a 8 character id or a 36 character long UUIDv4.' - }); - - this.useCaseShortId = cdk.Fn.select(0, cdk.Fn.split('-', this.useCaseUUID.valueAsString)); - - this.useCaseConfigTableName = new cdk.CfnParameter(stack, 'UseCaseConfigTableName', { - type: 'String', - maxLength: 255, - allowedPattern: '^[a-zA-Z0-9_.-]{3,255}$', - description: 'DynamoDB table name for the table which contains the configuration for this use case.', - constraintDescription: - 'This parameter is required. The stack will read the configuration from this table to configure the resources during deployment' - }); - - this.useCaseConfigRecordKey = new cdk.CfnParameter(stack, 'UseCaseConfigRecordKey', { - type: 'String', - maxLength: 2048, - description: - 'Key corresponding of the record containing configurations required by the chat provider lambda at runtime. The record in the table should have a "key" attribute matching this value, and a "config" attribute containing the desired config. This record will be populated by the deployment platform if in use. For standalone deployments of this use-case, a manually created entry in the table defined in `UseCaseConfigTableName` is required. Consult the implementation guide for more details.' - }); - this.defaultUserEmail = new cdk.CfnParameter(stack, 'DefaultUserEmail', { type: 'String', description: @@ -158,15 +118,6 @@ export class UseCaseParameters extends BaseParameters { constraintDescription: 'Please provide a valid email' }); - this.existingCognitoUserPoolId = new cdk.CfnParameter(stack, 'ExistingCognitoUserPoolId', { - type: 'String', - allowedPattern: '^$|^[0-9a-zA-Z_-]{9,24}$', - maxLength: 24, - description: - 'Optional - UserPoolId of an existing cognito user pool which this use case will be authenticated with. Typically will be provided when deploying from the deployment platform, but can be omitted when deploying this use-case stack standalone.', - default: '' - }); - this.existingCognitoGroupPolicyTableName = new cdk.CfnParameter(stack, 'ExistingCognitoGroupPolicyTableName', { type: 'String', allowedPattern: '^$|^[a-zA-Z0-9_.-]{3,255}$', @@ -176,15 +127,6 @@ export class UseCaseParameters extends BaseParameters { default: '' }); - this.existingUserPoolClientId = new cdk.CfnParameter(stack, 'ExistingCognitoUserPoolClient', { - type: 'String', - allowedPattern: '^$|^[a-z0-9]{3,128}$', - maxLength: 128, - description: - 'Optional - Provide a User Pool Client (App Client) to use an existing one. If not provided a new User Pool Client will be created. This parameter can only be provided if an existing User Pool Id is provided', - default: '' - }); - this.existingRestApiId = new cdk.CfnParameter(stack, 'ExistingRestApiId', { type: 'String', allowedPattern: '^$|^[a-zA-Z0-9]+$', @@ -209,6 +151,28 @@ export class UseCaseParameters extends BaseParameters { default: 'No' }); + this.multimodalEnabled = new cdk.CfnParameter(stack, 'MultimodalEnabled', { + type: 'String', + description: 'If set to Yes, the deployed use case stack will have access to multimodal functionality. This functionality is only enabled for Agentcore-based AgentBuilder and Workflow usecases.', + allowedValues: ['Yes', 'No'], + allowedPattern: '^Yes|No$', + default: 'No' + }); + + this.existingMultimodalDataMetadataTable = new cdk.CfnParameter(stack, 'ExistingMultimodalDataMetadataTable', { + type: 'String', + description: 'Existing multimodal data metadata table name which contains references of the files in S3', + default: '', + constraintDescription: 'Must be a valid DynamoDB table name or empty string' + }); + + this.existingMultimodalDataBucket = new cdk.CfnParameter(stack, 'ExistingMultimodalDataBucket', { + type: 'String', + description: 'Existing multimodal data bucket name which stores the multimodal data files', + default: '', + constraintDescription: 'Must be a valid S3 bucket name or empty string' + }); + this.stackDeploymentSource = new cdk.CfnParameter(stack, 'StackDeploymentSource', { type: 'String', description: @@ -217,10 +181,19 @@ export class UseCaseParameters extends BaseParameters { allowedValues: ['UseCase', 'StandaloneUseCase'] }).valueAsString as StackDeploymentSource; + this.provisionedConcurrencyValue = new cdk.CfnParameter(stack, 'ProvisionedConcurrencyValue', { + type: 'Number', + description: + 'Provisioned concurrency value for Lambda functions. Set to 0 to disable provisioned concurrency.', + default: 0, + minValue: 0, + maxValue: 5 + }); + const existingParameterGroups = this.cfnStack.templateOptions.metadata !== undefined && - Object.hasOwn(this.cfnStack.templateOptions.metadata, 'AWS::CloudFormation::Interface') && - this.cfnStack.templateOptions.metadata['AWS::CloudFormation::Interface'].ParameterGroups !== undefined + Object.hasOwn(this.cfnStack.templateOptions.metadata, 'AWS::CloudFormation::Interface') && + this.cfnStack.templateOptions.metadata['AWS::CloudFormation::Interface'].ParameterGroups !== undefined ? this.cfnStack.templateOptions.metadata['AWS::CloudFormation::Interface'].ParameterGroups : []; @@ -243,10 +216,18 @@ export class UseCaseParameters extends BaseParameters { this.useCaseConfigTableName.logicalId, this.existingRestApiId.logicalId, this.feedbackEnabled.logicalId, + this.multimodalEnabled.logicalId, + this.existingMultimodalDataMetadataTable.logicalId, + this.existingMultimodalDataBucket.logicalId, this.existingApiRootResourceId.logicalId ] }); + existingParameterGroups.unshift({ + Label: { default: 'Optional: Performance Optimization Configuration' }, + Parameters: [this.provisionedConcurrencyValue.logicalId] + }); + // prettier-ignore new cdk.CfnRule(this.cfnStack, 'PolicyTableRequiredRule', { // NOSONAR - construct instantiation ruleCondition: cdk.Fn.conditionNot( @@ -310,6 +291,66 @@ export class UseCaseParameters extends BaseParameters { } ] }); + + new cdk.CfnCondition(this.cfnStack, 'MultimodalEnabledCondition', { + expression: cdk.Fn.conditionEquals(this.multimodalEnabled.valueAsString, 'Yes') + }); + + const multimodalBucketProvided = new cdk.CfnCondition(this.cfnStack, 'MultimodalBucketProvided', { + expression: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(this.existingMultimodalDataBucket.valueAsString, '')) + }); + + const multimodalTableProvided = new cdk.CfnCondition(this.cfnStack, 'MultimodalTableProvided', { + expression: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.existingMultimodalDataMetadataTable.valueAsString, '') + ) + }); + + new cdk.CfnCondition(this.cfnStack, 'MultimodalDataProvidedCondition', { + expression: cdk.Fn.conditionAnd(multimodalBucketProvided, multimodalTableProvided) + }); + + // If multimodal is enabled, both existing resources must be provided together or both must be empty + new cdk.CfnRule(this.cfnStack, 'ValidateMultimodalResourcesConfiguration', { + ruleCondition: cdk.Fn.conditionEquals(this.multimodalEnabled.valueAsString, 'Yes'), + assertions: [ + { + assert: cdk.Fn.conditionOr( + // Both are provided + cdk.Fn.conditionAnd( + cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.existingMultimodalDataBucket.valueAsString, '') + ), + cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.existingMultimodalDataMetadataTable.valueAsString, '') + ) + ), + // Both are empty + cdk.Fn.conditionAnd( + cdk.Fn.conditionEquals(this.existingMultimodalDataBucket.valueAsString, ''), + cdk.Fn.conditionEquals(this.existingMultimodalDataMetadataTable.valueAsString, '') + ) + ), + assertDescription: + 'When multimodal functionality is enabled, both multimodal data bucket and metadata table must be provided together, or both must be empty to create new resources' + } + ] + }); + + // If existing multimodal resources are provided, multimodal must be enabled + new cdk.CfnRule(this.cfnStack, 'ValidateMultimodalEnabledWithResources', { + ruleCondition: cdk.Fn.conditionAnd( + cdk.Fn.conditionNot(cdk.Fn.conditionEquals(this.existingMultimodalDataBucket.valueAsString, '')), + cdk.Fn.conditionNot(cdk.Fn.conditionEquals(this.existingMultimodalDataMetadataTable.valueAsString, '')) + ), + assertions: [ + { + assert: cdk.Fn.conditionEquals(this.multimodalEnabled.valueAsString, 'Yes'), + assertDescription: + 'When existing multimodal data bucket and metadata table are provided, multimodal functionality must be enabled (MultimodalEnabled=Yes)' + } + ] + }); } } @@ -342,19 +383,102 @@ export abstract class UseCaseStack extends BaseStack { */ public chatLlmProviderLambda: lambda.Function; + /** + * Alias for the chat Lambda function (always created for consistent behavior) + */ + public chatLlmProviderAlias: lambda.Alias; + /** * The Rest Endpoint for the use case */ public useCaseRestEndpointSetup: UseCaseRestEndpointSetup; + /** + * Multimodal setup construct for handling multimodal data functionality when standalone mode is enabled + */ + protected multimodalSetup: MultimodalSetup; + + /** + * Multimodal data metadata table - available when multimodal is enabled + */ + protected multimodalDataMetadataTable?: dynamodb.ITable; + + /** + * Multimodal data bucket - available when multimodal is enabled + */ + protected multimodalDataBucket?: s3.IBucket; + + /** + * Condition that determines if new API resources should be created (for standalone mode) + */ + protected createApiResourcesCondition: cdk.CfnCondition; + + /** + * Condition that determines if multimodal functionality is enabled + */ + protected multimodalEnabledCondition: cdk.CfnCondition; + + /** + * Condition that determines if new multimodal resources should be created + */ + protected createMultimodalResourcesCondition: cdk.CfnCondition; + constructor(scope: Construct, id: string, props: BaseStackProps) { super(scope, id, props); } - protected getWebSocketRoutes(): Map { + protected getWebSocketRoutes(): Map { throw new Error('Constructs inheriting this stack should be providing their implementation'); } + /** + * Method to create lambda version and alias with optional provisioned concurrency + */ + protected setupLambdaVersioning(props: BaseStackProps): void { + const provisionedConcurrencyCondition = new cdk.CfnCondition(this, 'ProvisionedConcurrencyCondition', { + expression: cdk.Fn.conditionNot( + cdk.Fn.conditionEquals(this.stackParameters.provisionedConcurrencyValue.valueAsNumber, 0) + ) + }); + + // Custom resource to create Lambda version on every Create/Update + const versionGenerator = new cdk.CfnResource(this, 'LambdaVersionGenerator', { + type: 'Custom::LambdaVersion', + properties: { + ServiceToken: this.applicationSetup.customResourceLambda.functionArn, + Resource: 'LAMBDA_VERSION_GENERATOR', + FunctionName: this.chatLlmProviderLambda.functionName, + Triggers: { + UseCaseConfigTrigger: this.stackParameters.useCaseConfigRecordKey.valueAsString + } + } + }); + + // Retain the custom resource on stack deletion to prevent errors during rollback scenarios + versionGenerator.cfnOptions.deletionPolicy = cdk.CfnDeletionPolicy.RETAIN; + + const version = lambda.Version.fromVersionAttributes(this, 'ChatLambdaVersion', { + lambda: this.chatLlmProviderLambda, + version: versionGenerator.getAtt('VersionNumber').toString() + }); + + this.chatLlmProviderAlias = new lambda.Alias(this, 'ChatLambdaAlias', { + aliasName: 'live', + version: version, + description: 'Alias for chat Lambda function' + }); + + // Only add provisioned concurrency when condition is met + const cfnAlias = this.chatLlmProviderAlias.node.defaultChild as lambda.CfnAlias; + cfnAlias.provisionedConcurrencyConfig = cdk.Fn.conditionIf( + provisionedConcurrencyCondition.logicalId, + { + ProvisionedConcurrentExecutions: this.stackParameters.provisionedConcurrencyValue.valueAsNumber + }, + cdk.Aws.NO_VALUE + ); + } + /** * Method to create additional resources as required by the stack * @@ -365,7 +489,6 @@ export abstract class UseCaseStack extends BaseStack { new cdk.CfnMapping(this, 'Solution', { mapping: { Data: { - SendAnonymousUsageData: 'Yes', ID: props.solutionID, Version: props.solutionVersion, SolutionName: props.solutionName, @@ -392,7 +515,13 @@ export abstract class UseCaseStack extends BaseStack { 0, cdk.Fn.split( '.', - cdk.Fn.select(1, cdk.Fn.split('@', cdk.Fn.join("", [this.stackParameters.defaultUserEmail.valueAsString, "@example.com"]))) + cdk.Fn.select( + 1, + cdk.Fn.split( + '@', + cdk.Fn.join('', [this.stackParameters.defaultUserEmail.valueAsString, '@example.com']) + ) + ) ) ), INTERNAL_EMAIL_DOMAIN @@ -400,6 +529,32 @@ export abstract class UseCaseStack extends BaseStack { }); this.llmProviderSetup(); + + // Lambda version policy for publishing Lambda versions + const lambdaVersionPolicy = new iam.Policy(this, 'LambdaVersionPolicy', { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['lambda:PublishVersion'], + resources: [`arn:${cdk.Aws.PARTITION}:lambda:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:function:*`] + }) + ] + }); + lambdaVersionPolicy.attachToRole(this.applicationSetup.customResourceRole); + + // CFN Nag suppression: Wildcard resource required to avoid timing issues, attempting to scope to specific function ARNs results in access denied errors during deployment + NagSuppressions.addResourceSuppressions(lambdaVersionPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Lambda version policy requires wildcard permissions to publish versions for use case Lambda functions', + appliesTo: [ + 'Resource::arn::lambda:::function:*' + ] + } + ]); + + this.setupLambdaVersioning(props); + // setup lambda logs retention policy createCustomResourceForLambdaLogRetention( this, @@ -442,13 +597,14 @@ export abstract class UseCaseStack extends BaseStack { cloudFrontUrl: uiInfrastructureBuilder.getCloudFrontUrlWithCondition(), deployWebApp: this.deployWebApp.valueAsString, lambdaRouteMapping: this.getWebSocketRoutes(), + chatLlmProviderLambda: this.chatLlmProviderLambda, deployVPCCondition: this.vpcEnabledCondition, privateSubnetIds: this.transpiredPrivateSubnetIds, securityGroupIds: this.transpiredSecurityGroupIds }); // Existing API was not provided and so a new API will be created. - const createApiResourcesCondition = new cdk.CfnCondition(this, 'CreateApiResourcesCondition', { + this.createApiResourcesCondition = new cdk.CfnCondition(this, 'CreateApiResourcesCondition', { expression: cdk.Fn.conditionOr( cdk.Fn.conditionEquals(this.stackParameters.existingRestApiId, ''), cdk.Fn.conditionEquals(this.stackParameters.existingApiRootResourceId, '') @@ -467,7 +623,7 @@ export abstract class UseCaseStack extends BaseStack { userPoolGroupName: this.requestProcessor.cognitoSetup.userPoolGroup.groupName, // additional inputs for creating resources llmConfigTable: this.stackParameters.useCaseConfigTableName.valueAsString, - createApiResourcesCondition: createApiResourcesCondition, + createApiResourcesCondition: this.createApiResourcesCondition, customResourceLambda: this.applicationSetup.customResourceLambda, deployVPCCondition: this.vpcEnabledCondition, privateSubnetIds: this.transpiredPrivateSubnetIds, @@ -478,7 +634,7 @@ export abstract class UseCaseStack extends BaseStack { expression: cdk.Fn.conditionAnd( // FeedbackEnabled was provided as 'Yes' and a new API was created for this use case type cdk.Fn.conditionEquals(this.stackParameters.feedbackEnabled, 'Yes'), - createApiResourcesCondition + this.createApiResourcesCondition ) }); @@ -520,6 +676,57 @@ export abstract class UseCaseStack extends BaseStack { }) ); + const multimodalDataProvidedCondition = this.node.findChild( + 'MultimodalDataProvidedCondition' + ) as cdk.CfnCondition; + + this.multimodalEnabledCondition = this.node.findChild('MultimodalEnabledCondition') as cdk.CfnCondition; + + this.createMultimodalResourcesCondition = new cdk.CfnCondition(this, 'CreateMultimodalResourcesCondition', { + expression: cdk.Fn.conditionAnd( + this.multimodalEnabledCondition, + cdk.Fn.conditionNot(multimodalDataProvidedCondition), + this.createApiResourcesCondition + ) + }); + + this.multimodalSetup = new MultimodalSetup(this, 'MultimodalSetup', { + restApi: this.useCaseRestEndpointSetup.restApi as api.RestApi, + deploymentPlatformAuthorizer: this.useCaseRestEndpointSetup.authorizer as api.RequestAuthorizer, + requestValidator: this.useCaseRestEndpointSetup.requestValidator as api.RequestValidator, + dlq: this.useCaseRestEndpointSetup.dlq, + deployVPCCondition: this.vpcEnabledCondition, + privateSubnetIds: this.transpiredPrivateSubnetIds, + securityGroupIds: this.transpiredSecurityGroupIds, + customResourceLambdaArn: this.applicationSetup.customResourceLambda.functionArn, + customResourceLambdaRoleArn: this.applicationSetup.customResourceRole.roleArn, + accessLoggingS3Bucket: this.applicationSetup.accessLoggingBucket, + stackSource: this.stackParameters.stackDeploymentSource + }); + this.multimodalSetup.applyConditionToAllResources(this.createMultimodalResourcesCondition); + + // Set multimodal resource properties based on whether we're using existing resources or creating new ones + // When CreateMultimodalResourcesCondition is true, use the newly created resources else reference the existing resources + this.multimodalDataMetadataTable = dynamodb.Table.fromTableName( + this, + 'MultimodalDataMetadataTableRef', + cdk.Fn.conditionIf( + this.createMultimodalResourcesCondition.logicalId, + this.multimodalSetup.multimodalDataMetadataTable.tableName, + this.stackParameters.existingMultimodalDataMetadataTable.valueAsString + ).toString() + ); + + this.multimodalDataBucket = s3.Bucket.fromBucketName( + this, + 'MultimodalDataBucketRef', + cdk.Fn.conditionIf( + this.createMultimodalResourcesCondition.logicalId, + this.multimodalSetup.multimodalDataBucket.bucketName, + this.stackParameters.existingMultimodalDataBucket.valueAsString + ).toString() + ); + const webConfigSsmKey = `${WEB_CONFIG_PREFIX}/${this.stackParameters.useCaseShortId}`; this.applicationSetup.createWebConfigStorage( { @@ -551,8 +758,10 @@ export abstract class UseCaseStack extends BaseStack { this.stackParameters.useCaseUUID.valueAsString ); - // Prevents deletion of UseCase policies during updates due to Logical ID changes and old custom resource deletion occurring after new custom resource creation - (useCasePolicyCustomResource.node.defaultChild as cdk.CfnResource).overrideLogicalId('WebsocketRequestProcessorCognitoUseCaseGroupPolicyCBC41F18'); + // Prevents deletion of UseCase policies during updates due to Logical ID changes and old custom resource deletion occurring after new custom resource creation + (useCasePolicyCustomResource.node.defaultChild as cdk.CfnResource).overrideLogicalId( + 'WebsocketRequestProcessorCognitoUseCaseGroupPolicyCBC41F18' + ); const redeployRestApiCustomResource = this.useCaseRestEndpointSetup.redeployRestApi( this.applicationSetup.customResourceLambda, @@ -562,7 +771,7 @@ export abstract class UseCaseStack extends BaseStack { ); (redeployRestApiCustomResource.node.defaultChild as cdk.CfnResource).cfnOptions.condition = - createApiResourcesCondition; + this.createApiResourcesCondition; const redeployRestApiCustomResourceFeedback = this.useCaseRestEndpointSetup.redeployRestApi( this.applicationSetup.customResourceLambda, @@ -666,10 +875,31 @@ export abstract class UseCaseStack extends BaseStack { }); new cdk.CfnOutput(cdk.Stack.of(this), 'WebsocketEndpoint', { - value: this.requestProcessor.webSocketApi.apiEndpoint, + value: `${this.requestProcessor.webSocketApi.apiEndpoint}/${this.requestProcessor.websocketApiStage.stageName}`, description: 'Websocket API endpoint' }); + // Conditionally create multimodal outputs + const multimodalBucketOutput = new cdk.CfnOutput(this, 'MultimodalDataBucketName', { + value: cdk.Fn.conditionIf( + this.createMultimodalResourcesCondition.logicalId, + this.multimodalSetup.multimodalDataBucket.bucketName, + this.stackParameters.existingMultimodalDataBucket.valueAsString + ).toString(), + description: 'S3 bucket for storing multimodal files' + }); + multimodalBucketOutput.condition = this.multimodalEnabledCondition; + + const multimodalTableOutput = new cdk.CfnOutput(this, 'MultimodalDataMetadataTableName', { + value: cdk.Fn.conditionIf( + this.createMultimodalResourcesCondition.logicalId, + this.multimodalSetup.multimodalDataMetadataTable.tableName, + this.stackParameters.existingMultimodalDataMetadataTable.valueAsString + ).toString(), + description: 'DynamoDB table for storing multimodal files metadata' + }); + multimodalTableOutput.condition = this.multimodalEnabledCondition; + if (process.env.DIST_OUTPUT_BUCKET) { generateSourceCodeMapping(this, props.solutionName, props.solutionVersion); generateSourceCodeMapping(this.uiDistribution, props.solutionName, props.solutionVersion); @@ -678,12 +908,12 @@ export abstract class UseCaseStack extends BaseStack { } /** - * Method to add anonymous metrics to the application stack + * Method to add metrics to the application stack * * @param props */ - protected withAnonymousMetrics(props: BaseStackProps) { - this.applicationSetup.addAnonymousMetricsCustomLambda(props.solutionID, props.solutionVersion, { + protected withMetrics(props: BaseStackProps) { + this.applicationSetup.addMetricsCustomLambda(props.solutionID, props.solutionVersion, { USE_CASE_CONFIG_RECORD_KEY: this.stackParameters.useCaseConfigRecordKey.valueAsString, USE_CASE_CONFIG_TABLE_NAME: this.stackParameters.useCaseConfigTableName.valueAsString, UUID: this.stackParameters.useCaseUUID, @@ -692,7 +922,7 @@ export abstract class UseCaseStack extends BaseStack { }); ( this.applicationSetup.solutionHelper.node - .tryFindChild('AnonymousData') + .tryFindChild('Data') ?.node.tryFindChild('Default') as cdk.CfnResource ).addDependency( this.applicationSetup.webConfigCustomResource.node.tryFindChild('Default') as cdk.CfnCustomResource diff --git a/source/infrastructure/lib/layers/runtime-libs.ts b/source/infrastructure/lib/layers/runtime-libs.ts index 382bbe34..06e4b386 100644 --- a/source/infrastructure/lib/layers/runtime-libs.ts +++ b/source/infrastructure/lib/layers/runtime-libs.ts @@ -7,9 +7,9 @@ import { Construct } from 'constructs'; import * as path from 'path'; import { ApplicationAssetBundler } from '../framework/bundler/asset-options-factory'; import { - COMMERCIAL_REGION_LAMBDA_JS_LAYER_RUNTIME, COMMERCIAL_REGION_LAMBDA_LAYER_PYTHON_RUNTIME, COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + COMMERCIAL_REGION_LAMBDA_NODE_TS_LAYER_RUNTIME, COMMERCIAL_REGION_LAMBDA_PYTHON_RUNTIME, GOV_CLOUD_REGION_LAMBDA_NODE_RUNTIME, GOV_CLOUD_REGION_LAMBDA_PYTHON_RUNTIME, @@ -39,7 +39,7 @@ export class AwsNodeSdkLibLayer extends lambda.LayerVersion { code: lambda.Code.fromAsset( entry, ApplicationAssetBundler.assetBundlerFactory() - .assetOptions(COMMERCIAL_REGION_LAMBDA_JS_LAYER_RUNTIME) + .assetOptions(COMMERCIAL_REGION_LAMBDA_NODE_TS_LAYER_RUNTIME) .options(scope, entry) ), compatibleRuntimes, diff --git a/source/infrastructure/lib/mcp-server-stack.ts b/source/infrastructure/lib/mcp-server-stack.ts new file mode 100644 index 00000000..74cd3acc --- /dev/null +++ b/source/infrastructure/lib/mcp-server-stack.ts @@ -0,0 +1,474 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import { NagSuppressions } from 'cdk-nag'; +import { USE_CASE_TYPES, ECR_URI_PATTERN } from './utils/constants'; +import { Construct } from 'constructs'; +import { ApplicationSetup } from './framework/application-setup'; +import { BaseStack, BaseStackProps, BaseParameters } from './framework/base-stack'; +import { setupAgentCorePermissionsWithPassRole, setupAgentCorePermissions } from './utils/common-utils'; +import { AgentExecutionRole } from './use-case-stacks/agent-core/components/agent-execution-role'; + +export class MCPServerParameters extends BaseParameters { + /** + * S3 Bucket Name for the S3 bucket that stores the Lambda/API schema + */ + public s3BucketName: cdk.CfnParameter; + + /** + * Optional ECR URI for the container image used by the MCP server through Agentcore Runtime + */ + public ecrUri: cdk.CfnParameter; + + /** + * Optional Rest API ID for the existing API Gateway REST API + */ + protected existingRestApiId: cdk.CfnParameter; + + constructor(stack: cdk.Stack) { + super(stack); + this.withAdditionalCfnParameters(stack); + } + + protected setupCognitoUserPoolClientDomainParams(stack: cdk.Stack): void { + // Overriding the parent function as the parameters is not necessary for the MCPSeverParameters + } + protected withAdditionalCfnParameters(stack: cdk.Stack) { + this.s3BucketName = new cdk.CfnParameter(stack, 'S3BucketName', { + type: 'String', + description: 'S3 Bucket Name for the S3 bucket that stores the Lambda/API schema', + allowedPattern: '^[a-z0-9][a-z0-9\\-]*[a-z0-9]$', + constraintDescription: 'Please provide a valid S3 bucket name', + maxLength: 63 + }); + + this.ecrUri = new cdk.CfnParameter(stack, 'EcrUri', { + type: 'String', + description: 'Optional ECR URI for the container image used by the MCP server', + allowedPattern: `^$|${ECR_URI_PATTERN}`, + constraintDescription: + 'Please provide a valid ECR URI format (e.g., 123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:tag) or leave empty', + maxLength: 200, + default: '' + }); + + this.existingRestApiId = new cdk.CfnParameter(stack, 'ExistingRestApiId', { + type: 'String', + allowedPattern: '^$|^[a-zA-Z0-9]+$', + description: + 'Optional - Provide the API Gateway REST API ID to use an existing one. If not provided, a new API Gateway REST API will be created. Note that for standalone use cases, existing APIs should have the pre-configured UseCaseDetails (and Feedback if Feedback is enabled) routes with expected models. Additionally, ExistingApiRootResourceId must also be provided.', + default: '' + }); + + const existingParameterGroups = + this.cfnStack.templateOptions.metadata?.['AWS::CloudFormation::Interface']?.ParameterGroups || []; + + existingParameterGroups.unshift({ + Label: { default: 'MCP Server Configuration' }, + Parameters: [this.s3BucketName.logicalId, this.ecrUri.logicalId] + }); + + this.cfnStack.templateOptions.metadata = { + 'AWS::CloudFormation::Interface': { + ParameterGroups: existingParameterGroups + } + }; + } +} + +/** + * Stack for MCP (Model Context Protocol) Server infrastructure + */ +export class MCPServerStack extends BaseStack { + private mcpGatewayRole: iam.Role; + private agentExecutionRole: AgentExecutionRole; + private hasExistingRestApiIdCondition: cdk.CfnCondition; + + constructor(scope: Construct, id: string, props: BaseStackProps) { + super(scope, id, props); + this.withAdditionalResourceSetup(props); + } + + protected withAdditionalResourceSetup(props: BaseStackProps): void { + super.withAdditionalResourceSetup(props); + // Setup permissions for custom resource lambda + const mcpRuntimeName = `gaab_mcp_${this.stackParameters.useCaseShortId}`; + const mcpGatewayName = `gaab-mcp-${this.stackParameters.useCaseShortId}`; + + this.setupS3Permissions(); + this.setupDynamoDBPermissions(); + setupAgentCorePermissions(this.applicationSetup.customResourceRole); + + this.mcpGatewayRole = this.createMCPGatewayRole(mcpGatewayName); + this.setupBedrockAgentCoreGatewayPermissions(); + + this.agentExecutionRole = new AgentExecutionRole(this, 'MCPAgentExecutionRole', { + useCaseConfigTableName: this.stackParameters.useCaseConfigTableName.valueAsString + }); + + setupAgentCorePermissionsWithPassRole( + this.applicationSetup.customResourceRole, + this.agentExecutionRole.role.roleArn + ); + + const hasEcrImage = new cdk.CfnCondition(this, 'HasEcrImage', { + expression: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(this.stackParameters.ecrUri.valueAsString, '')) + }); + + const noEcrImage = new cdk.CfnCondition(this, 'NoEcrImage', { + expression: cdk.Fn.conditionEquals(this.stackParameters.ecrUri.valueAsString, '') + }); + + const mcpRuntimeResource = new cdk.CustomResource(this, 'CreateMCPRuntime', { + resourceType: 'Custom::CreateMCPRuntime', + serviceToken: this.applicationSetup.customResourceLambda.functionArn, + properties: { + Resource: 'DEPLOY_MCP_RUNTIME', + MCPAgentCoreName: mcpRuntimeName, + USE_CASE_CONFIG_TABLE_NAME: this.stackParameters.useCaseConfigTableName.valueAsString, + USE_CASE_CONFIG_RECORD_KEY: this.stackParameters.useCaseConfigRecordKey.valueAsString, + EXECUTION_ROLE_ARN: this.agentExecutionRole.role.roleArn, + ECR_URI: this.stackParameters.ecrUri.valueAsString, + COGNITO_USER_POOL_ID: this.stackParameters.existingCognitoUserPoolId.valueAsString, + COGNITO_USER_POOL_CLIENT_ID: this.stackParameters.existingUserPoolClientId.valueAsString + } + }); + (mcpRuntimeResource.node.defaultChild as cdk.CfnCustomResource).cfnOptions.condition = hasEcrImage; + + const mcpGatewayResource = new cdk.CustomResource(this, 'CreateMCPServer', { + resourceType: 'Custom::CreateMCPServer', + serviceToken: this.applicationSetup.customResourceLambda.functionArn, + properties: { + Resource: 'DEPLOY_MCP_GATEWAY', + MCPAgentCoreName: mcpGatewayName, + USE_CASE_CONFIG_TABLE_NAME: this.stackParameters.useCaseConfigTableName.valueAsString, + USE_CASE_CONFIG_RECORD_KEY: this.stackParameters.useCaseConfigRecordKey.valueAsString, + USE_CASE_UUID: this.stackParameters.useCaseShortId, + S3_BUCKET_NAME: this.stackParameters.s3BucketName.valueAsString, + GATEWAY_ROLE_ARN: this.mcpGatewayRole.roleArn, + COGNITO_USER_POOL_ID: this.stackParameters.existingCognitoUserPoolId.valueAsString, + COGNITO_USER_POOL_CLIENT_ID: this.stackParameters.existingUserPoolClientId.valueAsString + } + }); + (mcpGatewayResource.node.defaultChild as cdk.CfnCustomResource).cfnOptions.condition = noEcrImage; + + new cdk.CfnOutput(this, 'MCPRuntimeArn', { + value: mcpRuntimeResource.getAttString('MCPRuntimeArn'), + description: 'ARN of the created MCP Runtime resource', + condition: hasEcrImage + }); + + new cdk.CfnOutput(this, 'MCPRuntimeExecutionRoleArn', { + value: this.agentExecutionRole.role.roleArn, + description: 'IAM Role ARN used for MCP Runtime execution', + condition: hasEcrImage + }); + + new cdk.CfnOutput(this, 'MCPGatewayArn', { + value: mcpGatewayResource.getAttString('GatewayArn'), + description: 'ARN of the created MCP Gateway resource', + condition: noEcrImage + }); + + new cdk.CfnOutput(this, 'MCPGatewayRoleArn', { + value: this.mcpGatewayRole.roleArn, + description: 'IAM Role ARN used for MCP Gateway operations', + condition: noEcrImage + }); + + new cdk.CfnMapping(this, 'Solution', { + mapping: { + Data: { + ID: props.solutionID, + Version: props.solutionVersion, + SolutionName: props.solutionName, + UseCaseName: USE_CASE_TYPES.MCP_SERVER + } + } + }); + + // Add anonymous metrics for stack state changes (CREATE/UPDATE/DELETE) + this.applicationSetup.addMetricsCustomLambda(props.solutionID, props.solutionVersion, { + USE_CASE_CONFIG_RECORD_KEY: this.stackParameters.useCaseConfigRecordKey.valueAsString, + USE_CASE_CONFIG_TABLE_NAME: this.stackParameters.useCaseConfigTableName.valueAsString, + UUID: this.stackParameters.useCaseUUID + }); + + this.addNagSuppressions(); + } + + private createMCPGatewayRole(mcpGatewayName: string): iam.Role { + const gatewayRole = new iam.Role(this, 'MCPGatewayRole', { + assumedBy: new iam.ServicePrincipal('bedrock-agentcore.amazonaws.com'), + description: 'IAM role for MCP Gateway to invoke Lambda functions' + }); + + gatewayRole.attachInlinePolicy( + new iam.Policy(this, 'GatewayAccessPolicy', { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:GetWorkloadAccessToken', + 'bedrock-agentcore:GetResourceApiKey', + 'bedrock-agentcore:GetResourceOauth2Token' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:workload-identity-directory/default`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:workload-identity-directory/default/workload-identity/${mcpGatewayName}-*` + ] + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['bedrock-agentcore:GetGateway'], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:gateway/${mcpGatewayName}-*` + ] + }) + ] + }) + ); + + // Allow gateway to access S3 schemas + gatewayRole.addToPolicy( + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['s3:GetObject'], + resources: [ + `arn:${cdk.Aws.PARTITION}:s3:::${this.stackParameters.s3BucketName.valueAsString}/mcp/schemas/*` + ] + }) + ); + + return gatewayRole; + } + + protected initializeBaseStackParameters(): void { + // Overriding base stack function ensuring params are not created for MCP Server stack + } + + protected setupBaseStackResources(props: BaseStackProps): void { + // Overriding base stack function ensuring resources are not created for MCP Server stack + } + + protected initializeCfnParameters(): void { + this.stackParameters = new MCPServerParameters(this); + } + /** + * Define core setup of infrastructure resources like s3 logging bucket, custom resource definitions + * which are used by root and nested stacks. The root stack should invoke this method and then pass + * the resources/ resource arns to the nested stack + * + * @param props + * @returns + */ + protected createApplicationSetup(props: BaseStackProps): ApplicationSetup { + return new ApplicationSetup(this, 'MCPServerSetup', { + solutionID: props.solutionID, + solutionVersion: props.solutionVersion, + useCaseUUID: this.stackParameters.useCaseShortId + }); + } + + /** + * Setup S3 permissions for the custom resource lambda + */ + private setupS3Permissions(): void { + const s3BucketAccessPolicy = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['s3:GetObject', 's3:ListBucket'], + resources: [ + `arn:${cdk.Aws.PARTITION}:s3:::${this.stackParameters.s3BucketName.valueAsString}`, + `arn:${cdk.Aws.PARTITION}:s3:::${this.stackParameters.s3BucketName.valueAsString}/*` + ] + }); + + this.applicationSetup.customResourceRole.addToPolicy(s3BucketAccessPolicy); + } + + /** + * Setup DynamoDB permissions for the custom resource lambda + */ + private setupDynamoDBPermissions(): void { + const customResourceUseCaseTablePolicy = new iam.PolicyStatement({ + actions: ['dynamodb:GetItem', 'dynamodb:PutItem'], + resources: [ + `arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/${this.stackParameters.useCaseConfigTableName.valueAsString}` + ], + conditions: { + 'ForAllValues:StringEquals': { + 'dynamodb:LeadingKeys': [this.stackParameters.useCaseConfigRecordKey.valueAsString] + } + }, + effect: iam.Effect.ALLOW + }); + + this.applicationSetup.customResourceRole.addToPolicy(customResourceUseCaseTablePolicy); + } + + /** + * Setup Bedrock Agent Core Gateway permissions for the custom resource lambda + */ + private setupBedrockAgentCoreGatewayPermissions(): void { + const bedrockAgentCoreGatewayPolicy = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:CreateGateway', + 'bedrock-agentcore:UpdateGateway', + 'bedrock-agentcore:DeleteGateway', + 'bedrock-agentcore:GetGateway', + 'bedrock-agentcore:ListGateways', + 'bedrock-agentcore:CreateGatewayTarget', + 'bedrock-agentcore:UpdateGatewayTarget', + 'bedrock-agentcore:DeleteGatewayTarget', + 'bedrock-agentcore:GetGatewayTarget', + 'bedrock-agentcore:ListGatewayTargets', + 'bedrock-agentcore:SynchronizeGatewayTargets' + ], + resources: [`arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:gateway/*`] + }); + + const bedrockAgentCoreWorkloadIdentityPolicy = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:CreateWorkloadIdentity', + 'bedrock-agentcore:GetWorkloadIdentity', + 'bedrock-agentcore:UpdateWorkloadIdentity', + 'bedrock-agentcore:DeleteWorkloadIdentity' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:workload-identity-directory/*` + ] + }); + + const passRolePolicy = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['iam:PassRole'], + resources: [this.mcpGatewayRole.roleArn], + conditions: { + StringEquals: { + 'iam:PassedToService': 'bedrock-agentcore.amazonaws.com' + } + } + }); + + // Add credential provider read permissions + const bedrockAgentCoreCredentialProviderPolicy = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['bedrock-agentcore:GetApiKeyCredentialProvider', 'bedrock-agentcore:GetOauth2CredentialProvider'], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:token-vault/*` + ] + }); + + // Add IAM permission to update gateway role policies + const iamPutRolePolicyPermission = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['iam:PutRolePolicy', 'iam:GetRolePolicy', 'iam:DeleteRolePolicy', 'iam:ListRolePolicies'], + resources: [this.mcpGatewayRole.roleArn] + }); + + this.applicationSetup.customResourceRole.addToPolicy(bedrockAgentCoreGatewayPolicy); + this.applicationSetup.customResourceRole.addToPolicy(bedrockAgentCoreWorkloadIdentityPolicy); + this.applicationSetup.customResourceRole.addToPolicy(passRolePolicy); + this.applicationSetup.customResourceRole.addToPolicy(bedrockAgentCoreCredentialProviderPolicy); + this.applicationSetup.customResourceRole.addToPolicy(iamPutRolePolicyPermission); + } + + /** + * Add NAG suppressions for wildcard permissions in IAM policies + */ + private addNagSuppressions(): void { + // Suppress NAG warnings for the custom resource role's default policy + NagSuppressions.addResourceSuppressions( + this.applicationSetup.customResourceRole.node.tryFindChild('DefaultPolicy') as iam.Policy, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to access S3 bucket objects for MCP server schema storage', + appliesTo: [`Resource::arn::s3:::/*`] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to manage Bedrock AgentCore runtime resources for MCP server operations', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::runtime/*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to manage Bedrock AgentCore runtime endpoint resources for MCP server operations', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::runtime/*/runtime-endpoint/*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to manage Bedrock AgentCore gateway resources for MCP server operations', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::gateway/*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to manage Bedrock AgentCore workload identity resources for MCP server authentication', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::workload-identity-directory/*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to read credential providers from the token vault for MCP gateway authentication', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::token-vault/*' + ] + } + ] + ); + + if (this.mcpGatewayRole) { + NagSuppressions.addResourceSuppressions( + this.mcpGatewayRole.node.tryFindChild('DefaultPolicy') as iam.Policy, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'The MCP Gateway role requires Lambda invoke permissions for functions in the same account and region as specified in MCP target configurations', + appliesTo: [`Resource::arn::lambda:::function:*`] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The MCP Gateway role requires access to MCP schema files stored under the mcp/schemas/ prefix in the S3 bucket', + appliesTo: [`Resource::arn::s3:::/mcp/schemas/*`] + } + ] + ); + + // Add suppressions for GatewayAccessPolicy + const gatewayAccessPolicy = this.node.tryFindChild('GatewayAccessPolicy') as iam.Policy; + if (gatewayAccessPolicy) { + NagSuppressions.addResourceSuppressions(gatewayAccessPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'The MCP Gateway role requires access to workload identities with dynamic suffixes based on use case UUID for authentication', + appliesTo: [ + { + regex: '/^Resource::arn::bedrock-agentcore:::workload-identity-directory\\/default\\/workload-identity\\/gaab-mcp-.*\\*$/g' + } + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The MCP Gateway role requires access to gateways with dynamic suffixes based on use case UUID', + appliesTo: [ + { + regex: '/^Resource::arn::bedrock-agentcore:::gateway\\/gaab-mcp-.*\\*$/g' + } + ] + } + ]); + } + } + } +} diff --git a/source/infrastructure/lib/metrics/use-case-dashboard.ts b/source/infrastructure/lib/metrics/use-case-dashboard.ts index 47f2dc9e..68738671 100644 --- a/source/infrastructure/lib/metrics/use-case-dashboard.ts +++ b/source/infrastructure/lib/metrics/use-case-dashboard.ts @@ -40,6 +40,7 @@ export class UseCaseDashboard extends CustomDashboard { const shortUseCaseId = cdk.Fn.select(0, cdk.Fn.split('-', this.props.useCaseUUID)); const metricsServiceName = `GAABUseCase-${shortUseCaseId}`; const feedbackServiceName = 'FeedbackManagement'; + const fileManagementServiceName = 'FilesManagement'; this.dashboard.addWidgets( new cloudwatch.GraphWidget({ @@ -544,6 +545,337 @@ export class UseCaseDashboard extends CustomDashboard { color: cloudwatch.Color.PINK }) ] + }), + new cloudwatch.GraphWidget({ + title: 'Multimodal File Operations', + left: [ + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: CloudWatchMetrics.FILES_UPLOADED + 'Count', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.GREEN + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILE_DELETE, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: CloudWatchMetrics.FILE_DELETE + 'Count', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.ORANGE + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILE_DOWNLOAD, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: CloudWatchMetrics.FILE_DOWNLOAD + 'Count', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.PURPLE + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILE_UPLOAD_FAILURE, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: CloudWatchMetrics.FILE_UPLOAD_FAILURE + 'Count', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.RED + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILE_ACCESS_FAILURES, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: CloudWatchMetrics.FILE_ACCESS_FAILURES + 'Count', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.RED + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.METADATA_UPDATE_FAILURE, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: CloudWatchMetrics.METADATA_UPDATE_FAILURE + 'Count', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.RED + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.METADATA_VALIDATION_FAILURE, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: CloudWatchMetrics.METADATA_VALIDATION_FAILURE + 'Count', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.ORANGE + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.MULTIMODAL_DISABLED_ERROR, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: CloudWatchMetrics.MULTIMODAL_DISABLED_ERROR + 'Count', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.PINK + }) + ] + }), + new cloudwatch.GraphWidget({ + title: 'Multimodal File Size Stats', + left: [ + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILE_SIZE, + statistic: cloudwatch.Stats.AVERAGE, + label: 'Average' + CloudWatchMetrics.FILE_SIZE, + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.BLUE + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILE_SIZE, + statistic: cloudwatch.Stats.MAXIMUM, + label: 'Max' + CloudWatchMetrics.FILE_SIZE, + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID + }, + color: cloudwatch.Color.ORANGE + }) + ] + }), + new cloudwatch.GraphWidget({ + title: 'Multimodal File Uploads by Extension', + left: [ + // Image extensions + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: 'png', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'png' + }, + color: cloudwatch.Color.BLUE + }), + // Combined JPEG images (jpg + jpeg) + new cloudwatch.MathExpression({ + expression: 'jpeg_jpg + jpeg_jpeg', + usingMetrics: { + jpeg_jpg: new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'jpg' + } + }), + jpeg_jpeg: new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'jpeg' + } + }) + }, + label: 'jpg/jpeg', + color: cloudwatch.Color.GREEN + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: 'gif', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'gif' + }, + color: cloudwatch.Color.PURPLE + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: 'webp', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'webp' + }, + color: cloudwatch.Color.PINK + }), + // Document extensions + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: 'pdf', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'pdf' + }, + color: cloudwatch.Color.RED + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: 'csv', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'csv' + }, + color: cloudwatch.Color.BLUE + }), + // Combined Word documents (doc + docx) + new cloudwatch.MathExpression({ + expression: 'word_doc + word_docx', + usingMetrics: { + word_doc: new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'doc' + } + }), + word_docx: new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'docx' + } + }) + }, + label: 'doc/docx', + color: cloudwatch.Color.BROWN + }), + // Combined Excel spreadsheets (xls + xlsx) + new cloudwatch.MathExpression({ + expression: 'excel_xls + excel_xlsx', + usingMetrics: { + excel_xls: new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'xls' + } + }), + excel_xlsx: new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'xlsx' + } + }) + }, + label: 'xls/xlsx', + color: cloudwatch.Color.GREY + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: 'html', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'html' + }, + color: cloudwatch.Color.GREEN + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: 'txt', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'txt' + }, + color: cloudwatch.Color.PURPLE + }), + new cloudwatch.Metric({ + namespace: CloudWatchNamespace.FILE_HANDLING, + metricName: CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + statistic: cloudwatch.Stats.SAMPLE_COUNT, + label: 'md', + period: cdk.Duration.hours(1), + dimensionsMap: { + service: fileManagementServiceName, + UseCaseId: this.props.useCaseUUID, + FileExtension: 'md' + }, + color: cloudwatch.Color.PINK + }) + ] }) ); } diff --git a/source/infrastructure/lib/multimodal/multimodal-setup.ts b/source/infrastructure/lib/multimodal/multimodal-setup.ts new file mode 100644 index 00000000..c2f8f93c --- /dev/null +++ b/source/infrastructure/lib/multimodal/multimodal-setup.ts @@ -0,0 +1,586 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import * as api from 'aws-cdk-lib/aws-apigateway'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import * as s3 from 'aws-cdk-lib/aws-s3'; +import * as dynamodb from 'aws-cdk-lib/aws-dynamodb'; +import * as events from 'aws-cdk-lib/aws-events'; +import * as targets from 'aws-cdk-lib/aws-events-targets'; +import { ConstructsFactories } from '@aws-solutions-constructs/aws-constructs-factories'; +import { NagSuppressions } from 'cdk-nag'; +import * as cfn_nag from '../utils/cfn-guard-suppressions'; +import { + COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + LAMBDA_TIMEOUT_MINS, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + POWERTOOLS_SERVICE_NAME_ENV_VAR, + MULTIMODAL_FILE_EXPIRATION_DAYS, + MULTIMODAL_ENABLED_ENV_VAR, + StackDeploymentSource +} from '../utils/constants'; +import { FileOperationSchemas } from '../api/model-schema'; +import { + createCustomResourceForLambdaLogRetention, + createDefaultLambdaRole, + createVpcConfigForLambda +} from '../utils/common-utils'; +import { ApplicationAssetBundler } from '../framework/bundler/asset-options-factory'; +import * as sqs from 'aws-cdk-lib/aws-sqs'; +import { Construct } from 'constructs'; +import { DeploymentRestApiHelper, DeploymentApiContext } from '../api/deployment-platform-rest-api-helper'; +import { ResourceConditionsAspect } from '../utils/resource-conditions-aspect'; + +export interface MultimodalSetupProps { + /** + * The restAPI to which multimodal file routes will be added + */ + restApi: api.RestApi; + + /** + * The deployment platform authorizer to allow users to access the file management API + */ + deploymentPlatformAuthorizer: api.RequestAuthorizer; + + /** + * The API request validator + */ + requestValidator: api.RequestValidator; + + /** + * The DLQ which the main stack defines which will be reused for lambdas in multimodal setup + */ + dlq: sqs.Queue; + + /** + * Condition that determines if VPC configuration should be applied + * When false, VPC related props (privateSubnetIds, securityGroupIds) are ignored + */ + deployVPCCondition: cdk.CfnCondition; + + /** + * Existing private subnet IDs for VPC configuration if VPC is setup + */ + privateSubnetIds: string; + + /** + * Existing security group IDs for VPC configuration if VPC is setup + */ + securityGroupIds: string; + + /** + * Custom resource lambda ARN for log retention + */ + customResourceLambdaArn: string; + + /** + * Custom resource lambda role ARN for granting permissions + */ + customResourceLambdaRoleArn: string; + + /** + * Access logging S3 bucket for server access logs + */ + accessLoggingS3Bucket: s3.Bucket; + + /** + * Stack deployment source to determine if multimodal should be enabled via environment variable + */ + stackSource: StackDeploymentSource; +} + +/** + * Construct to deploy Multimodal File Management Resources + */ +export class MultimodalSetup extends Construct { + /** + * S3 bucket for storing multimodal data files + */ + public readonly multimodalDataBucket: s3.Bucket; + + /** + * DynamoDB table for storing file metadata + */ + public readonly multimodalDataMetadataTable: dynamodb.Table; + + /** + * The lambda function that backs the file management routes and processes file operations + */ + public readonly filesHandlerLambda: lambda.Function; + + /** + * Lambda function role for the file handler Lambda + */ + private readonly filesHandlerLambdaRole: iam.Role; + + /** + * The lambda function that gets triggered by S3 when files are uploaded + */ + public readonly updateFilesMetadataLambda: lambda.Function; + + /** + * Lambda function role for the update metadata lambda + */ + private readonly updateFilesMetadataRole: iam.Role; + + /** + * The REST API for adding routes + */ + private readonly restApi: api.RestApi; + + /** + * The event rule which is added for listening to S3 creation events + */ + private readonly s3EventRule: events.Rule; + + /** + * API Gateway resources created for file operations + */ + private filesResource: api.Resource; + + constructor(scope: Construct, id: string, props: MultimodalSetupProps) { + super(scope, id); + + this.restApi = props.restApi; + + this.multimodalDataMetadataTable = new dynamodb.Table(this, 'MultimodalDataMetadataTable', { + encryption: dynamodb.TableEncryption.AWS_MANAGED, + billingMode: dynamodb.BillingMode.PAY_PER_REQUEST, + partitionKey: { + name: 'fileKey', + type: dynamodb.AttributeType.STRING + }, + sortKey: { + name: 'fileName', + type: dynamodb.AttributeType.STRING + }, + timeToLiveAttribute: 'ttl', + removalPolicy: cdk.RemovalPolicy.RETAIN + }); + + const factories = new ConstructsFactories(this, 'Factories'); + + this.multimodalDataBucket = factories.s3BucketFactory('MultimodalDataBucket', { + bucketProps: { + // Note: Server access logging is not configured for the multimodal data bucket + // to avoid CloudFormation dependency issues with conditional resource creation + versioned: false, // NOSONAR - bucket versioning is recommended in the IG, but is not enforced + publicReadAccess: false, + encryption: s3.BucketEncryption.S3_MANAGED, + removalPolicy: cdk.RemovalPolicy.RETAIN, + blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, + enforceSSL: true, + lifecycleRules: [ + { + id: 'DeleteFilesAfter48Hours', + enabled: true, + expiration: cdk.Duration.days(MULTIMODAL_FILE_EXPIRATION_DAYS) + } + ], + cors: [ + { + allowedMethods: [s3.HttpMethods.POST], + allowedOrigins: ['*'], + allowedHeaders: ['*'], + maxAge: 3600 + } + ] + } + }).s3Bucket; + + // Configure S3 EventBridge notifications using custom resource + new cdk.CustomResource(this, 'BucketNotificationsCustomResource', { + resourceType: 'Custom::MultimodalBucketNotifications', + serviceToken: props.customResourceLambdaArn, + properties: { + Resource: 'MULTIMODAL_BUCKET_NOTIFICATIONS', + [MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]: this.multimodalDataBucket.bucketName + } + }); + + // Grant the custom resource Lambda permission to configure bucket notifications + const customResourceLambdaRole = iam.Role.fromRoleArn( + this, + 'MultimodalCustomResourceRole', + props.customResourceLambdaRoleArn + ); + + const customResourceS3EventsNotificationsPolicy = new iam.Policy( + this, + 'CustomResourceS3EventsNotificationsPolicy', + { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['s3:PutBucketNotification', 's3:PutBucketNotificationConfiguration'], + resources: [this.multimodalDataBucket.bucketArn] + }) + ] + } + ); + customResourceS3EventsNotificationsPolicy.attachToRole(customResourceLambdaRole); + + this.filesHandlerLambdaRole = createDefaultLambdaRole( + this, + 'FilesManagementLambdaRole', + props.deployVPCCondition + ); + + // Build environment variables for the files handler lambda + const filesHandlerEnvironment: { [key: string]: string } = { + [POWERTOOLS_SERVICE_NAME_ENV_VAR]: 'FILES_MANAGEMENT', + [MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]: this.multimodalDataBucket.bucketName, + [MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR]: this.multimodalDataMetadataTable.tableName + }; + + // In standalone mode, when UseCasesTable is not available, this env variable is used instead + if (props.stackSource === StackDeploymentSource.STANDALONE_USE_CASE) { + filesHandlerEnvironment[MULTIMODAL_ENABLED_ENV_VAR] = 'true'; + } + + this.filesHandlerLambda = new lambda.Function(this, 'FilesManagementLambda', { + description: 'Lambda function backing the REST API for file management operations', + code: lambda.Code.fromAsset( + '../lambda/files-management', + ApplicationAssetBundler.assetBundlerFactory() + .assetOptions(COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME) + .options(this, '../lambda/files-management') + ), + role: this.filesHandlerLambdaRole, + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'index.handler', + timeout: cdk.Duration.minutes(LAMBDA_TIMEOUT_MINS), + tracing: lambda.Tracing.ACTIVE, + deadLetterQueue: props.dlq, + environment: filesHandlerEnvironment + }); + + createCustomResourceForLambdaLogRetention( + this, + 'FilesHandlerLambdaLogRetention', + this.filesHandlerLambda.functionName, + props.customResourceLambdaArn + ); + + createVpcConfigForLambda( + this.filesHandlerLambda, + props.deployVPCCondition, + props.privateSubnetIds, + props.securityGroupIds + ); + + this.createFileManagementApi(props); + + this.updateFilesMetadataRole = createDefaultLambdaRole( + this, + 'UpdateFilesMetadataLambdaRole', + props.deployVPCCondition + ); + + this.updateFilesMetadataLambda = new lambda.Function(this, 'UpdateFilesMetadataLambda', { + description: 'Lambda function that updates multimodal files metadata when files are uploaded to S3', + code: lambda.Code.fromAsset( + '../lambda/files-metadata-management', + ApplicationAssetBundler.assetBundlerFactory() + .assetOptions(COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME) + .options(this, '../lambda/files-metadata-management') + ), + role: this.updateFilesMetadataRole, + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'index.handler', + timeout: cdk.Duration.minutes(LAMBDA_TIMEOUT_MINS), + tracing: lambda.Tracing.ACTIVE, + deadLetterQueue: props.dlq, + environment: { + [POWERTOOLS_SERVICE_NAME_ENV_VAR]: 'FILES_METADATA_MANAGEMENT', + [MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR]: this.multimodalDataMetadataTable.tableName, + [MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]: this.multimodalDataBucket.bucketName + } + }); + + createCustomResourceForLambdaLogRetention( + this, + 'UpdateFilesMetadataLambdaLogRetention', + this.updateFilesMetadataLambda.functionName, + props.customResourceLambdaArn + ); + + createVpcConfigForLambda( + this.updateFilesMetadataLambda, + props.deployVPCCondition, + props.privateSubnetIds, + props.securityGroupIds + ); + + // The EventBridge rule to capture S3 Object Created events with updateFilesMetadataLambda as target so it can process uploaded files + this.s3EventRule = new events.Rule(this, 'S3ObjectCreatedRule', { + eventPattern: { + source: ['aws.s3'], + detailType: ['Object Created'], + detail: { + bucket: { + name: [this.multimodalDataBucket.bucketName] + } + } + }, + description: 'Trigger metadata update when files are uploaded to multimodal bucket' + }); + this.s3EventRule.addTarget(new targets.LambdaFunction(this.updateFilesMetadataLambda)); + + // Add explicit permission for EventBridge to invoke the update metadata Lambda + this.updateFilesMetadataLambda.addPermission('EventBridgeInvoke', { + principal: new iam.ServicePrincipal('events.amazonaws.com'), + action: 'lambda:InvokeFunction', + sourceArn: this.s3EventRule.ruleArn + }); + + this.configureLambdaPermissions(); + this.addSuppressions(); + } + + /** + * Creates all API resources and methods for the files management API + */ + private createFileManagementApi(props: MultimodalSetupProps): void { + const filesHandlerIntegration = new api.LambdaIntegration(this.filesHandlerLambda, { + passthroughBehavior: api.PassthroughBehavior.NEVER + }); + + const apiContext: DeploymentApiContext = { + scope: this, + requestValidator: props.requestValidator, + authorizer: props.deploymentPlatformAuthorizer, + integration: filesHandlerIntegration + }; + + // Create /files resource with all HTTP methods + this.filesResource = props.restApi.root.addResource('files'); + const filesUseCaseResource = this.filesResource.addResource('{useCaseId}'); // for adding files for a particular usecase Id + + // Configure CORS for all methods + DeploymentRestApiHelper.configureCors(filesUseCaseResource, ['POST', 'DELETE', 'GET', 'OPTIONS']); + + // Create models for file operations + const uploadRequestModel = DeploymentRestApiHelper.createModel( + apiContext, + props.restApi, + 'FilesUploadRequest', + 'Defines the required JSON structure for file upload requests', + FileOperationSchemas.upload.request + ); + + const uploadResponseModel = DeploymentRestApiHelper.createModel( + apiContext, + props.restApi, + 'FilesUploadResponse', + 'Response model for file upload operations', + FileOperationSchemas.upload.response + ); + + const deleteRequestModel = DeploymentRestApiHelper.createModel( + apiContext, + props.restApi, + 'FilesDeleteRequest', + 'Defines the required JSON structure for file deletion requests', + FileOperationSchemas.delete.request + ); + + const deleteResponseModel = DeploymentRestApiHelper.createModel( + apiContext, + props.restApi, + 'FilesDeleteResponse', + 'Response model for file deletion operations', + FileOperationSchemas.delete.response + ); + + const getResponseModel = DeploymentRestApiHelper.createModel( + apiContext, + props.restApi, + 'FilesGetResponse', + 'Response model for file retrieval operations', + FileOperationSchemas.get.response + ); + + // Add POST /files endpoint for file uploads + const uploadMethodOptions = DeploymentRestApiHelper.createMethodOptionsWithModels( + apiContext, + 'UploadFiles', + uploadRequestModel, + uploadResponseModel + ); + filesUseCaseResource.addMethod('POST', filesHandlerIntegration, uploadMethodOptions); + + // Add DELETE /files endpoint for file deletion + const deleteMethodOptions = DeploymentRestApiHelper.createMethodOptionsWithModels( + apiContext, + 'DeleteFiles', + deleteRequestModel, + deleteResponseModel + ); + filesUseCaseResource.addMethod('DELETE', filesHandlerIntegration, deleteMethodOptions); + + // Add GET /files endpoint (query parameters: fileName, conversationId, messageId) + const getParams = { + 'method.request.querystring.fileName': true, + 'method.request.querystring.conversationId': true, + 'method.request.querystring.messageId': true + }; + + const getMethodOptions = DeploymentRestApiHelper.createMethodOptionsWithModels( + apiContext, + 'GetFile', + undefined, // No request model for GET + getResponseModel, + getParams + ); + filesUseCaseResource.addMethod('GET', filesHandlerIntegration, getMethodOptions); + } + + /** + * Configures permissions for both lambda functions + */ + private configureLambdaPermissions(): void { + this.multimodalDataMetadataTable.grantReadWriteData(this.filesHandlerLambda); + this.multimodalDataMetadataTable.grantReadWriteData(this.updateFilesMetadataLambda); + this.multimodalDataBucket.grantReadWrite(this.filesHandlerLambda); + this.multimodalDataBucket.grantRead(this.updateFilesMetadataLambda); + + // Add permission for API Gateway to invoke the files handler Lambda + this.filesHandlerLambda.addPermission('APIGatewayInvoke', { + principal: new iam.ServicePrincipal('apigateway.amazonaws.com'), + action: 'lambda:InvokeFunction', + sourceArn: `arn:${cdk.Aws.PARTITION}:execute-api:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:${this.restApi.restApiId}/*` + }); + } + + private addSuppressions(): void { + NagSuppressions.addResourceSuppressions( + this.filesHandlerLambda.role!.node.tryFindChild('DefaultPolicy')!.node.tryFindChild('Resource')!, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the Lambda function to perform x-ray tracing and access DynamoDB tables with wildcards for use case management' + } + ] + ); + + cfn_nag.addCfnSuppressRules(this.filesHandlerLambda, [ + { + id: 'W89', + reason: 'VPC deployment is not enforced. If the solution is deployed in a VPC, this lambda function will be deployed with VPC enabled configuration' + }, + { + id: 'W92', + reason: 'The solution does not enforce reserved concurrency' + } + ]); + + cfn_nag.addCfnSuppressRules(this.filesHandlerLambdaRole, [ + { + id: 'F10', + reason: 'The inline policy avoids a rare race condition between the lambda, Role and the policy resource creation.' + } + ]); + + NagSuppressions.addResourceSuppressions( + this.updateFilesMetadataLambda.role!.node.tryFindChild('DefaultPolicy')!.node.tryFindChild('Resource')!, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the Lambda function to perform x-ray tracing and access DynamoDB tables for file metadata management' + } + ] + ); + + cfn_nag.addCfnSuppressRules(this.updateFilesMetadataLambda, [ + { + id: 'W89', + reason: 'VPC deployment is not enforced. If the solution is deployed in a VPC, this lambda function will be deployed with VPC enabled configuration' + }, + { + id: 'W92', + reason: 'The solution does not enforce reserved concurrency' + } + ]); + + cfn_nag.addCfnSuppressRules(this.updateFilesMetadataRole, [ + { + id: 'F10', + reason: 'The inline policy avoids a rare race condition between the lambda, Role and the policy resource creation.' + } + ]); + + // S3 bucket suppressions + cfn_nag.addCfnSuppressRules(this.multimodalDataBucket, [ + { + id: 'W35', + reason: 'Access logging is configured with a separate access logging bucket' + }, + { + id: 'W51', + reason: 'Bucket policy is not required as the bucket uses IAM roles for access control' + } + ]); + + cfn_nag.addCfnSuppressRules(this.multimodalDataMetadataTable, [ + { + id: 'W78', + reason: 'Enabling point-in-time recovery is recommended in the implementation guide, but is not enforced' + }, + { + id: 'W74', + reason: 'The table is configured with AWS Managed key' + } + ]); + + // API Gateway method suppressions for files endpoints + const resourcePathsToSuppress = ['files/{useCaseId}']; + const operationsToSuppress = ['GET', 'POST', 'DELETE', 'OPTIONS']; + + resourcePathsToSuppress.forEach((_path) => { + operationsToSuppress.forEach((_operation) => { + try { + NagSuppressions.addResourceSuppressionsByPath( + cdk.Stack.of(this), + `${this.restApi.root}/${_path}/${_operation}/Resource`, + [ + { + id: 'AwsSolutions-COG4', + reason: 'The API uses a custom authorizer instead of Cognito user pool authorizer for authentication' + } + ], + false + ); + } catch (error) { + // Ignore if resource doesn't exist + } + }); + }); + + cfn_nag.addCfnSuppressRules(this.s3EventRule, [ + { + id: 'W92', + reason: 'EventBridge rules do not require reserved concurrency' + } + ]); + } + + /** + * Apply condition to all resources created by this construct, including API Gateway resources + * This method should be called externally after the condition is available + */ + public applyConditionToAllResources(condition: cdk.CfnCondition): void { + // Apply the CDK aspect to all resources in this construct (Lambda, S3, DynamoDB, etc.) + cdk.Aspects.of(this).add(new ResourceConditionsAspect(condition, true, true), { + priority: cdk.AspectPriority.MUTATING + }); + + cdk.Aspects.of(this.filesResource).add(new ResourceConditionsAspect(condition, true, true), { + priority: cdk.AspectPriority.MUTATING + }); + } +} diff --git a/source/infrastructure/lib/sagemaker-chat-stack.ts b/source/infrastructure/lib/sagemaker-chat-stack.ts index f8e7765e..665d085b 100644 --- a/source/infrastructure/lib/sagemaker-chat-stack.ts +++ b/source/infrastructure/lib/sagemaker-chat-stack.ts @@ -35,7 +35,7 @@ export class SageMakerChat extends TextUseCase { constructor(scope: Construct, id: string, props: BaseStackProps) { super(scope, id, props); this.withAdditionalResourceSetup(props); - this.withAnonymousMetrics(props); + this.withMetrics(props); } protected withAdditionalResourceSetup(props: BaseStackProps): void { diff --git a/source/infrastructure/lib/storage/chat-storage-stack.ts b/source/infrastructure/lib/storage/chat-storage-stack.ts index 7260b68f..0f829d0c 100644 --- a/source/infrastructure/lib/storage/chat-storage-stack.ts +++ b/source/infrastructure/lib/storage/chat-storage-stack.ts @@ -9,7 +9,7 @@ import { NagSuppressions } from 'cdk-nag'; import { Construct, IConstruct } from 'constructs'; import { BaseNestedStack } from '../framework/base-nested-stack'; import * as cfn_guard from '../utils/cfn-guard-suppressions'; -import { DynamoDBAttributes, USE_CASE_TYPES } from '../utils/constants'; +import { DynamoDBAttributes, CHAT_ENABLED_USE_CASE_TYPES } from '../utils/constants'; import { UseCaseModelInfoStorage } from './use-case-model-info-storage'; export class DynamoDBChatStorageParameters { @@ -52,7 +52,7 @@ export class DynamoDBChatStorageParameters { this.useCaseType = new cdk.CfnParameter(stack, 'UseCaseType', { type: 'String', description: 'The UseCaseType. The value is provided as Agent or Text', - allowedValues: Object.values(USE_CASE_TYPES) + allowedValues: CHAT_ENABLED_USE_CASE_TYPES }).valueAsString; } } diff --git a/source/infrastructure/lib/storage/deployment-platform-storage-setup.ts b/source/infrastructure/lib/storage/deployment-platform-storage-setup.ts index 3ece92b3..c62274bf 100644 --- a/source/infrastructure/lib/storage/deployment-platform-storage-setup.ts +++ b/source/infrastructure/lib/storage/deployment-platform-storage-setup.ts @@ -35,21 +35,6 @@ export interface DeploymentPlatformStorageProps extends BaseStackProps { accessLoggingBucket: s3.Bucket; } -export interface LambdaDependencies { - /** - * Lambda which backs API calls interacting with the use cases - */ - deploymentApiLambda: lambda.Function; - /** - * Lambda which backs API calls for retrieving model info - */ - modelInfoApiLambda: lambda.Function; - /** - * Lambda which backs API calls for retrieving feedback - */ - feedbackApiLambda: lambda.Function; -} - /** * This Construct sets up the nested stack managing dynamoDB tables for use case management */ @@ -79,9 +64,8 @@ export class DeploymentPlatformStorageSetup extends Construct { }); } - public addLambdaDependencies(lambdas: LambdaDependencies): void { - // Create and attach the DDB policy for the Lambda functions - const ddbUCMLPolicy = new iam.Policy(this, 'DDBUCMLPolicy', { + public configureDeploymentApiLambda(deploymentApiLambda: lambda.Function): void { + const ddbPolicy = new iam.Policy(this, 'DeploymentApiDDBPolicy', { statements: [ new iam.PolicyStatement({ actions: [ @@ -102,34 +86,35 @@ export class DeploymentPlatformStorageSetup extends Construct { }) ] }); - ddbUCMLPolicy.attachToRole(lambdas.deploymentApiLambda.role!); + ddbPolicy.attachToRole(deploymentApiLambda.role!); - // Add environment variables to the deployment API Lambda - lambdas.deploymentApiLambda.addEnvironment( + deploymentApiLambda.addEnvironment( USE_CASES_TABLE_NAME_ENV_VAR, this.deploymentPlatformStorage.useCasesTable.tableName ); - - lambdas.deploymentApiLambda.addEnvironment( + deploymentApiLambda.addEnvironment( MODEL_INFO_TABLE_NAME_ENV_VAR, this.deploymentPlatformStorage.modelInfoTable.tableName ); - - lambdas.deploymentApiLambda.addEnvironment( + deploymentApiLambda.addEnvironment( USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, this.deploymentPlatformStorage.useCaseConfigTable.tableName ); - // Set up model info Lambda with DynamoDB + this.addDynamoDBNagSuppressions(ddbPolicy, 'deploymentAPI'); + } + + public configureModelInfoApiLambda(modelInfoApiLambda: lambda.Function): void { new LambdaToDynamoDB(this, 'ModelInfoLambdaToModelInfoDDB', { - existingLambdaObj: lambdas.modelInfoApiLambda, + existingLambdaObj: modelInfoApiLambda, existingTableObj: this.deploymentPlatformStorage.modelInfoTable, tablePermissions: 'Read', tableEnvironmentVariableName: MODEL_INFO_TABLE_NAME_ENV_VAR }); + } - // Add permissions for feedback API Lambda - lambdas.feedbackApiLambda.addToRolePolicy( + public configureFeedbackApiLambda(feedbackApiLambda: lambda.Function): void { + feedbackApiLambda.addToRolePolicy( new iam.PolicyStatement({ effect: iam.Effect.ALLOW, actions: ['dynamodb:GetItem', 'dynamodb:Query'], @@ -140,21 +125,95 @@ export class DeploymentPlatformStorageSetup extends Construct { }) ); - lambdas.feedbackApiLambda.addEnvironment( + feedbackApiLambda.addEnvironment( USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, this.deploymentPlatformStorage.useCaseConfigTable.tableName ); + feedbackApiLambda.addEnvironment( + USE_CASES_TABLE_NAME_ENV_VAR, + this.deploymentPlatformStorage.useCasesTable.tableName + ); + } + + public configureFilesHandlerLambda(filesMetadataLambda: lambda.Function): void { + filesMetadataLambda.addToRolePolicy( + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['dynamodb:GetItem', 'dynamodb:Query'], + resources: [ + this.deploymentPlatformStorage.useCaseConfigTable.tableArn, + this.deploymentPlatformStorage.useCasesTable.tableArn + ] + }) + ); - lambdas.feedbackApiLambda.addEnvironment( + filesMetadataLambda.addEnvironment( + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + this.deploymentPlatformStorage.useCaseConfigTable.tableName + ); + filesMetadataLambda.addEnvironment( + USE_CASES_TABLE_NAME_ENV_VAR, + this.deploymentPlatformStorage.useCasesTable.tableName + ); + } + + public configureUseCaseManagementApiLambda( + managementApiLambda: lambda.Function, + type: string, + includeModelInfoTable: boolean = false + ): void { + const resources = [ + this.deploymentPlatformStorage.useCasesTable.tableArn, + this.deploymentPlatformStorage.useCaseConfigTable.tableArn + ]; + + if (includeModelInfoTable) { + resources.push(this.deploymentPlatformStorage.modelInfoTable.tableArn); + } + + const ddbPolicy = new iam.Policy(this, `${type}ManagementDDBPolicy`, { + statements: [ + new iam.PolicyStatement({ + actions: [ + 'dynamodb:Batch*', + 'dynamodb:ConditionCheckItem', + 'dynamodb:DeleteItem', + 'dynamodb:Get*', + 'dynamodb:PutItem', + 'dynamodb:Query', + 'dynamodb:Scan', + 'dynamodb:UpdateItem' + ], + resources: resources + }) + ] + }); + ddbPolicy.attachToRole(managementApiLambda.role!); + + managementApiLambda.addEnvironment( USE_CASES_TABLE_NAME_ENV_VAR, this.deploymentPlatformStorage.useCasesTable.tableName ); + managementApiLambda.addEnvironment( + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + this.deploymentPlatformStorage.useCaseConfigTable.tableName + ); + + if (includeModelInfoTable) { + managementApiLambda.addEnvironment( + MODEL_INFO_TABLE_NAME_ENV_VAR, + this.deploymentPlatformStorage.modelInfoTable.tableName + ); + } + + this.addDynamoDBNagSuppressions(ddbPolicy, `${type.toLowerCase()}Management`); + } - // Add NAG suppressions - NagSuppressions.addResourceSuppressions(ddbUCMLPolicy, [ + private addDynamoDBNagSuppressions(policy: iam.Policy, lambdaType: string): void { + NagSuppressions.addResourceSuppressions(policy, [ { id: 'AwsSolutions-IAM5', - reason: 'The IAM role allows the Lambda function to create, delete table. Table name is not known', + reason: `The IAM role allows the ${lambdaType} Lambda function to perform DynamoDB operations. Table name is not known here.`, appliesTo: ['Action::dynamodb:Batch*', 'Action::dynamodb:Get*'] } ]); diff --git a/source/infrastructure/lib/use-case-management/cfn-deploy-role-factory.ts b/source/infrastructure/lib/use-case-management/cfn-deploy-role-factory.ts new file mode 100644 index 00000000..1dbfced6 --- /dev/null +++ b/source/infrastructure/lib/use-case-management/cfn-deploy-role-factory.ts @@ -0,0 +1,698 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import { NagSuppressions } from 'cdk-nag'; +import { Construct } from 'constructs'; +import * as cfn_nag from '../utils/cfn-guard-suppressions'; +import { generateCfnTemplateUrl } from '../utils/common-utils'; + +/** + * Configuration interface for CFN deploy role creation + */ +export interface CfnDeployRoleConfig { + /** + * Include VPC creation and management permissions + * Required for text use cases that support VPC deployment + */ + includeVpcPermissions?: boolean; + + /** + * Include Amazon Kendra permissions for knowledge base operations + * Required for text use cases with Kendra knowledge bases + */ + includeKendraPermissions?: boolean; + + /** + * Include Amazon ECR permissions for container registry operations + * Required for agent use cases with pull-through cache + */ + includeEcrPermissions?: boolean; + + /** + * Additional services that the role should be able to pass roles to + * Default includes lambda.amazonaws.com, apigateway.amazonaws.com, cloudformation.amazonaws.com + */ + additionalPassRoleServices?: string[]; + + /** + * Custom role name suffix for identification + */ + roleName?: string; +} + +/** + * Factory function to create CFN deploy roles with modular permissions + * This replaces the monolithic buildCfnDeployRole with a configurable approach + */ +export const createCfnDeployRole = ( + scope: Construct, + id: string, + lambdaRole: iam.Role, + config: CfnDeployRoleConfig = {} +): iam.Role => { + const { + includeVpcPermissions = true, + includeKendraPermissions = true, + includeEcrPermissions = false, + additionalPassRoleServices = [], + roleName = 'CfnDeployRole' + } = config; + + const awsTagKeysCondition = { + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId'] + } + }; + + const awsCalledViaCondition = { + 'ForAnyValue:StringEquals': { + 'aws:CalledVia': ['cloudformation.amazonaws.com'] + } + }; + + // Create the base role with core permissions + const cfnDeployRole = new iam.Role(scope, id, { + assumedBy: new iam.ServicePrincipal('cloudformation.amazonaws.com'), + inlinePolicies: { + CfnDeployPolicy: new iam.PolicyDocument({ + statements: [ + new iam.PolicyStatement({ + actions: [ + 'dynamodb:CreateTable', + 'dynamodb:DeleteTable', + 'dynamodb:DescribeTable', + 'dynamodb:DescribeTimeToLive', + 'dynamodb:ListTagsOfResource', + 'dynamodb:UpdateTimeToLive', + 'dynamodb:TagResource' + ], + resources: [`arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/*`] + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['ssm:GetParameter'], + resources: [`arn:${cdk.Aws.PARTITION}:ssm:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:parameter*`] + }) + ] + }) + } + }); + + // Create and attach core CloudFormation deployment policy + const corePolicy = createCoreCfnDeployPolicy( + scope, + `${id}CorePolicy`, + awsTagKeysCondition, + awsCalledViaCondition, + additionalPassRoleServices + ); + // Only attach to CFN deploy role and lambda role (matching original behavior) + corePolicy.attachToRole(lambdaRole); + corePolicy.attachToRole(cfnDeployRole); + + // Conditionally add VPC permissions - attach to both roles (matching original) + if (includeVpcPermissions) { + const vpcPolicy = createVpcCreationPolicy(scope, `${id}VpcPolicy`, awsTagKeysCondition); + vpcPolicy.attachToRole(lambdaRole); + vpcPolicy.attachToRole(cfnDeployRole); + } + + // Conditionally add Kendra permissions - only attach to CFN deploy role (not lambda) + if (includeKendraPermissions) { + const kendraPolicy = createKendraPolicy(scope, `${id}KendraPolicy`, awsCalledViaCondition); + // Only attach to CFN deploy role - lambda doesn't need Kendra permissions + kendraPolicy.attachToRole(cfnDeployRole); + } + + // Conditionally add ECR permissions - only attach to CFN deploy role (not lambda) + if (includeEcrPermissions) { + const ecrPolicy = createEcrPolicy(scope, `${id}EcrPolicy`, awsCalledViaCondition); + // Only attach to CFN deploy role - lambda doesn't need ECR permissions + ecrPolicy.attachToRole(cfnDeployRole); + } + + // Add NAG suppressions + addCfnDeployRoleNagSuppressions(cfnDeployRole, corePolicy); + addModularPolicyNagSuppressions(scope, id, config); + + return cfnDeployRole; +}; + +/** + * Create core CloudFormation deployment policy with essential permissions + */ +const createCoreCfnDeployPolicy = ( + scope: Construct, + id: string, + awsTagKeysCondition: any, + awsCalledViaCondition: any, + additionalPassRoleServices: string[] +): iam.Policy => { + // Base services that all use cases need + const basePassRoleServices = ['lambda.amazonaws.com', 'apigateway.amazonaws.com', 'cloudformation.amazonaws.com']; + + // Combine base services with additional ones + const allPassRoleServices = [...basePassRoleServices, ...additionalPassRoleServices]; + + return new iam.Policy(scope, id, { + statements: [ + // CloudFormation stack operations + new iam.PolicyStatement({ + actions: ['cloudformation:CreateStack', 'cloudformation:UpdateStack'], + effect: iam.Effect.ALLOW, + resources: [`arn:${cdk.Aws.PARTITION}:cloudformation:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:stack/*`], + conditions: { + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId'] + }, + 'StringLike': { + 'cloudformation:TemplateUrl': generateCfnTemplateUrl(scope) + } + } + }), + new iam.PolicyStatement({ + actions: ['cloudformation:DeleteStack', 'cloudformation:DescribeStack*', 'cloudformation:ListStacks'], + effect: iam.Effect.ALLOW, + resources: [`arn:${cdk.Aws.PARTITION}:cloudformation:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:stack/*`] + }), + + // IAM role management + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'iam:CreateRole', + 'iam:DeleteRole*', + 'iam:DetachRolePolicy', + 'iam:GetRole', + 'iam:ListRoleTags', + 'iam:*tRolePolicy', // Get|Put RolePolicy + 'iam:TagRole', + 'iam:UpdateAssumeRolePolicy' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/*`, + `arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:policy/*` + ], + conditions: { + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId', 'Name'] + } + } + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['iam:PassRole'], + resources: [`arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/*`], + conditions: { + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId', 'Name'] + }, + 'StringEquals': { + 'iam:PassedToService': allPassRoleServices + } + } + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['iam:AttachRolePolicy'], + resources: [`arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/*`], + conditions: { + ...awsCalledViaCondition, + ...awsTagKeysCondition, + StringEquals: { + 'iam:PolicyARN': [ + `arn:${cdk.Aws.PARTITION}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole` + ] + } + } + }), + + // Lambda function management + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'lambda:AddPermission', + 'lambda:CreateFunction', + 'lambda:Delete*', + 'lambda:GetFunction', + 'lambda:InvokeFunction', + 'lambda:ListTags', + 'lambda:RemovePermission', + 'lambda:TagResource', + 'lambda:UpdateEventSourceMapping', + 'lambda:UpdateFunction*', + 'lambda:*Alias*', + 'lambda:*Version*', + 'lambda:*ProvisionedConcurrency*' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:lambda:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:function:*`, + `arn:${cdk.Aws.PARTITION}:lambda:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:layer:*`, + `arn:${cdk.Aws.PARTITION}:lambda:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:event-source-mapping:*` + ], + conditions: { + ...awsTagKeysCondition + } + }), + + // S3 bucket management + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 's3:CreateBucket', + 's3:DeleteBucketPolicy', + 's3:GetBucketAcl', + 's3:GetBucketPolicy*', + 's3:GetBucketVersioning', + 's3:*EncryptionConfiguration', // Get|Put EncryptionConfiguration + 's3:GetObject', + 's3:PutBucket*' + ], + resources: [`arn:${cdk.Aws.PARTITION}:s3:::*`] + }), + + // EventBridge rules + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'events:DeleteRule', + 'events:DescribeRule', + 'events:PutRule', + 'events:*Targets' // Put|Remove Targets + ], + resources: [`arn:${cdk.Aws.PARTITION}:events:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:rule/*`] + }), + + // ServiceCatalog (required for application registry) + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['servicecatalog:*'], + resources: [ + `arn:${cdk.Aws.PARTITION}:servicecatalog:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:/attribute-groups/*`, + `arn:${cdk.Aws.PARTITION}:servicecatalog:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:/applications/*` + ], + conditions: { + ...awsCalledViaCondition + } + }), + + // API Gateway and WAF + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'apigateway:CreateRestApi', + 'apigateway:CreateStage', + 'apigateway:DELETE', + 'apigateway:Delete*', + 'apigateway:GET', + 'apigateway:PATCH', + 'apigateway:POST', + 'apigateway:PUT', + 'apigateway:SetWebACL', + 'apigateway:TagResource', + 'wafv2:*ForResource', + 'wafv2:*WebACL', + 'wafv2:TagResource' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:apigateway:${cdk.Aws.REGION}::/*`, + `arn:${cdk.Aws.PARTITION}:wafv2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:regional/*/*/*` + ], + conditions: { + ...awsCalledViaCondition + } + }), + + // Cognito + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'cognito-idp:AdminAddUserToGroup', + 'cognito-idp:AdminCreateUser', + 'cognito-idp:AdminDeleteUser', + 'cognito-idp:AdminGetUser', + 'cognito-idp:AdminListGroupsForUser', + 'cognito-idp:AdminRemoveUserFromGroup', + 'cognito-idp:CreateGroup', + 'cognito-idp:CreateUserPool*', + 'cognito-idp:Delete*', + 'cognito-idp:GetGroup', + 'cognito-idp:SetUserPoolMfaConfig', + 'cognito-idp:*UserPoolClient' // Describe|Update UserPoolClient + ], + resources: [`arn:${cdk.Aws.PARTITION}:cognito-idp:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:userpool/*`], + conditions: { + ...awsCalledViaCondition + } + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['cognito-idp:DescribeUserPool'], + resources: [`arn:${cdk.Aws.PARTITION}:cognito-idp:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:userpool/*`] + }), + + // CloudFront + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'cloudfront:Create*', + 'cloudfront:Delete*', + 'cloudfront:DescribeFunction', + 'cloudfront:Get*', + 'cloudfront:ListTagsForResource', + 'cloudfront:PublishFunction', + 'cloudfront:TagResource', + 'cloudfront:Update*' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:cloudfront::${cdk.Aws.ACCOUNT_ID}:function/*`, + `arn:${cdk.Aws.PARTITION}:cloudfront::${cdk.Aws.ACCOUNT_ID}:origin-access-control/*`, + `arn:${cdk.Aws.PARTITION}:cloudfront::${cdk.Aws.ACCOUNT_ID}:distribution/*`, + `arn:${cdk.Aws.PARTITION}:cloudfront::${cdk.Aws.ACCOUNT_ID}:response-headers-policy/*` + ] + }), + + // KMS + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'kms:CreateGrant', + 'kms:Decrypt', + 'kms:DescribeKey', + 'kms:EnableKeyRotation', + 'kms:Encrypt', + 'kms:GenerateDataKey', + 'kms:PutKeyPolicy', + 'kms:TagResource' + ], + resources: [`arn:${cdk.Aws.PARTITION}:kms:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:key/*`], + conditions: { + ...awsCalledViaCondition + } + }), + new iam.PolicyStatement({ + actions: [ + 'kms:CreateKey', + 'lambda:CreateEventSourceMapping', + 'lambda:DeleteEventSourceMapping', + 'lambda:GetEventSourceMapping' + ], + effect: iam.Effect.ALLOW, + resources: ['*'], // these actions requires the resource to be '*'. There are additional conditions on the policy to help put guard rails + conditions: { + ...awsCalledViaCondition + } + }), + + // CloudWatch + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['cloudwatch:*Dashboard*', 'cloudwatch:GetMetricData', 'cloudwatch:TagResource'], + resources: [`arn:${cdk.Aws.PARTITION}:cloudwatch::${cdk.Aws.ACCOUNT_ID}:dashboard/*`], + conditions: { + ...awsCalledViaCondition + } + }), + + // SQS + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'sqs:CreateQueue', + 'sqs:GetQueueAttributes', + 'sqs:TagQueue', + 'sqs:DeleteQueue', + 'sqs:SetQueueAttributes' + ], + resources: [`arn:${cdk.Aws.PARTITION}:sqs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:*`], + conditions: { + ...awsCalledViaCondition + } + }) + ] + }); +}; + +/** + * Create VPC creation and management policy + */ +const createVpcCreationPolicy = (scope: Construct, id: string, awsTagKeysCondition: any): iam.Policy => { + return new iam.Policy(scope, id, { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'ec2:AllocateAddress', + 'ec2:AssociateRouteTable', + 'ec2:AttachInternetGateway', + 'ec2:AuthorizeSecurityGroup*', + 'ec2:CreateFlowLogs', + 'ec2:CreateInternetGateway', + 'ec2:CreateNatGateway', + 'ec2:CreateNetworkAcl*', + 'ec2:CreateRoute*', + 'ec2:CreateSecurityGroup', + 'ec2:CreateSubnet', + 'ec2:CreateTags', + 'ec2:createVPC*', + 'ec2:Delete*', + 'ec2:Detach*', + 'ec2:Disassociate*', + 'ec2:Modify*', + 'ec2:ReleaseAddress', + 'ec2:ReplaceNetworkAcl*', + 'ec2:RevokeSecurityGroup*', + 'ec2:UpdateSecurityGroupRuleDescriptions*' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:route-table/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:security-group/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:vpc*/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:subnet/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:internet-gateway/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:elastic-ip/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:natgateway/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:network-interface/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:network-acl/*`, + `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:ipam-pool/*` + ] + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['ec2:Describe*'], + resources: ['*'] + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'logs:CreateLogGroup', + 'logs:DescribeLogGroups', + 'logs:PutRetentionPolicy', + 'logs:TagResource', + 'logs:ListTagsForResource' + ], + resources: [`arn:${cdk.Aws.PARTITION}:logs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:log-group:*`], + conditions: { + ...awsTagKeysCondition + } + }) + ] + }); +}; + +/** + * Create Kendra knowledge base policy + */ +const createKendraPolicy = (scope: Construct, id: string, awsCalledViaCondition: any): iam.Policy => { + return new iam.Policy(scope, id, { + statements: [ + new iam.PolicyStatement({ + actions: ['kendra:CreateIndex'], + effect: iam.Effect.ALLOW, + resources: ['*'], // CreateIndex requires resource to be '*' + conditions: { + ...awsCalledViaCondition + } + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'kendra:DescribeIndex', + 'kendra:ListTagsForResource', + 'kendra:TagResource', + 'kendra:UpdateIndex' + ], + resources: [`arn:${cdk.Aws.PARTITION}:kendra:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:index/*`], + conditions: { + ...awsCalledViaCondition + } + }) + ] + }); +}; + +/** + * Create ECR policy for Agent Builder use cases + */ +const createEcrPolicy = (scope: Construct, id: string, awsCalledViaCondition: any): iam.Policy => { + return new iam.Policy(scope, id, { + statements: [ + // ECR Pull-Through Cache management + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'ecr:CreatePullThroughCacheRule', + 'ecr:DeletePullThroughCacheRule', + 'ecr:DescribePullThroughCacheRules' + ], + resources: ['*'], // Pull-through cache rules are account-level resources + conditions: { + ...awsCalledViaCondition + } + }), + // ECR Repository management + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'ecr:CreateRepository', + 'ecr:DescribeRepositories', + 'ecr:GetRepositoryPolicy', + 'ecr:TagResource', + 'ecr:ListTagsForResource' + ], + resources: [`arn:${cdk.Aws.PARTITION}:ecr:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:repository/*`], + conditions: { + ...awsCalledViaCondition + } + }), + // ECR Authorization (required for image operations) + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['ecr:GetAuthorizationToken'], + resources: ['*'] // GetAuthorizationToken requires resource to be '*' + }) + ] + }); +}; + +/** + * Add NAG suppressions for CFN deploy role and policies + */ +const addCfnDeployRoleNagSuppressions = (cfnDeployRole: iam.Role, corePolicy: iam.Policy): void => { + NagSuppressions.addResourceSuppressions(corePolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'This the minimum policy required for CloudFormation service to deploy the stack. Where possible there is a condition using aws:CalledVia for supported services' + } + ]); + + NagSuppressions.addResourceSuppressions(cfnDeployRole, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Resource name is unknown and hence the wild card', + appliesTo: [ + 'Resource::arn::dynamodb:::table/*', + 'Resource::arn::ssm:::parameter*' + ] + } + ]); + + cfn_nag.addCfnSuppressRules(corePolicy, [ + { + id: 'F4', + reason: 'Due to policy byte size limitation, had to convert servicecatalog actions to use wildcard' + } + ]); + + cfn_nag.addCfnSuppressRules(cfnDeployRole, [ + { + id: 'F10', + reason: 'The inline policy is to avoid concurrency issues where a policy is created but not yet attached to the role.' + } + ]); +}; + +/** + * Add NAG suppressions for modular policies + */ +const addModularPolicyNagSuppressions = (scope: Construct, id: string, config: CfnDeployRoleConfig): void => { + // Add NAG suppressions for VPC policy if it exists + if (config.includeVpcPermissions) { + const vpcPolicyId = `${id}VpcPolicy`; + if (scope.node.tryFindChild(vpcPolicyId)) { + const vpcPolicy = scope.node.findChild(vpcPolicyId) as iam.Policy; + NagSuppressions.addResourceSuppressions(vpcPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Even though the resource is "*", the actions have been scoped down only to the ones required by the solution', + appliesTo: [ + 'Resource::*', + 'Resource::arn::ec2:::vpc/*', + 'Resource::arn::ec2:::vpc*/*', + 'Resource::arn::ec2:::security-group/*', + 'Resource::arn::ec2:::route-table/*', + 'Resource::arn::ec2:::elastic-ip/*', + 'Resource::arn::ec2:::internet-gateway/*', + 'Resource::arn::ec2:::natgateway/*', + 'Resource::arn::ec2:::network-interface/*', + 'Resource::arn::ec2:::subnet/*', + 'Resource::arn::ec2:::network-acl/*', + 'Resource::arn::ec2:::ipam-pool/*', + 'Resource::arn::logs:::log-group:*', + 'Action::ec2:AuthorizeSecurityGroup*', + 'Action::ec2:CreateNetworkAcl*', + 'Action::ec2:CreateRoute*', + 'Action::ec2:createVPC*', + 'Action::ec2:Delete*', + 'Action::ec2:Describe*', + 'Action::ec2:Detach*', + 'Action::ec2:Disassociate*', + 'Action::ec2:Modify*', + 'Action::ec2:ReplaceNetworkAcl*', + 'Action::ec2:RevokeSecurityGroup*', + 'Action::ec2:UpdateSecurityGroupRuleDescriptions*' + ] + } + ]); + } + } + + // Add NAG suppressions for Kendra policy if it exists + if (config.includeKendraPermissions) { + const kendraPolicyId = `${id}KendraPolicy`; + if (scope.node.tryFindChild(kendraPolicyId)) { + const kendraPolicy = scope.node.findChild(kendraPolicyId) as iam.Policy; + NagSuppressions.addResourceSuppressions(kendraPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Kendra CreateIndex action requires resource to be "*", other actions are scoped to specific index resources', + appliesTo: [ + 'Resource::*', + 'Resource::arn::kendra:::index/*' + ] + } + ]); + } + } + + // Add NAG suppressions for ECR policy if it exists + if (config.includeEcrPermissions) { + const ecrPolicyId = `${id}EcrPolicy`; + if (scope.node.tryFindChild(ecrPolicyId)) { + const ecrPolicy = scope.node.findChild(ecrPolicyId) as iam.Policy; + NagSuppressions.addResourceSuppressions(ecrPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'ECR pull-through cache rules and GetAuthorizationToken require resource to be "*", repository actions are scoped to specific repositories', + appliesTo: [ + 'Resource::*', + 'Resource::arn::ecr:::repository/*' + ] + } + ]); + } + } +}; diff --git a/source/infrastructure/lib/use-case-management/management-stack.ts b/source/infrastructure/lib/use-case-management/management-stack.ts index 0e0a5adc..af6c0c16 100644 --- a/source/infrastructure/lib/use-case-management/management-stack.ts +++ b/source/infrastructure/lib/use-case-management/management-stack.ts @@ -6,7 +6,8 @@ import * as cdk from 'aws-cdk-lib'; import * as iam from 'aws-cdk-lib/aws-iam'; import * as lambda from 'aws-cdk-lib/aws-lambda'; import * as sqs from 'aws-cdk-lib/aws-sqs'; - +import * as s3 from 'aws-cdk-lib/aws-s3'; +import { ConstructsFactories } from '@aws-solutions-constructs/aws-constructs-factories'; import { NagSuppressions } from 'cdk-nag'; import { Construct, IConstruct } from 'constructs'; import { BaseNestedStack } from '../framework/base-nested-stack'; @@ -15,7 +16,6 @@ import * as cfn_nag from '../utils/cfn-guard-suppressions'; import { createCustomResourceForLambdaLogRetention, createDefaultLambdaRole, - generateCfnTemplateUrl, generateTemplateMapping, createVpcConfigForLambda } from '../utils/common-utils'; @@ -30,16 +30,20 @@ import { OPTIONAL_EMAIL_REGEX_PATTERN, POWERTOOLS_METRICS_NAMESPACE_ENV_VAR, TEMPLATE_FILE_EXTN_ENV_VAR, - USE_CASE_API_KEY_SUFFIX_ENV_VAR, USE_CASE_MANAGEMENT_NAMESPACE, WEBCONFIG_SSM_KEY_ENV_VAR, COGNITO_POLICY_TABLE_ENV_VAR, CLIENT_ID_ENV_VAR, - USER_POOL_ID_ENV_VAR + USER_POOL_ID_ENV_VAR, + GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR, + DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR } from '../utils/constants'; import { ExistingVPCParameters } from '../vpc/exisiting-vpc-params'; import { CognitoSetup } from '../auth/cognito-setup'; import { SearchAndReplaceRefactorAspect } from '../utils/search-and-replace-refactor-aspect'; +import { createCfnDeployRole } from './cfn-deploy-role-factory'; export class UseCaseManagementParameters { /** @@ -194,6 +198,21 @@ export class UseCaseManagement extends BaseNestedStack { */ public readonly modelInfoApiLambda: lambda.Function; + /** + * The lambda backing MCP management API calls + */ + public readonly mcpManagementApiLambda: lambda.Function; + + /** + * The lambda backing agent management API calls + */ + public readonly agentManagementApiLambda: lambda.Function; + + /** + * The lambda backing workflow management API calls + */ + public readonly workflowManagementApiLambda: lambda.Function; + /** * condition to check if vpc configuration should be applied to lambda functions */ @@ -219,6 +238,10 @@ export class UseCaseManagement extends BaseNestedStack { */ public readonly objectPrefix: string; + /** + * s3 Bucket to deployment dashboard artifacts, such exports artifacts, and user uploaded schema + */ + public readonly deploymentPlatformBucket: s3.Bucket; /** * The CognitoSetup construct to use for user pool and client setup @@ -258,7 +281,13 @@ export class UseCaseManagement extends BaseNestedStack { expression: cdk.Fn.conditionEquals( cdk.Fn.select( 0, - cdk.Fn.split('.', cdk.Fn.select(1, cdk.Fn.split('@', cdk.Fn.join("", [this.stackParameters.defaultUserEmail, "@example.com"])))) + cdk.Fn.split( + '.', + cdk.Fn.select( + 1, + cdk.Fn.split('@', cdk.Fn.join('', [this.stackParameters.defaultUserEmail, '@example.com'])) + ) + ) ), INTERNAL_EMAIL_DOMAIN ) @@ -298,6 +327,7 @@ export class UseCaseManagement extends BaseNestedStack { }, deployWebApp: this.stackParameters.deployWebApp.valueAsString }); + this.cognitoSetup.createAgentCoreResourceServer(); //this construct has undergone a refactor from its original definition and many //of the resources have new logical IDs. To prevent customers that upgrade existing @@ -320,7 +350,32 @@ export class UseCaseManagement extends BaseNestedStack { ); const useCaseMgmtRole = createDefaultLambdaRole(this, 'UCMLRole', this.deployVPCCondition); - const cfnDeployRole = buildCfnDeployRole(this, useCaseMgmtRole); + const cfnDeployRole = createCfnDeployRole(this, 'CfnDeployRole', useCaseMgmtRole, { + includeVpcPermissions: true, // Text use cases support VPC + includeKendraPermissions: true, // Text use cases use Kendra + includeEcrPermissions: false, // Text use cases don't need ECR + additionalPassRoleServices: ['kendra.amazonaws.com', 'vpc-flow-logs.amazonaws.com'], // Original services + roleName: 'CfnDeployRole' + }); + + // Create Agent Builder CFN deploy role for agent management lambda + const agentManagementAPILambdaRole = createDefaultLambdaRole( + this, + 'AgentManagementLambdaRole', + this.deployVPCCondition + ); + const agentBuilderCfnDeployRole = createCfnDeployRole( + this, + 'AgentBuilderCfnDeployRole', + agentManagementAPILambdaRole, + { + includeVpcPermissions: false, // Text use cases support VPC + includeKendraPermissions: false, // Text use cases use Kendra + includeEcrPermissions: true, // Text use cases don't need ECR + additionalPassRoleServices: ['bedrock-agentcore.amazonaws.com'], // Original services + roleName: 'AgentBuilderCfnDeployRole' + } + ); this.useCaseManagementApiLambda = new lambda.Function(this, 'UseCaseMgmt', { description: 'Lambda function backing the REST API for use case management', @@ -332,7 +387,7 @@ export class UseCaseManagement extends BaseNestedStack { ), role: useCaseMgmtRole, runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, - handler: 'index.handler', + handler: 'use-case-handler.handler', timeout: cdk.Duration.minutes(LAMBDA_TIMEOUT_MINS), tracing: lambda.Tracing.ACTIVE, environment: { @@ -344,7 +399,6 @@ export class UseCaseManagement extends BaseNestedStack { [POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]: USE_CASE_MANAGEMENT_NAMESPACE, [WEBCONFIG_SSM_KEY_ENV_VAR]: this.stackParameters.webConfigSSMKey, [TEMPLATE_FILE_EXTN_ENV_VAR]: process.env.TEMPLATE_OUTPUT_BUCKET ? '.template' : '.template.json', - [USE_CASE_API_KEY_SUFFIX_ENV_VAR]: 'api-key', [IS_INTERNAL_USER_ENV_VAR]: cdk.Fn.conditionIf( isInternalUserCondition.logicalId, 'true', @@ -355,15 +409,7 @@ export class UseCaseManagement extends BaseNestedStack { }); // Env vars which need to be passed to use cases on deployment - this.useCaseManagementApiLambda.addEnvironment( - COGNITO_POLICY_TABLE_ENV_VAR, - this.cognitoSetup.getCognitoGroupPolicyTable(this).tableName - ); - this.useCaseManagementApiLambda.addEnvironment(USER_POOL_ID_ENV_VAR, this.cognitoSetup.getUserPool(this).userPoolId); - this.useCaseManagementApiLambda.addEnvironment( - CLIENT_ID_ENV_VAR, - this.cognitoSetup.getUserPoolClient(this).userPoolClientId - ); + this.addCommonEnvironmentVariables(this.useCaseManagementApiLambda); createCustomResourceForLambdaLogRetention( this, @@ -446,6 +492,212 @@ export class UseCaseManagement extends BaseNestedStack { cdk.Fn.join(',', this.stackParameters.existingSecurityGroupIds.valueAsList) ); + const accessLoggingS3Bucket = s3.Bucket.fromBucketArn( + this, + 'DeploymentPlatformLoggingBucket', + this.accessLoggingBucket + ); + + const factories = new ConstructsFactories(this, 'Factories'); + + this.deploymentPlatformBucket = factories.s3BucketFactory('DeploymentPlatformBucket', { + bucketProps: { + versioned: false, // bucket versioning is recommended in the IG, but is not enforced + blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, + removalPolicy: cdk.RemovalPolicy.RETAIN, + encryption: s3.BucketEncryption.S3_MANAGED, + enforceSSL: true, + lifecycleRules: [], + serverAccessLogsBucket: accessLoggingS3Bucket, + serverAccessLogsPrefix: 'deployment-platform-bucket-logs/', + cors: [ + { + allowedMethods: [s3.HttpMethods.POST], + allowedOrigins: ['*'], + allowedHeaders: ['*'], + maxAge: 3600 + } + ] + } + }).s3Bucket; + + this.deploymentPlatformBucket.policy?.applyRemovalPolicy(cdk.RemovalPolicy.RETAIN); + + // A common warning was logged during synth stage when the referenced bucket is not part of the same stack + // This annotation is to suppress this warning given that log bucket is added to the feedbackBucket + cdk.Annotations.of(this.deploymentPlatformBucket).acknowledgeWarning( + '@aws-cdk/aws-s3:accessLogsPolicyNotAdded' + ); + + const mcpManagementAPILambdaRole = createDefaultLambdaRole( + this, + 'MCPManagementLambdaRole', + this.deployVPCCondition + ); + + const mcpCfnDeployRole = createCfnDeployRole(this, 'MCPCfnDeployRole', mcpManagementAPILambdaRole, { + includeVpcPermissions: false, // AgentCore doesn't support VPC + includeKendraPermissions: false, // AgentCore doesn't use Kendra + includeEcrPermissions: false, // Needed for pull-through cache + additionalPassRoleServices: ['bedrock-agentcore.amazonaws.com'], // Allow passing roles to AgentCore + roleName: 'MCPCfnDeployRole' + }); + + this.mcpManagementApiLambda = new lambda.Function(this, 'MCPManagementLambda', { + description: 'Lambda function backing the REST API for MCP server management', + code: lambda.Code.fromAsset( + '../lambda/use-case-management', + ApplicationAssetBundler.assetBundlerFactory() + .assetOptions(COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME) + .options(this, '../lambda/use-case-management') + ), + role: mcpManagementAPILambdaRole, + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'mcp-handler.mcpHandler', + timeout: cdk.Duration.minutes(LAMBDA_TIMEOUT_MINS), + tracing: lambda.Tracing.ACTIVE, + deadLetterQueue: this.dlq, + environment: { + [ARTIFACT_BUCKET_ENV_VAR]: this.assetBucket, + ...(this.objectPrefix && { + [ARTIFACT_KEY_PREFIX_ENV_VAR]: this.objectPrefix + }), + [CFN_DEPLOY_ROLE_ARN_ENV_VAR]: mcpCfnDeployRole.roleArn, + [POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]: USE_CASE_MANAGEMENT_NAMESPACE, + [GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]: this.deploymentPlatformBucket.bucketName, + [TEMPLATE_FILE_EXTN_ENV_VAR]: process.env.TEMPLATE_OUTPUT_BUCKET ? '.template' : '.template.json', + [USER_POOL_ID_ENV_VAR]: this.cognitoSetup.getUserPool(this).userPoolId, + [IS_INTERNAL_USER_ENV_VAR]: cdk.Fn.conditionIf( + isInternalUserCondition.logicalId, + 'true', + 'false' + ).toString() + } + }); + + lambdaDDBPolicy.attachToRole(mcpManagementAPILambdaRole); + this.addCommonEnvironmentVariables(this.mcpManagementApiLambda); + + createCustomResourceForLambdaLogRetention( + this, + 'MCPManagementLambdaLogRetention', + this.mcpManagementApiLambda.functionName, + this.customResourceLambdaArn + ); + + this.addMCPLambdaPermissions(mcpManagementAPILambdaRole, this.deploymentPlatformBucket); + + this.agentManagementApiLambda = new lambda.Function(this, 'AgentManagementLambda', { + description: 'Lambda function backing the REST API for agent management', + code: lambda.Code.fromAsset( + '../lambda/use-case-management', + ApplicationAssetBundler.assetBundlerFactory() + .assetOptions(COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME) + .options(this, '../lambda/use-case-management') + ), + role: agentManagementAPILambdaRole, + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'agents-handler.agentsHandler', + timeout: cdk.Duration.minutes(LAMBDA_TIMEOUT_MINS), + tracing: lambda.Tracing.ACTIVE, + deadLetterQueue: this.dlq, + environment: { + [ARTIFACT_BUCKET_ENV_VAR]: this.assetBucket, + ...(this.objectPrefix && { + [ARTIFACT_KEY_PREFIX_ENV_VAR]: this.objectPrefix + }), + [CFN_DEPLOY_ROLE_ARN_ENV_VAR]: agentBuilderCfnDeployRole.roleArn, + [POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]: USE_CASE_MANAGEMENT_NAMESPACE, + [WEBCONFIG_SSM_KEY_ENV_VAR]: this.stackParameters.webConfigSSMKey, + [TEMPLATE_FILE_EXTN_ENV_VAR]: process.env.TEMPLATE_OUTPUT_BUCKET ? '.template' : '.template.json', + [IS_INTERNAL_USER_ENV_VAR]: cdk.Fn.conditionIf( + isInternalUserCondition.logicalId, + 'true', + 'false' + ).toString(), + [GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]: this.deploymentPlatformBucket.bucketName, + [DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR]: cdk.Stack.of(this).stackName + } + }); + + createCustomResourceForLambdaLogRetention( + this, + 'AgentManagementLambdaLogRetention', + this.agentManagementApiLambda.functionName, + this.customResourceLambdaArn + ); + + // Add common environment variables for agent management lambda + this.addCommonEnvironmentVariables(this.agentManagementApiLambda); + + // Add agent management specific permissions + this.addAgentManagementLambdaPermissions(agentManagementAPILambdaRole, this.deploymentPlatformBucket); + + const workflowManagementAPILambdaRole = createDefaultLambdaRole( + this, + 'WorkflowManagementLambdaRole', + this.deployVPCCondition + ); + + const workflowCfnDeployRole = createCfnDeployRole( + this, + 'WorkflowCfnDeployRole', + workflowManagementAPILambdaRole, + { + includeVpcPermissions: false, + includeKendraPermissions: false, + includeEcrPermissions: true, + additionalPassRoleServices: ['bedrock-agentcore.amazonaws.com'], // Allow passing roles to AgentCore + roleName: 'WorkflowCfnDeployRole' + } + ); + + this.workflowManagementApiLambda = new lambda.Function(this, 'WorkflowManagementLambda', { + description: 'Lambda function backing the REST API for workflow management', + code: lambda.Code.fromAsset( + '../lambda/use-case-management', + ApplicationAssetBundler.assetBundlerFactory() + .assetOptions(COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME) + .options(this, '../lambda/use-case-management') + ), + role: workflowManagementAPILambdaRole, + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'workflows-handler.workflowsHandler', + timeout: cdk.Duration.minutes(LAMBDA_TIMEOUT_MINS), + tracing: lambda.Tracing.ACTIVE, + deadLetterQueue: this.dlq, + environment: { + [ARTIFACT_BUCKET_ENV_VAR]: this.assetBucket, + ...(this.objectPrefix && { + [ARTIFACT_KEY_PREFIX_ENV_VAR]: this.objectPrefix + }), + [CFN_DEPLOY_ROLE_ARN_ENV_VAR]: workflowCfnDeployRole.roleArn, + [POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]: USE_CASE_MANAGEMENT_NAMESPACE, + [WEBCONFIG_SSM_KEY_ENV_VAR]: this.stackParameters.webConfigSSMKey, + [TEMPLATE_FILE_EXTN_ENV_VAR]: process.env.TEMPLATE_OUTPUT_BUCKET ? '.template' : '.template.json', + [IS_INTERNAL_USER_ENV_VAR]: cdk.Fn.conditionIf( + isInternalUserCondition.logicalId, + 'true', + 'false' + ).toString(), + [GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]: this.deploymentPlatformBucket.bucketName, + [DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR]: cdk.Stack.of(this).stackName + } + }); + + createCustomResourceForLambdaLogRetention( + this, + 'WorkflowManagementLambdaLogRetention', + this.workflowManagementApiLambda.functionName, + this.customResourceLambdaArn + ); + + // Add common environment variables for workflow management lambda + this.addCommonEnvironmentVariables(this.workflowManagementApiLambda); + + // Add workflow management specific permissions + this.addWorkflowManagementLambdaPermissions(workflowManagementAPILambdaRole, this.deploymentPlatformBucket); + NagSuppressions.addResourceSuppressions( this.useCaseManagementApiLambda.role!.node.tryFindChild('DefaultPolicy')!.node.tryFindChild('Resource')!, [ @@ -466,6 +718,36 @@ export class UseCaseManagement extends BaseNestedStack { ] ); + NagSuppressions.addResourceSuppressions( + this.mcpManagementApiLambda.role!.node.tryFindChild('DefaultPolicy')!.node.tryFindChild('Resource')!, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the Lambda function to perform x-ray tracing' + } + ] + ); + + NagSuppressions.addResourceSuppressions( + this.agentManagementApiLambda.role!.node.tryFindChild('DefaultPolicy')!.node.tryFindChild('Resource')!, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the Lambda function to perform x-ray tracing' + } + ] + ); + + NagSuppressions.addResourceSuppressions( + this.workflowManagementApiLambda.role!.node.tryFindChild('DefaultPolicy')!.node.tryFindChild('Resource')!, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the Lambda function to perform x-ray tracing' + } + ] + ); + NagSuppressions.addResourceSuppressions(lambdaDDBPolicy, [ { id: 'AwsSolutions-IAM5', @@ -522,456 +804,280 @@ export class UseCaseManagement extends BaseNestedStack { reason: 'The inline policy avoids a rare race condition between the lambda, Role and the policy resource creation.' } ]); + + cfn_nag.addCfnSuppressRules(this.mcpManagementApiLambda, [ + { + id: 'W89', + reason: 'VPC deployment is not enforced. If the solution is deployed in a VPC, this lambda function will be deployed with VPC enabled configuration' + }, + { + id: 'W92', + reason: 'The solution does not enforce reserved concurrency' + } + ]); + + cfn_nag.addCfnSuppressRules(mcpManagementAPILambdaRole, [ + { + id: 'F10', + reason: 'The inline policy avoids a rare race condition between the lambda, Role and the policy resource creation.' + } + ]); + + cfn_nag.addCfnSuppressRules(this.agentManagementApiLambda, [ + { + id: 'W89', + reason: 'VPC deployment is not enforced. If the solution is deployed in a VPC, this lambda function will be deployed with VPC enabled configuration' + }, + { + id: 'W92', + reason: 'The solution does not enforce reserved concurrency' + } + ]); + + cfn_nag.addCfnSuppressRules(agentManagementAPILambdaRole, [ + { + id: 'F10', + reason: 'The inline policy avoids a rare race condition between the lambda, Role and the policy resource creation.' + } + ]); + + cfn_nag.addCfnSuppressRules(this.workflowManagementApiLambda, [ + { + id: 'W89', + reason: 'VPC deployment is not enforced. If the solution is deployed in a VPC, this lambda function will be deployed with VPC enabled configuration' + }, + { + id: 'W92', + reason: 'The solution does not enforce reserved concurrency' + } + ]); + + cfn_nag.addCfnSuppressRules(workflowManagementAPILambdaRole, [ + { + id: 'F10', + reason: 'The inline policy avoids a rare race condition between the lambda, Role and the policy resource creation.' + } + ]); } -} -const buildCfnDeployRole = (scope: Construct, lambdaRole: iam.Role): iam.Role => { - const awsTagKeysCondition = { - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId'] - } - }; + /** + * Adds all necessary permissions for MCP Management Lambda including S3 and Bedrock AgentCore permissions + * @param role The IAM role to attach the permissions to + * @param deploymentBucket The S3 bucket for deployment platform storage + */ + private addMCPLambdaPermissions(role: iam.Role, deploymentBucket: s3.Bucket): void { + const s3Policy = new iam.Policy(this, 'MCPLambdaS3Policy', { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['s3:GetObject', 's3:PutObject', 's3:DeleteObject', 's3:PutObjectTagging'], + resources: [ + deploymentBucket.bucketArn, + `${deploymentBucket.bucketArn}/mcp/*`, + `arn:${cdk.Aws.PARTITION}:s3:::${this.assetBucket}`, + `arn:${cdk.Aws.PARTITION}:s3:::${this.assetBucket}/*` + ] + }) + ] + }); + s3Policy.attachToRole(role); - const awsCalledViaCondition = { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] - } - }; - - const cfnDeployRole = new iam.Role(scope, 'CfnDeployRole', { - assumedBy: new iam.ServicePrincipal('cloudformation.amazonaws.com'), - inlinePolicies: { - CfnDeployPolicy: new iam.PolicyDocument({ - statements: [ - new iam.PolicyStatement({ - actions: [ - 'dynamodb:CreateTable', - 'dynamodb:DeleteTable', - 'dynamodb:DescribeTable', - 'dynamodb:DescribeTimeToLive', - 'dynamodb:ListTagsOfResource', - 'dynamodb:UpdateTimeToLive', - 'dynamodb:TagResource' - ], - resources: [`arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/*`] - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: ['ssm:GetParameter'], - resources: [`arn:${cdk.Aws.PARTITION}:ssm:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:parameter*`] - }) - ] - }) - } - }); - - const cfnDeployPolicy = new iam.Policy(scope, 'CfnDeployPolicy', { - statements: [ - new iam.PolicyStatement({ - actions: ['cloudformation:CreateStack', 'cloudformation:UpdateStack'], - effect: iam.Effect.ALLOW, - resources: [`arn:${cdk.Aws.PARTITION}:cloudformation:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:stack/*`], - conditions: { - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId'] - }, - 'StringLike': { - 'cloudformation:TemplateUrl': generateCfnTemplateUrl(scope) - } - } - }), - new iam.PolicyStatement({ - actions: ['cloudformation:DeleteStack', 'cloudformation:DescribeStack*', 'cloudformation:ListStacks'], - effect: iam.Effect.ALLOW, - resources: [`arn:${cdk.Aws.PARTITION}:cloudformation:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:stack/*`] - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'iam:CreateRole', - 'iam:DeleteRole*', - 'iam:DetachRolePolicy', - 'iam:GetRole', - 'iam:ListRoleTags', - 'iam:*tRolePolicy', // Get|Put RolePolicy - 'iam:TagRole', - 'iam:UpdateAssumeRolePolicy' - ], - resources: [ - `arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/*`, - `arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:policy/*` - ], - conditions: { - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId', 'Name'] - } - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: ['iam:PassRole'], - resources: [`arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/*`], - conditions: { - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId', 'Name'] - }, - 'StringEquals': { - 'iam:PassedToService': [ - 'lambda.amazonaws.com', - 'apigateway.amazonaws.com', - 'kendra.amazonaws.com', - 'vpc-flow-logs.amazonaws.com', - 'cloudformation.amazonaws.com' - ] - } - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: ['iam:AttachRolePolicy'], - resources: [`arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/*`], - conditions: { - ...awsCalledViaCondition, - ...awsTagKeysCondition, - StringEquals: { - 'iam:PolicyARN': [ - `arn:${cdk.Aws.PARTITION}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole` - ] - } - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'lambda:AddPermission', - 'lambda:CreateFunction', - 'lambda:Delete*', - 'lambda:GetFunction', - 'lambda:*LayerVersion', // Get|Publish LayerVersion - 'lambda:InvokeFunction', - 'lambda:ListTags', - 'lambda:RemovePermission', - 'lambda:TagResource', - 'lambda:UpdateEventSourceMapping', - 'lambda:UpdateFunction*' - ], - resources: [ - `arn:${cdk.Aws.PARTITION}:lambda:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:function:*`, - `arn:${cdk.Aws.PARTITION}:lambda:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:layer:*`, - `arn:${cdk.Aws.PARTITION}:lambda:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:event-source-mapping:*` - ], - conditions: { - ...awsTagKeysCondition - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 's3:CreateBucket', - 's3:DeleteBucketPolicy', - 's3:GetBucketAcl', - 's3:GetBucketPolicy*', - 's3:GetBucketVersioning', - 's3:*EncryptionConfiguration', // Get|Put EncryptionConfiguration - 's3:GetObject', - 's3:PutBucket*' - ], - resources: [`arn:${cdk.Aws.PARTITION}:s3:::*`] - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'events:DeleteRule', - 'events:DescribeRule', - 'events:PutRule', - 'events:*Targets' // Put|Remove Targets - ], - resources: [`arn:${cdk.Aws.PARTITION}:events:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:rule/*`] - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: ['servicecatalog:*'], - resources: [ - `arn:${cdk.Aws.PARTITION}:servicecatalog:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:/attribute-groups/*`, - `arn:${cdk.Aws.PARTITION}:servicecatalog:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:/applications/*` - ], - conditions: { - ...awsCalledViaCondition - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'apigateway:CreateRestApi', - 'apigateway:CreateStage', - 'apigateway:DELETE', - 'apigateway:Delete*', - 'apigateway:GET', - 'apigateway:PATCH', - 'apigateway:POST', - 'apigateway:PUT', - 'apigateway:SetWebACL', - 'apigateway:TagResource', - 'wafv2:*ForResource', - 'wafv2:*WebACL', - 'wafv2:TagResource' - ], - resources: [ - `arn:${cdk.Aws.PARTITION}:apigateway:${cdk.Aws.REGION}::/*`, - `arn:${cdk.Aws.PARTITION}:wafv2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:regional/*/*/*` - ], - conditions: { - ...awsCalledViaCondition - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'cognito-idp:AdminAddUserToGroup', - 'cognito-idp:AdminCreateUser', - 'cognito-idp:AdminDeleteUser', - 'cognito-idp:AdminGetUser', - 'cognito-idp:AdminListGroupsForUser', - 'cognito-idp:AdminRemoveUserFromGroup', - 'cognito-idp:CreateGroup', - 'cognito-idp:CreateUserPool*', - 'cognito-idp:Delete*', - 'cognito-idp:GetGroup', - 'cognito-idp:SetUserPoolMfaConfig', - 'cognito-idp:*UserPoolClient' // Describe|Update UserPoolClient - ], - resources: [`arn:${cdk.Aws.PARTITION}:cognito-idp:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:userpool/*`], - conditions: { - ...awsCalledViaCondition - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: ['cognito-idp:DescribeUserPool'], - resources: [`arn:${cdk.Aws.PARTITION}:cognito-idp:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:userpool/*`] - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'cloudfront:Create*', - 'cloudfront:Delete*', - 'cloudfront:DescribeFunction', - 'cloudfront:Get*', - 'cloudfront:ListTagsForResource', - 'cloudfront:PublishFunction', - 'cloudfront:TagResource', - 'cloudfront:Update*' - ], - resources: [ - `arn:${cdk.Aws.PARTITION}:cloudfront::${cdk.Aws.ACCOUNT_ID}:function/*`, - `arn:${cdk.Aws.PARTITION}:cloudfront::${cdk.Aws.ACCOUNT_ID}:origin-access-control/*`, - `arn:${cdk.Aws.PARTITION}:cloudfront::${cdk.Aws.ACCOUNT_ID}:distribution/*`, - `arn:${cdk.Aws.PARTITION}:cloudfront::${cdk.Aws.ACCOUNT_ID}:response-headers-policy/*` - ] - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'kms:CreateGrant', - 'kms:Decrypt', - 'kms:DescribeKey', - 'kms:EnableKeyRotation', - 'kms:Encrypt', - 'kms:GenerateDataKey', - 'kms:PutKeyPolicy', - 'kms:TagResource' - ], - resources: [`arn:${cdk.Aws.PARTITION}:kms:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:key/*`], - conditions: { - ...awsCalledViaCondition - } - }), - new iam.PolicyStatement({ - actions: [ - 'kms:CreateKey', - 'kendra:CreateIndex', - 'lambda:CreateEventSourceMapping', - 'lambda:DeleteEventSourceMapping', - 'lambda:GetEventSourceMapping' - ], - effect: iam.Effect.ALLOW, - resources: ['*'], // these actions requires the resource to be '*'. There are additional conditions on the policy to help put guard rails - conditions: { - ...awsCalledViaCondition - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'kendra:DescribeIndex', - 'kendra:ListTagsForResource', - 'kendra:TagResource', - 'kendra:UpdateIndex' - ], - resources: [`arn:${cdk.Aws.PARTITION}:kendra:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:index/*`], - conditions: { - ...awsCalledViaCondition - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: ['cloudwatch:*Dashboard*', 'cloudwatch:GetMetricData', 'cloudwatch:TagResource'], - resources: [`arn:${cdk.Aws.PARTITION}:cloudwatch::${cdk.Aws.ACCOUNT_ID}:dashboard/*`], - conditions: { - ...awsCalledViaCondition - } - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'sqs:CreateQueue', - 'sqs:GetQueueAttributes', - 'sqs:TagQueue', - 'sqs:DeleteQueue', - 'sqs:SetQueueAttributes' - ], - resources: [`arn:${cdk.Aws.PARTITION}:sqs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:*`], - conditions: { - ...awsCalledViaCondition - } - }) - ] - }); - cfnDeployPolicy.attachToRole(lambdaRole); - cfnDeployPolicy.attachToRole(cfnDeployRole); - - const vpcCreationPolicy = new iam.Policy(scope, 'VpcCreationPolicy', { - statements: [ - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'ec2:AllocateAddress', - 'ec2:AssociateRouteTable', - 'ec2:AttachInternetGateway', - 'ec2:AuthorizeSecurityGroup*', - 'ec2:CreateFlowLogs', - 'ec2:CreateInternetGateway', - 'ec2:CreateNatGateway', - 'ec2:CreateNetworkAcl*', - 'ec2:CreateRoute*', - 'ec2:CreateSecurityGroup', - 'ec2:CreateSubnet', - 'ec2:CreateTags', - 'ec2:createVPC*', - 'ec2:Delete*', - 'ec2:Detach*', - 'ec2:Disassociate*', - 'ec2:Modify*', - 'ec2:ReleaseAddress', - 'ec2:ReplaceNetworkAcl*', - 'ec2:RevokeSecurityGroup*', - 'ec2:UpdateSecurityGroupRuleDescriptions*' - ], - resources: [ - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:route-table/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:security-group/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:vpc*/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:subnet/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:internet-gateway/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:elastic-ip/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:natgateway/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:network-interface/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:network-acl/*`, - `arn:${cdk.Aws.PARTITION}:ec2:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:ipam-pool/*` + // Add NAG suppressions for S3 policy + NagSuppressions.addResourceSuppressions(s3Policy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the MCP Management Lambda to access deployment platform S3 bucket and its objects under the mcp/ prefix, and CDK assets bucket for template access', + appliesTo: [ + `Resource::<${cdk.Stack.of(this).getLogicalId( + deploymentBucket.node.defaultChild as cdk.CfnResource + )}.Arn>/mcp/*`, + process.env.TEMPLATE_OUTPUT_BUCKET + ? 'Resource::arn::s3:::{"Fn::FindInMap":["Template","General","S3Bucket"]}/*' + : 'Resource::arn::s3:::cdk-hnb659fds-assets--/*' ] - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: ['ec2:Describe*'], - resources: ['*'] - }), - new iam.PolicyStatement({ - effect: iam.Effect.ALLOW, - actions: [ - 'logs:CreateLogGroup', - 'logs:DescribeLogGroups', - 'logs:PutRetentionPolicy', - 'logs:TagResource', - 'logs:ListTagsForResource' - ], - resources: [`arn:${cdk.Aws.PARTITION}:logs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:log-group:*`], - conditions: { - ...awsTagKeysCondition - } - }) - ] - }); - - vpcCreationPolicy.attachToRole(lambdaRole); - vpcCreationPolicy.attachToRole(cfnDeployRole); + } + ]); + } - NagSuppressions.addResourceSuppressions(cfnDeployPolicy, [ - { - id: 'AwsSolutions-IAM5', - reason: 'This the minimum policy required for CloudFormation service to deploy the stack. Where possible there is a condition using aws:CalledVia for supported services' - } - ]); - - NagSuppressions.addResourceSuppressions(vpcCreationPolicy, [ - { - id: 'AwsSolutions-IAM5', - reason: 'Even though the resource is "*", the actions have been scoped down only to the ones required by the solution', - appliesTo: [ - 'Resource::*', - 'Resource::arn::ec2:::vpc/*', - 'Resource::arn::ec2:::vpc*/*', - 'Resource::arn::ec2:::security-group/*', - 'Resource::arn::ec2:::route-table/*', - 'Resource::arn::ec2:::elastic-ip/*', - 'Resource::arn::ec2:::internet-gateway/*', - 'Resource::arn::ec2:::natgateway/*', - 'Resource::arn::ec2:::network-interface/*', - 'Resource::arn::ec2:::subnet/*', - 'Resource::arn::ec2:::network-acl/*', - 'Resource::arn::ec2:::ipam-pool/*', - 'Resource::arn::logs:::log-group:*', - 'Action::ec2:AuthorizeSecurityGroup*', - 'Action::ec2:CreateNetworkAcl*', - 'Action::ec2:CreateRoute*', - 'Action::ec2:createVPC*', - 'Action::ec2:Delete*', - 'Action::ec2:Describe*', - 'Action::ec2:Detach*', - 'Action::ec2:Disassociate*', - 'Action::ec2:Modify*', - 'Action::ec2:ReplaceNetworkAcl*', - 'Action::ec2:RevokeSecurityGroup*', - 'Action::ec2:UpdateSecurityGroupRuleDescriptions*' + /** + * Adds all necessary permissions for Agent Management Lambda + * @param role The IAM role to attach the permissions to + * @param deploymentBucket The S3 bucket for deployment platform storage + */ + private addAgentManagementLambdaPermissions(role: iam.Role, deploymentBucket: s3.Bucket): void { + const agentManagementPolicy = new iam.Policy(this, 'AgentManagementLambdaPolicy', { + statements: [ + // API Gateway permissions for reading REST APIs + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['apigateway:GET'], + resources: [`arn:${cdk.Aws.PARTITION}:apigateway:${cdk.Aws.REGION}::/restapis/*`] + }), + // S3 permissions for agent artifacts + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['s3:GetObject', 's3:PutObject'], + resources: [cdk.Fn.join('', [deploymentBucket.bucketArn, '/agents/*'])] // restrict scope to /agents prefix + }), + // DynamoDB permissions for use case configuration + new iam.PolicyStatement({ + actions: [ + 'dynamodb:CreateTable', + 'dynamodb:DeleteTable', + 'dynamodb:DescribeTable', + 'dynamodb:*TimeToLive', // Describe|Update TimeToLive + 'dynamodb:ListTagsOfResource', + 'dynamodb:TagResource' + ], + resources: [`arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/*`] + }), + // SSM permissions for web config + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['ssm:GetParameter'], + resources: [ + `arn:${cdk.Aws.PARTITION}:ssm:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:parameter${this.stackParameters.webConfigSSMKey}` + ] + }), + // CloudWatch Logs permissions for deployed use cases + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'logs:CreateLogGroup', + 'logs:DescribeLogGroups', + 'logs:PutRetentionPolicy', + 'logs:TagResource', + 'logs:ListTagsForResource' + ], + resources: [`arn:${cdk.Aws.PARTITION}:logs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:log-group:*`] + }) ] - } - ]); - - NagSuppressions.addResourceSuppressions(cfnDeployRole, [ - { - id: 'AwsSolutions-IAM5', - reason: 'Resource name is unknown and hence the wild card', - appliesTo: [ - 'Resource::arn::dynamodb:::table/*', - 'Resource::arn::ssm:::parameter*' + }); + agentManagementPolicy.attachToRole(role); + + NagSuppressions.addResourceSuppressions(agentManagementPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the Agent Management Lambda to access API Gateway REST APIs, DynamoDB tables, CloudWatch Logs, and deployment platform S3 bucket objects under the agents/ prefix', + appliesTo: [ + 'Resource::arn::apigateway:::/restapis/*', + `Resource::<${cdk.Stack.of(this).getLogicalId( + deploymentBucket.node.defaultChild as cdk.CfnResource + )}.Arn>/agents/*`, + 'Resource::arn::dynamodb:::table/*', + 'Resource::arn::logs:::log-group:*', + 'Action::dynamodb:*TimeToLive' + ] + } + ]); + } + + /** + * Adds all necessary permissions for Workflow Management Lambda + * @param role The IAM role to attach the permissions to + * @param deploymentBucket The S3 bucket for deployment platform storage + */ + private addWorkflowManagementLambdaPermissions(role: iam.Role, deploymentBucket: s3.Bucket): void { + const workflowManagementPolicy = new iam.Policy(this, 'WorkflowManagementLambdaPolicy', { + statements: [ + // API Gateway permissions for reading REST APIs + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['apigateway:GET'], + resources: [`arn:${cdk.Aws.PARTITION}:apigateway:${cdk.Aws.REGION}::/restapis/*`] + }), + // S3 permissions for workflow artifacts + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['s3:GetObject', 's3:PutObject'], + resources: [cdk.Fn.join('', [deploymentBucket.bucketArn, '/workflows/*'])] // restrict scope to /workflows prefix + }), + // DynamoDB permissions for use case configuration + new iam.PolicyStatement({ + actions: [ + 'dynamodb:CreateTable', + 'dynamodb:DeleteTable', + 'dynamodb:DescribeTable', + 'dynamodb:*TimeToLive', // Describe|Update TimeToLive + 'dynamodb:ListTagsOfResource', + 'dynamodb:TagResource' + ], + resources: [`arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/*`] + }), + // SSM permissions for web config + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['ssm:GetParameter'], + resources: [ + `arn:${cdk.Aws.PARTITION}:ssm:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:parameter${this.stackParameters.webConfigSSMKey}` + ] + }), + // CloudWatch Logs permissions for deployed use cases + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'logs:CreateLogGroup', + 'logs:DescribeLogGroups', + 'logs:PutRetentionPolicy', + 'logs:TagResource', + 'logs:ListTagsForResource' + ], + resources: [`arn:${cdk.Aws.PARTITION}:logs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:log-group:*`] + }) ] - } - ]); + }); + workflowManagementPolicy.attachToRole(role); - cfn_nag.addCfnSuppressRules(cfnDeployPolicy, [ - { - id: 'F4', - reason: 'Due to policy byte size limitation, had to convert servicecatalog actions to use wildcard' - } - ]); + NagSuppressions.addResourceSuppressions(workflowManagementPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the Workflow Management Lambda to access API Gateway REST APIs, DynamoDB tables, CloudWatch Logs, and deployment platform S3 bucket objects under the workflows/ prefix', + appliesTo: [ + 'Resource::arn::apigateway:::/restapis/*', + `Resource::<${cdk.Stack.of(this).getLogicalId( + deploymentBucket.node.defaultChild as cdk.CfnResource + )}.Arn>/workflows/*`, + 'Resource::arn::dynamodb:::table/*', + 'Resource::arn::logs:::log-group:*', + 'Action::dynamodb:*TimeToLive' + ] + } + ]); + } - cfn_nag.addCfnSuppressRules(cfnDeployRole, [ - { - id: 'F10', - reason: 'The inline policy is to avoid concurrency issues where a policy is created but not yet attached to the role.' - } - ]); + /** + * Adds common environment variables needed by management lambdas + * @param lambdaFunction The lambda function to add environment variables to + */ + private addCommonEnvironmentVariables(lambdaFunction: lambda.Function): void { + lambdaFunction.addEnvironment( + COGNITO_POLICY_TABLE_ENV_VAR, + this.cognitoSetup.getCognitoGroupPolicyTable(this).tableName + ); + lambdaFunction.addEnvironment(USER_POOL_ID_ENV_VAR, this.cognitoSetup.getUserPool(this).userPoolId); + lambdaFunction.addEnvironment(CLIENT_ID_ENV_VAR, this.cognitoSetup.getUserPoolClient(this).userPoolClientId); + } - // this role returned here is used for setting lambda's environment variable. This role is to ensue backward compatibility - // of existing use case stacks. This role will not be used when new stacks are created in v2.0.0. - return cfnDeployRole; -}; + /** + * Sets multimodal environment variables for the agent and workflows management lambda + * This method should be called after the multimodal setup is created + * @param multimodalDataBucketName The name of the multimodal data bucket + * @param multimodalDataMetadataTableName The name of the multimodal data metadata table + */ + public setMultimodalEnvironmentVariables( + multimodalDataBucketName: string, + multimodalDataMetadataTableName: string + ): void { + this.agentManagementApiLambda.addEnvironment(MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, multimodalDataBucketName); + this.agentManagementApiLambda.addEnvironment( + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + multimodalDataMetadataTableName + ); + + this.workflowManagementApiLambda.addEnvironment(MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, multimodalDataBucketName); + this.workflowManagementApiLambda.addEnvironment( + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + multimodalDataMetadataTableName + ); + } +} diff --git a/source/infrastructure/lib/use-case-management/setup.ts b/source/infrastructure/lib/use-case-management/setup.ts index 220e93aa..ee2f505f 100644 --- a/source/infrastructure/lib/use-case-management/setup.ts +++ b/source/infrastructure/lib/use-case-management/setup.ts @@ -10,6 +10,7 @@ import { Construct } from 'constructs'; import { BaseStackProps } from '../framework/base-stack'; import { UseCaseManagement } from './management-stack'; import { FeedbackSetupStack } from '../feedback/feedback-setup-stack'; +import { MultimodalSetup } from '../multimodal/multimodal-setup'; import * as api from 'aws-cdk-lib/aws-apigateway'; import { RestRequestProcessor } from '../api/rest-request-processor'; import { UseCaseRestEndpointSetup } from '../api/use-case-rest-endpoint-setup'; @@ -105,6 +106,11 @@ export class UseCaseManagementSetup extends Construct { */ public readonly feedbackSetupStack: FeedbackSetupStack; + /** + * Construct that creates the resources for multimodal file management (API routes, processing lambda, etc.) + */ + public readonly multimodalSetup: MultimodalSetup; + /** * The API being served to allow use case management */ @@ -150,6 +156,9 @@ export class UseCaseManagementSetup extends Construct { const requestProcessor = new RestRequestProcessor(this, 'RequestProcessor', { useCaseManagementAPILambda: this.useCaseManagement.useCaseManagementApiLambda, modelInfoAPILambda: this.useCaseManagement.modelInfoApiLambda, + mcpManagementAPILambda: this.useCaseManagement.mcpManagementApiLambda, + agentManagementAPILambda: this.useCaseManagement.agentManagementApiLambda, + workflowManagementAPILambda: this.useCaseManagement.workflowManagementApiLambda, defaultUserEmail: props.defaultUserEmail, applicationTrademarkName: props.applicationTrademarkName, customResourceLambdaArn: props.customInfra.functionArn, @@ -213,6 +222,24 @@ export class UseCaseManagementSetup extends Construct { description: `Nested Stack that creates the Feedback Resources - Version ${props.solutionVersion}` }); - + this.multimodalSetup = new MultimodalSetup(this, 'MultimodalSetup', { + restApi: this.restApi, + deploymentPlatformAuthorizer: requestProcessor.requestAuthorizer, + requestValidator: requestProcessor.deploymentRestEndpoint.requestValidator, + dlq: this.useCaseManagement.dlq, + deployVPCCondition: this.useCaseManagement.deployVPCCondition, + privateSubnetIds: props.privateSubnetIds!, + securityGroupIds: props.securityGroupIds!, + customResourceLambdaArn: props.customInfra.functionArn, + customResourceLambdaRoleArn: props.customInfra.role!.roleArn, + accessLoggingS3Bucket: props.accessLoggingBucket, + stackSource: StackDeploymentSource.DEPLOYMENT_PLATFORM + }); + + // Set multimodal environment variables for the agent and workflow management lambda + this.useCaseManagement.setMultimodalEnvironmentVariables( + this.multimodalSetup.multimodalDataBucket.bucketName, + this.multimodalSetup.multimodalDataMetadataTable.tableName + ); } } diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/agent-builder-stack.ts b/source/infrastructure/lib/use-case-stacks/agent-core/agent-builder-stack.ts new file mode 100644 index 00000000..aa96f0b3 --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/agent-builder-stack.ts @@ -0,0 +1,153 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; + +import { Construct } from 'constructs'; + +import { BaseStack, BaseStackProps } from '../../framework/base-stack'; +import { CHAT_PROVIDERS, ECR_URI_PATTERN, USE_CASE_TYPES, GAAB_STRANDS_AGENT_IMAGE_NAME } from '../../utils/constants'; +import { AgentCoreBaseStack, AgentCoreBaseParameters } from './agent-core-base-stack'; +import { VPCSetup } from '../../vpc/vpc-setup'; + +/** + * CloudFormation parameters specific to AgentCore agent deployment + * Extends the base AgentCoreBaseParameters with agent-specific configuration + */ +export class AgentBuilderParameters extends AgentCoreBaseParameters { + /** + * Optional custom ECR image URI for the agent + */ + public customAgentImageUri: cdk.CfnParameter; + + constructor(stack: BaseStack) { + super(stack); + } + + /** + * Create use case-specific parameters for AgentBuilder + */ + protected createUseCaseSpecificParameters(stack: BaseStack): void { + this.createCustomImageParameters(stack); + } + + /** + * Get the custom image parameter for AgentBuilder + */ + public getCustomImageParameter(): cdk.CfnParameter { + return this.customAgentImageUri; + } + + /** + * Create custom image URI parameters + */ + private createCustomImageParameters(stack: BaseStack): void { + this.customAgentImageUri = new cdk.CfnParameter(stack, 'CustomAgentImageUri', { + type: 'String', + description: + 'Optional custom ECR image URI for the agent. If provided, overrides default image resolution.', + default: '', + allowedPattern: ECR_URI_PATTERN + '|^$', + constraintDescription: this.getCustomImageConstraintDescription(USE_CASE_TYPES.AGENT_BUILDER) + }); + } + + /** + * Override base configuration group label for agent-specific naming + */ + protected getBaseConfigurationGroupLabel(): string { + return 'Agent Configuration'; + } + + /** + * Get agent-specific parameter labels for better CloudFormation console UX + */ + protected getUseCaseSpecificParameterLabels(): Record { + return { + [this.customAgentImageUri.logicalId]: 'Custom Agent Image URI' + }; + } +} + +/** + * The main stack creating the Amazon Bedrock AgentCore agent use case infrastructure + * + * This stack orchestrates the deployment of Amazon Bedrock AgentCore agents following + * the GAAB v4.0.0 design patterns with modular helper classes. + * + * IMPORTANT: Amazon Bedrock AgentCore (preview service) does not support VPC deployments. + * All Amazon Bedrock AgentCore components run in non-VPC mode regardless of the + * deployment platform's VPC configuration. VPC support will be added in future releases. + */ +export class AgentBuilderStack extends AgentCoreBaseStack { + constructor(scope: Construct, id: string, props: BaseStackProps) { + super(scope, id, props); + } + + /** + * Get the image name for AgentBuilder use case + */ + public getImageName(): string { + return GAAB_STRANDS_AGENT_IMAGE_NAME; + } + + /** + * Get the use case type for AgentBuilder + */ + public getUseCaseType(): USE_CASE_TYPES { + return USE_CASE_TYPES.AGENT_BUILDER; + } + + /** + * Get the WebSocket route name for AgentBuilder + */ + public getWebSocketRouteName(): string { + return 'invokeAgentCore'; + } + + /** + * Get the LLM provider name for AgentBuilder + */ + public getLlmProviderName(): CHAT_PROVIDERS { + return CHAT_PROVIDERS.AGENT_CORE; + } + + /** + * Get the agent runtime name for AgentBuilder + */ + public getAgentRuntimeName(): string { + return `gaab_agent_${this.stackParameters.useCaseShortId}`; + } + + /** + * AgentBuilder supports inference profiles + */ + public shouldIncludeInferenceProfileSupport(): boolean { + return true; + } + + /** + * Initialize CloudFormation parameters + */ + protected initializeCfnParameters(): void { + this.stackParameters = new AgentBuilderParameters(this); + } + + /** + * Set up VPC configuration for AgentBuilder stack + * Note: Amazon Bedrock AgentCore (preview) does not support VPC deployments. + * The VPC setup will create minimal infrastructure for future compatibility. + */ + protected setupVPC(): VPCSetup { + return new VPCSetup(this, 'VPC', { + stackType: 'agent-builder', + deployVpcCondition: this.deployVpcCondition, + customResourceLambdaArn: this.applicationSetup.customResourceLambda.functionArn, + customResourceRoleArn: this.applicationSetup.customResourceLambda.role!.roleArn, + iPamPoolId: this.iPamPoolId.valueAsString, + accessLogBucket: this.applicationSetup.accessLoggingBucket, + ...this.baseStackProps + }); + } +} diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/agent-core-base-stack.ts b/source/infrastructure/lib/use-case-stacks/agent-core/agent-core-base-stack.ts new file mode 100644 index 00000000..d97a6ae8 --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/agent-core-base-stack.ts @@ -0,0 +1,950 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import * as dynamodb from 'aws-cdk-lib/aws-dynamodb'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import * as cognito from 'aws-cdk-lib/aws-cognito'; +import { Construct } from 'constructs'; + +import { BaseStack, BaseStackProps } from '../../framework/base-stack'; +import { UseCaseParameters, UseCaseStack } from '../../framework/use-case-stack'; +import { CHAT_PROVIDERS, StackDeploymentSource, USE_CASE_TYPES } from '../../utils/constants'; +import { ComponentCognitoAppClient, ComponentType } from '../../auth/component-cognito-app-client'; +import { AgentExecutionRole } from './components/agent-execution-role'; +import { AgentRuntimeDeployment } from './components/agent-runtime-deployment'; +import { AgentMemoryDeployment } from './components/agent-memory-deployment'; +import { AgentInvocationLambda } from './components/agent-invocation-lambda'; +import { ECRPullThroughCache } from './components/ecr-pull-through-cache'; +import { + determineDeploymentMode, + ECRImageError, + ImageResolutionContext, + sanitizeVersionTag, + resolveImageUriWithConditions +} from './utils/image-uri-resolver'; +import { NagSuppressions } from 'cdk-nag'; +import { LambdaToDynamoDB } from '@aws-solutions-constructs/aws-lambda-dynamodb'; + +/** + * Abstract base class for CloudFormation parameters specific to AgentCore deployments + * Provides common parameters shared across agent and workflow use cases + */ +export abstract class AgentCoreBaseParameters extends UseCaseParameters { + /** + * Enable long-term memory for the AgentCore deployment + */ + public enableLongTermMemory: cdk.CfnParameter; + + /** + * Shared ECR cache prefix from deployment platform (for dashboard deployments) + */ + public sharedEcrCachePrefix: cdk.CfnParameter; + + /** + * Cognito User Pool ID for creating component App Client + * Passed by the deployment dashboard's management lambda when deploying use cases + */ + public cognitoUserPoolId: cdk.CfnParameter; + + /** + * Use inference profile parameter for cross-region model access + */ + public useInferenceProfile: cdk.CfnParameter; + + constructor(stack: BaseStack) { + super(stack); + this.withAdditionalCfnParameters(stack); + } + + /** + * Add AgentCore-specific CloudFormation parameters + */ + protected withAdditionalCfnParameters(stack: BaseStack) { + super.withAdditionalCfnParameters(stack); + + this.createMemoryParameters(stack); + this.createSharedCacheParameter(stack); + this.createAuthParameters(stack); + this.createInferenceProfileParameter(stack); + this.createUseCaseSpecificParameters(stack); + this.validateParameterRelationships(); + this.updateParameterGroups(); + } + + /** + * Create memory configuration parameters + */ + private createMemoryParameters(stack: BaseStack): void { + this.enableLongTermMemory = new cdk.CfnParameter(stack, 'EnableLongTermMemory', { + type: 'String', + description: 'Enable long-term memory for the agent', + allowedValues: ['Yes', 'No'], + allowedPattern: '^Yes|No$', + default: 'Yes' + }); + } + + /** + * Create shared ECR cache prefix parameter (internal use only for v4.0.0) + * This parameter is automatically populated by the deployment platform for dashboard deployments + */ + private createSharedCacheParameter(stack: BaseStack): void { + this.sharedEcrCachePrefix = new cdk.CfnParameter(stack, 'SharedEcrCachePrefix', { + type: 'String', + description: 'Internal parameter - Shared ECR cache prefix automatically provided by deployment platform', + default: '', + allowedPattern: '^.*[^/]$|^$', + constraintDescription: + 'Internal parameter - automatically populated by deployment platform. Must not end with a trailing slash.' + }); + } + + /** + * Create authentication-related parameters for component App Client creation + */ + private createAuthParameters(stack: BaseStack): void { + this.cognitoUserPoolId = new cdk.CfnParameter(stack, 'ComponentCognitoUserPoolId', { + type: 'String', + description: + 'Cognito User Pool ID for creating component App Client - automatically provided by deployment platform', + default: '', + constraintDescription: 'Must be a valid Cognito User Pool ID' + }); + } + + /** + * Create inference profile parameter for cross-region model access + */ + private createInferenceProfileParameter(stack: BaseStack): void { + this.useInferenceProfile = new cdk.CfnParameter(stack, 'UseInferenceProfile', { + type: 'String', + allowedValues: ['Yes', 'No'], + default: 'No', + description: + 'If the model configured is Bedrock, you can indicate if you are using Bedrock Inference Profile. This will ensure that the required IAM policies will be configured during stack deployment. For more details, refer to https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html' + }); + } + + /** + * Abstract method for creating use case-specific parameters + * Must be implemented by concrete parameter classes + * + * @param stack - The CDK stack instance + */ + protected abstract createUseCaseSpecificParameters(stack: BaseStack): void; + + /** + * Abstract method to get the custom image parameter for this use case type + * Used for image URI resolution logic + * + * @returns The custom image URI parameter for this use case + */ + public abstract getCustomImageParameter(): cdk.CfnParameter; + + /** + * Update CloudFormation parameter groups with consistent structure + * Provides a standardized parameter organization across all AgentCore stacks + * + * Standard parameter group structure: + * 1. Base Configuration - Core AgentCore settings + * 2. Authentication Configuration (Internal) - Auth-related parameters + * 3. Use Case Specific Configuration - Concrete class specific parameters + * 4. Custom Image Configuration (Advanced) - Image override settings + * 5. MCP Server Configuration (Advanced) - MCP integration settings + * 6. Internal Configuration - System-managed parameters + */ + protected updateParameterGroups(): void { + this.initializeParameterGroupsMetadata(); + + const parameterGroups = this.buildStandardParameterGroups(); + + // Update the metadata with the structured parameter groups + this.cfnStack.templateOptions.metadata!['AWS::CloudFormation::Interface'].ParameterGroups = parameterGroups; + + // Add parameter labels for better UX + this.addParameterLabels(); + } + + /** + * Initialize CloudFormation Interface metadata structure + */ + private initializeParameterGroupsMetadata(): void { + if (!this.cfnStack.templateOptions.metadata) { + this.cfnStack.templateOptions.metadata = {}; + } + + if (!this.cfnStack.templateOptions.metadata['AWS::CloudFormation::Interface']) { + this.cfnStack.templateOptions.metadata['AWS::CloudFormation::Interface'] = {}; + } + } + + /** + * Build the standard parameter groups structure + * Preserves existing parameter groups from parent class and adds AgentCore-specific groups + */ + private buildStandardParameterGroups(): any[] { + // Get existing parameter groups from parent class (if any) + const existingParameterGroups = + this.cfnStack.templateOptions.metadata!['AWS::CloudFormation::Interface'].ParameterGroups || []; + + // Create new parameter groups array starting with AgentCore groups + const parameterGroups: any[] = []; + + // 1. Base Configuration (always first) + parameterGroups.push({ + Label: { default: this.getBaseConfigurationGroupLabel() }, + Parameters: [this.enableLongTermMemory.logicalId] + }); + + // 2. Authentication Configuration (Internal) + parameterGroups.push({ + Label: { default: 'Authentication Configuration (Internal)' }, + Parameters: [this.cognitoUserPoolId.logicalId] + }); + + // 3. Use Case Specific Configuration (if provided by concrete class) + const useCaseSpecificGroup = this.getUseCaseSpecificParameterGroup(); + if (useCaseSpecificGroup) { + parameterGroups.push(useCaseSpecificGroup); + } + + // 4. Custom Image Configuration (Advanced) + const customImageParameter = this.getCustomImageParameter(); + if (customImageParameter) { + parameterGroups.push({ + Label: { default: 'Custom Image Configuration (Advanced)' }, + Parameters: [customImageParameter.logicalId] + }); + } + + // 6. Internal Configuration (system-managed parameters) + parameterGroups.push({ + Label: { default: 'Internal Configuration (System Managed)' }, + Parameters: [this.sharedEcrCachePrefix.logicalId] + }); + + // 7. Append any existing parameter groups from parent class (like VPC configuration) + parameterGroups.push(...existingParameterGroups); + + return parameterGroups; + } + + /** + * Get the label for the base configuration group + * Can be overridden by concrete classes for use case-specific naming + */ + protected getBaseConfigurationGroupLabel(): string { + return 'AgentCore Configuration'; + } + + /** + * Get use case-specific parameter group configuration + * Should be overridden by concrete classes to provide their specific parameters + * + * @returns Parameter group configuration or undefined if no use case-specific parameters + */ + protected getUseCaseSpecificParameterGroup(): { Label: { default: string }; Parameters: string[] } | undefined { + return undefined; + } + + /** + * Add parameter labels for better CloudFormation console UX + */ + private addParameterLabels(): void { + const parameterLabels = { + [this.enableLongTermMemory.logicalId]: 'Enable Long-Term Memory', + [this.cognitoUserPoolId.logicalId]: 'Cognito User Pool ID', + [this.sharedEcrCachePrefix.logicalId]: 'Shared ECR Cache Prefix' + }; + + // Add custom image parameter label if it exists + const customImageParameter = this.getCustomImageParameter(); + if (customImageParameter) { + parameterLabels[customImageParameter.logicalId] = 'Custom Image URI'; + } + + // Add use case-specific parameter labels + const additionalLabels = this.getUseCaseSpecificParameterLabels(); + Object.assign(parameterLabels, additionalLabels); + + this.cfnStack.templateOptions.metadata!['AWS::CloudFormation::Interface'].ParameterLabels = parameterLabels; + } + + /** + * Get use case-specific parameter labels + * Should be overridden by concrete classes to provide labels for their parameters + * + * @returns Object mapping parameter logical IDs to display labels + */ + protected getUseCaseSpecificParameterLabels(): Record { + return {}; + } + + /** + * Validate parameter relationships and constraints + * Called during parameter creation to ensure consistency + */ + protected validateParameterRelationships(): void { + this.validateMemoryConfiguration(); + this.validateAuthConfiguration(); + this.validateImageConfiguration(); + this.validateMcpConfiguration(); + } + + /** + * Validate memory configuration parameters + */ + private validateMemoryConfiguration(): void { + // Memory parameter validation is handled by allowedValues constraint + // Additional validation can be added here if needed + } + + /** + * Validate authentication configuration parameters + */ + private validateAuthConfiguration(): void { + // Cognito User Pool ID validation + // The parameter allows empty string for standalone deployments + // When provided, it should be a valid Cognito User Pool ID format + // Validation is enforced through CloudFormation conditions + } + + /** + * Validate image configuration parameters + */ + private validateImageConfiguration(): void { + // Custom image URI validation is handled by allowedPattern constraint + // The pattern allows either empty string or valid ECR URI format + // Additional cross-parameter validation can be added here + } + + /** + * Validate MCP server configuration parameters + */ + private validateMcpConfiguration(): void { + // MCP Server IDs validation + // CommaDelimitedList type ensures proper format + // Empty list is allowed for deployments without MCP integration + } + + /** + * Validate naming conventions for new parameters + * Ensures consistent parameter naming across all AgentCore stacks + * + * @param parameterName - The parameter name to validate + * @param parameterType - The parameter type + */ + protected validateParameterNaming(parameterName: string, parameterType: string): void { + // Validate parameter name follows PascalCase convention + if (!/^[A-Z][a-zA-Z0-9]*$/.test(parameterName)) { + throw new Error( + `Parameter name '${parameterName}' does not follow PascalCase naming convention. ` + + 'Parameter names should start with uppercase letter and use PascalCase.' + ); + } + + // Validate parameter type is supported + const supportedTypes = ['String', 'Number', 'CommaDelimitedList', 'List', 'List']; + if (!supportedTypes.includes(parameterType)) { + throw new Error( + `Parameter type '${parameterType}' is not supported. ` + `Supported types: ${supportedTypes.join(', ')}` + ); + } + } + + /** + * Get enhanced constraint description for custom image parameters + * Provides detailed validation information for ECR URI parameters + * + * @param useCaseType - The use case type for context-specific messaging + */ + protected getCustomImageConstraintDescription(useCaseType: string): string { + return ( + `Must be a valid ECR image URI in the format: ` + + `123456789012.dkr.ecr.region.amazonaws.com/repository:tag ` + + `or empty to use default ${useCaseType} image resolution. ` + + `The ECR repository must be accessible from the deployment region.` + ); + } +} + +/** + * Abstract base stack for AgentCore use cases (agents and workflows) + * + * This abstract class provides common functionality for AgentCore deployments including: + * - AgentCore component setup (execution role, runtime deployment, invocation lambda, ECR cache) + * - Authentication components (Cognito app client, OAuth setup, auth policies) + * - Image URI resolution with deployment mode handling + * - Conditional inference profile support + * - Common CloudFormation outputs + * + * Concrete implementations must provide use case-specific behavior through abstract methods. + * + * IMPORTANT: Amazon Bedrock AgentCore (preview service) does not support VPC deployments. + * All Amazon Bedrock AgentCore components run in non-VPC mode regardless of the + * deployment platform's VPC configuration. VPC support will be added in future releases. + */ +export abstract class AgentCoreBaseStack extends UseCaseStack { + /** + * AgentCore execution role helper + */ + protected agentExecutionRole: AgentExecutionRole; + + /** + * AgentCore memory deployment helper + */ + protected agentMemoryDeployment: AgentMemoryDeployment; + + /** + * AgentCore runtime deployment helper + */ + protected agentRuntimeDeployment: AgentRuntimeDeployment; + + /** + * Agent invocation Lambda helper + */ + protected agentInvocationLambda: AgentInvocationLambda; + + /** + * ECR Pull-Through Cache helper + */ + protected ecrPullThroughCache: ECRPullThroughCache; + + /** + * Component Cognito App Client for authentication + */ + protected componentAppClient: ComponentCognitoAppClient; + + /** + * OAuth client custom resource for AgentCore authentication + */ + protected oauthClient: cdk.CustomResource; + + constructor(scope: Construct, id: string, props: BaseStackProps) { + super(scope, id, props); + this.withAdditionalResourceSetup(props); + this.withMetrics(props); + } + + protected withAdditionalResourceSetup(props: BaseStackProps): void { + // Set up AgentCore components first, before calling super which calls llmProviderSetup() + this.setupAgentCoreComponents(); + super.withAdditionalResourceSetup(props); + + // Update AgentCore runtime with multimodal properties after multimodal setup is complete + this.updateAgentRuntimeWithMultimodalProperties(); + } + + /** + * Update the AgentCore runtime deployment with multimodal properties + * This method should be called after the multimodal setup is complete + */ + private updateAgentRuntimeWithMultimodalProperties(): void { + // Always call this method, but use CloudFormation conditions to determine the values + if (this.agentRuntimeDeployment) { + // Use CloudFormation functions to conditionally set multimodal properties + // When multimodal is disabled, these will resolve to empty strings + const multimodalDataMetadataTableName = cdk.Fn.conditionIf( + this.multimodalEnabledCondition.logicalId, + cdk.Fn.conditionIf( + this.createMultimodalResourcesCondition.logicalId, + this.multimodalSetup.multimodalDataMetadataTable.tableName, + this.stackParameters.existingMultimodalDataMetadataTable.valueAsString + ), + '' + ).toString(); + + const multimodalDataBucketName = cdk.Fn.conditionIf( + this.multimodalEnabledCondition.logicalId, + cdk.Fn.conditionIf( + this.createMultimodalResourcesCondition.logicalId, + this.multimodalSetup.multimodalDataBucket.bucketName, + this.stackParameters.existingMultimodalDataBucket.valueAsString + ), + '' + ).toString(); + + this.agentRuntimeDeployment.updateMultimodalProperties( + multimodalDataMetadataTableName, + multimodalDataBucketName + ); + + // Add multimodal permissions to the agent execution role using CloudFormation conditions + // This creates the permissions conditionally based on the multimodal parameter + const agentCoreParams = this.stackParameters as AgentCoreBaseParameters; + + // Create multimodal permissions policy conditionally + this.addConditionalMultimodalPermissions( + multimodalDataMetadataTableName, + multimodalDataBucketName, + agentCoreParams.useCaseUUID.valueAsString + ); + } + } + + /** + * Add multimodal permissions to the agent execution role using CloudFormation conditions + * This follows the same pattern as other conditional resources in the deployment platform + */ + private addConditionalMultimodalPermissions( + multimodalDataMetadataTableName: string, + multimodalDataBucketName: string, + useCaseUUID: string + ): void { + // Create a conditional multimodal permissions policy + const multimodalPermissionsPolicy = new iam.Policy(this, 'AgentCoreMultimodalPermissionsPolicy', { + statements: [ + new iam.PolicyStatement({ + sid: 'multimodalMetadataAccess', + effect: iam.Effect.ALLOW, + actions: ['dynamodb:GetItem'], + resources: [ + `arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/${multimodalDataMetadataTableName}` + ] + }), + new iam.PolicyStatement({ + sid: 'MultimodalDataBucketAccess', + effect: iam.Effect.ALLOW, + actions: ['s3:GetObject'], + resources: [`arn:${cdk.Aws.PARTITION}:s3:::${multimodalDataBucketName}/${useCaseUUID}/*`] + }) + ] + }); + + // Apply the multimodal enabled condition to the policy + (multimodalPermissionsPolicy.node.defaultChild as cdk.CfnResource).cfnOptions.condition = + this.multimodalEnabledCondition; + + // Attach the policy to the agent execution role + this.agentExecutionRole.role.attachInlinePolicy(multimodalPermissionsPolicy); + NagSuppressions.addResourceSuppressions(multimodalPermissionsPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Wildcard permission required to modify AgentCore Auth Table with name from SSM parameter', + appliesTo: [ + 'Resource::arn::s3:::{"Fn::If":["MultimodalEnabledCondition",{"Fn::If":["CreateMultimodalResourcesCondition",{"Ref":"MultimodalSetupFactoriesMultimodalDataBucketS3Bucket2540B5CC"},{"Ref":"ExistingMultimodalDataBucket"}]},""]}//*' + ] + } + ]); + } + + /** + * Abstract method to get the image name for this use case type + * Used for ECR image URI resolution + * + * @returns The image name (e.g., 'gaab-strands-agent', 'gaab-strands-workflow') + */ + public abstract getImageName(): string; + + /** + * Abstract method to get the use case type for this stack + * Used for component configuration and CloudFormation outputs + * + * @returns The use case type enum value + */ + public abstract getUseCaseType(): USE_CASE_TYPES; + + /** + * Abstract method to get the WebSocket route name for this use case + * Used for WebSocket API route configuration + * + * @returns The WebSocket route name (e.g., 'invokeAgentCore', 'invokeWorkflow') + */ + public abstract getWebSocketRouteName(): string; + + /** + * Abstract method to get the LLM provider name for this use case + * Used for provider identification and configuration + * + * @returns The chat provider enum value + */ + public abstract getLlmProviderName(): CHAT_PROVIDERS; + + /** + * Abstract method to get the agent runtime name pattern for this use case + * Used for AgentCore runtime deployment naming + * + * @returns The runtime name pattern (e.g., 'gaab_agent_${useCaseShortId}', 'gaab_workflow_${useCaseShortId}') + */ + public abstract getAgentRuntimeName(): string; + + /** + * Abstract method to determine if this use case supports inference profiles + * Used for conditional inference profile resource creation + * + * @returns True if inference profiles should be supported, false otherwise + */ + public abstract shouldIncludeInferenceProfileSupport(): boolean; + + /** + * Set up WebSocket routes for use case invocation + * Uses the abstract getWebSocketRouteName() method for route configuration + */ + protected getWebSocketRoutes(): Map { + return new Map().set(this.getWebSocketRouteName(), this.chatLlmProviderLambda); + } + + /** + * Set up all AgentCore components using helper classes + * This method orchestrates the creation of all common AgentCore infrastructure + */ + private setupAgentCoreComponents(): void { + const agentCoreParams = this.stackParameters as AgentCoreBaseParameters; + + // Create memory deployment first to get memory ID + this.agentMemoryDeployment = new AgentMemoryDeployment(this, 'AgentMemoryDeployment', { + customResourceLambda: this.applicationSetup.customResourceLambda, + enableLongTermMemory: agentCoreParams.enableLongTermMemory.valueAsString, + agentRuntimeName: this.getAgentRuntimeName() + }); + + // Create agent execution role with memory ID + this.agentExecutionRole = new AgentExecutionRole(this, 'AgentExecutionRole', { + useCaseConfigTableName: agentCoreParams.useCaseConfigTableName.valueAsString, + memoryId: this.agentMemoryDeployment.getMemoryId() + }); + + // Set up conditional inference profile support if enabled for this use case type + if (this.shouldIncludeInferenceProfileSupport()) { + const inferenceProfileProvidedCondition = new cdk.CfnCondition(this, 'InferenceProfileProvidedCondition', { + expression: cdk.Fn.conditionEquals(agentCoreParams.useInferenceProfile.valueAsString, 'Yes') + }); + + this.agentExecutionRole.addInferenceProfileSupport( + this.applicationSetup.customResourceLambda, + this.applicationSetup.customResourceRole, + agentCoreParams.useCaseConfigTableName.valueAsString, + agentCoreParams.useCaseConfigRecordKey.valueAsString, + inferenceProfileProvidedCondition + ); + } + + const solutionVersion = process.env.VERSION ?? this.node.tryGetContext('solution_version'); + + const isStandaloneDeploymentCondition = new cdk.CfnCondition(this, 'IsStandaloneDeploymentCondition', { + expression: cdk.Fn.conditionEquals( + agentCoreParams.stackDeploymentSource, + StackDeploymentSource.STANDALONE_USE_CASE + ) + }); + + // Set up ECR Pull-Through Cache for standalone deployments + this.ecrPullThroughCache = new ECRPullThroughCache(this, 'ECRPullThroughCache', { + gaabVersion: solutionVersion, + customResourceLambda: this.applicationSetup.customResourceLambda, + useCaseShortId: agentCoreParams.useCaseShortId + }); + + if (this.ecrPullThroughCache && this.ecrPullThroughCache.pullThroughCacheRule) { + const ecrCacheRule = this.ecrPullThroughCache.pullThroughCacheRule; + if (ecrCacheRule.cfnOptions) { + ecrCacheRule.cfnOptions.condition = isStandaloneDeploymentCondition; + } + } + + // Resolve image URI using centralized logic + const imageUri = this.getImageUri(); + + // Set up Component App Client for authentication (if User Pool ID is provided) + this.setupComponentAppClient(agentCoreParams); + + // Set up AgentCore runtime deployment + this.agentRuntimeDeployment = new AgentRuntimeDeployment(this, 'AgentRuntimeDeployment', { + customResourceLambda: this.applicationSetup.customResourceLambda, + agentExecutionRole: this.agentExecutionRole.role, + agentRuntimeName: this.getAgentRuntimeName(), + agentImageUri: imageUri, + useCaseUUID: agentCoreParams.useCaseUUID.valueAsString, + useCaseConfigTableName: agentCoreParams.useCaseConfigTableName.valueAsString, + useCaseConfigRecordKey: agentCoreParams.useCaseConfigRecordKey.valueAsString, + cognitoUserPoolId: agentCoreParams.cognitoUserPoolId.valueAsString, + additionalProperties: { + UseCaseType: this.getUseCaseType(), + MemoryId: this.agentMemoryDeployment.getMemoryId(), + MemoryStrategyId: this.agentMemoryDeployment.getMemoryStrategyId() + } + }); + + // Create AgentCore outbound permissions custom resource for MCP server integration + new cdk.CustomResource(this, 'AgentCoreOutboundPermissions', { + resourceType: 'Custom::AgentCoreOutboundPermissions', + serviceToken: this.applicationSetup.customResourceLambda.functionArn, + properties: { + Resource: 'AGENTCORE_OUTBOUND_PERMISSIONS', + USE_CASE_ID: this.stackParameters.useCaseShortId, + USE_CASE_CLIENT_ID: this.componentAppClient.getClientId(), + USE_CASE_CONFIG_TABLE_NAME: agentCoreParams.useCaseConfigTableName.valueAsString, + USE_CASE_CONFIG_RECORD_KEY: agentCoreParams.useCaseConfigRecordKey.valueAsString + } + }); + + new LambdaToDynamoDB(this, 'AgentCoreOutboundPermissionsLambdaToDynamoDB', { + existingLambdaObj: this.applicationSetup.customResourceLambda, + existingTableObj: dynamodb.Table.fromTableName( + this, + 'UseCaseConfigTable', + agentCoreParams.useCaseConfigTableName.valueAsString + ) as dynamodb.Table, + tablePermissions: 'Read' + }); + + // Set up authentication policies and OAuth client + this.createCustomResourceAuthPolicy(agentCoreParams); + this.setupOAuthClient(agentCoreParams); + } + + /** + * Get image URI using centralized image resolution logic + * Uses abstract methods to get use case-specific image name and custom image parameter + */ + private getImageUri(): string { + const agentCoreParams = this.stackParameters as AgentCoreBaseParameters; + + try { + const deploymentMode = determineDeploymentMode(); + const solutionVersion = process.env.VERSION ?? this.node.tryGetContext('solution_version'); + + if (!solutionVersion) { + throw new ECRImageError( + 'GAAB version is required for image URI resolution. Set VERSION environment variable or solution_version context.', + 'resolution', + { deploymentMode } + ); + } + + // Sanitize version to avoid double 'v' prefix and add local suffix + const sanitizedVersion = sanitizeVersionTag(solutionVersion, deploymentMode); + + const context: ImageResolutionContext = { + deploymentMode, + gaabVersion: sanitizedVersion, + customImageUri: agentCoreParams.getCustomImageParameter()?.valueAsString, + sharedEcrCachePrefix: agentCoreParams.sharedEcrCachePrefix?.valueAsString, + useCaseShortId: agentCoreParams.useCaseShortId + }; + + // Use centralized resolver with CloudFormation conditions + return resolveImageUriWithConditions( + this, + this.getImageName(), + context, + agentCoreParams.getCustomImageParameter(), + agentCoreParams.sharedEcrCachePrefix, + this.stackParameters.stackDeploymentSource, + this.ecrPullThroughCache.getCachedImageUri() + ); + } catch (error) { + if (error instanceof ECRImageError) { + throw new Error( + `Image URI resolution failed: ${error.message}\nContext: ${JSON.stringify(error.context)}` + ); + } + throw new Error(`Failed to resolve image URI: ${error instanceof Error ? error.message : 'Unknown error'}`); + } + } + + /** + * Set up Component Cognito App Client for authentication + */ + private setupComponentAppClient(agentCoreParams: AgentCoreBaseParameters): void { + const createAppClientCondition = new cdk.CfnCondition(this, 'CreateAppClientCondition', { + expression: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(agentCoreParams.cognitoUserPoolId.valueAsString, '')) + }); + + const userPool = cognito.UserPool.fromUserPoolId( + this, + 'UserPool', + agentCoreParams.cognitoUserPoolId.valueAsString + ); + + this.componentAppClient = new ComponentCognitoAppClient(this, 'ComponentAppClient', { + userPool: userPool, + useCaseShortId: agentCoreParams.useCaseShortId, + componentType: ComponentType.AGENT // This could be made abstract if needed + }); + + // Apply the condition to the underlying CfnUserPoolClient resource + const cfnAppClient = this.componentAppClient.node.findChild('ComponentAppClient') as cognito.CfnUserPoolClient; + cfnAppClient.cfnOptions.condition = createAppClientCondition; + } + + /** + * Create and attach auth lambda permissions policy for custom resource + */ + private createCustomResourceAuthPolicy(agentCoreParams: AgentCoreBaseParameters): void { + const customResourceAuthPolicy = new iam.Policy(this, 'CustomResourceAuthPolicy', { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:CreateOauth2CredentialProvider', + 'bedrock-agentcore:DeleteOauth2CredentialProvider', + 'bedrock-agentcore:CreateTokenVault' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:token-vault/default`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:token-vault/default/*` + ] + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['secretsmanager:CreateSecret', 'secretsmanager:DeleteSecret'], + resources: [ + `arn:${cdk.Aws.PARTITION}:secretsmanager:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:secret:bedrock-agentcore*` + ] + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['iam:PassRole'], + resources: [`arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/*`], + conditions: { + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId'] + }, + 'StringEquals': { + 'iam:PassedToService': 'bedrock-agentcore.amazonaws.com' + } + } + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:UpdateGateway', + 'bedrock-agentcore:GetGateway', + 'bedrock-agentcore:UpdateAgentRuntime', + 'bedrock-agentcore:GetAgentRuntime', + 'bedrock-agentcore:ListTagsForResource', + 'bedrock-agentcore:TagResource', + 'bedrock-agentcore:UntagResource' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:runtime/*`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:runtime/*/runtime-endpoint/*`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:gateway/*` + ] + }), + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['iam:CreateServiceLinkedRole'], + resources: [ + `arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/aws-service-role/runtime-identity.bedrock-agentcore.amazonaws.com/AWSServiceRoleForBedrockAgentCoreRuntimeIdentity` + ], + conditions: { + StringEquals: { 'iam:AWSServiceName': 'runtime-identity.bedrock-agentcore.amazonaws.com' } + } + }) + ] + }); + + customResourceAuthPolicy.attachToRole(this.applicationSetup.customResourceRole); + + NagSuppressions.addResourceSuppressions(customResourceAuthPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Wildcard permission required to modify AgentCore Auth Table with name from SSM parameter', + appliesTo: [ + 'Resource::arn::dynamodb:::table/*AgentCorePermissionStore*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'Wildcard permission required to modify AgentCore Runtimes and Gateways to add permissions', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::runtime/*', + 'Resource::arn::bedrock-agentcore:::runtime/*/runtime-endpoint/*', + 'Resource::arn::bedrock-agentcore:::gateway/*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'Wildcard permission required to pass in role for AgentCore Runtime/Gateway updates through Custom Resource', + appliesTo: ['Resource::arn::iam:::role/*'] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'Wildcard permission required for OAuth2 credential provider operations with dynamic provider names', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::token-vault/default/*', + 'Resource::arn::secretsmanager:::secret:bedrock-agentcore*' + ] + } + ]); + + const createAppClientCondition = this.node.findChild('CreateAppClientCondition') as cdk.CfnCondition; + (customResourceAuthPolicy.node.defaultChild as cdk.CfnResource).cfnOptions.condition = createAppClientCondition; + } + + /** + * Set up OAuth client for AgentCore authentication + */ + private setupOAuthClient(agentCoreParams: AgentCoreBaseParameters): void { + this.oauthClient = new cdk.CustomResource(this, 'AgentCoreOAuthClient', { + serviceToken: this.applicationSetup.customResourceLambda.functionArn, + properties: { + Resource: 'AGENTCORE_OAUTH_CLIENT', + CLIENT_ID: this.componentAppClient.getClientId(), + CLIENT_SECRET: this.componentAppClient.getClientSecret(), + DISCOVERY_URL: `https://cognito-idp.${cdk.Aws.REGION}.amazonaws.com/${agentCoreParams.cognitoUserPoolId.valueAsString}/.well-known/openid-configuration`, + PROVIDER_NAME: `gaab-oauth-provider-${this.stackParameters.useCaseShortId}` + } + }); + + const createAppClientCondition = this.node.findChild('CreateAppClientCondition') as cdk.CfnCondition; + (this.oauthClient.node.defaultChild as cdk.CfnResource).cfnOptions.condition = createAppClientCondition; + } + + /** + * Set up Lambda for AgentCore invocation using helper class + * This method is called by the parent UseCaseStack during llmProviderSetup() + */ + public llmProviderSetup(): void { + const agentCoreParams = this.stackParameters as AgentCoreBaseParameters; + + this.agentInvocationLambda = new AgentInvocationLambda(this, 'AgentInvocationLambda', { + agentRuntimeArn: this.agentRuntimeDeployment.getAgentRuntimeArn(), + useCaseUUID: agentCoreParams.useCaseUUID.valueAsString + }); + + this.chatLlmProviderLambda = this.agentInvocationLambda.function; + + this.addStackOutputs(); + } + + /** + * Add CloudFormation outputs for the AgentCore deployment + * Uses abstract methods to provide use case-specific output names + * Can be overridden by concrete classes for custom output descriptions + */ + protected addStackOutputs(): void { + const useCaseType = this.getUseCaseType(); + const outputPrefix = useCaseType === USE_CASE_TYPES.AGENT_BUILDER ? 'Agent' : 'Workflow'; + + new cdk.CfnOutput(this, `${outputPrefix}RuntimeArn`, { + value: this.agentRuntimeDeployment.getAgentRuntimeArn(), + description: `ARN of the deployed Agentcore Runtime` + }); + + new cdk.CfnOutput(this, `${outputPrefix}ExecutionRoleArn`, { + value: this.agentExecutionRole.role.roleArn, + description: `ARN of the Agentcore execution role` + }); + + new cdk.CfnOutput(this, `${outputPrefix}InvocationLambdaArn`, { + value: this.agentInvocationLambda.function.functionArn, + description: `ARN of the ${useCaseType} invocation Lambda function` + }); + + new cdk.CfnOutput(this, `${outputPrefix}MemoryId`, { + value: this.agentMemoryDeployment.getMemoryId(), + description: `ID of the deployed ${useCaseType} Memory` + }); + + new cdk.CfnOutput(this, `${outputPrefix}ComponentAppClientId`, { + value: this.componentAppClient.getClientId(), + description: `Cognito App Client ID for the component authentication` + }); + } +} diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-execution-role.ts b/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-execution-role.ts new file mode 100644 index 00000000..4bbed894 --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-execution-role.ts @@ -0,0 +1,445 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { Construct } from 'constructs'; +import { NagSuppressions } from 'cdk-nag'; + +/** + * Properties for Agent Execution Role + */ +export interface AgentExecutionRoleProps { + /** + * Use case configuration table name for scoped DynamoDB permissions + */ + useCaseConfigTableName: string; + + /** + * Use cases table name for workflow agent discovery (optional) + * When provided, enables workflows to query use cases table for agent configurations + */ + useCasesTableName?: string; + + /** + * Memory ID for scoped memory permissions (optional) + * When provided, grants access to specific memory instance instead of wildcard + */ + memoryId?: string; +} + +/** + * Helper class to create and configure the AgentCore Runtime execution role + * with all required permissions as specified in the GAAB v4.0.0 design document + */ +export class AgentExecutionRole extends Construct { + public readonly role: iam.Role; + + constructor(scope: Construct, id: string, props: AgentExecutionRoleProps) { + super(scope, id); + + this.role = this.createExecutionRole(props); + this.addSecurityConditions(); + this.addNagSuppressions(); + if (props.memoryId) { + // This permission is separated out as a separate Policy to ensure that no we don't create a dependency between memory and runtime deployment. + const memoryPolicy = this.createAgentCoreMemoryPolicy(props.memoryId); + memoryPolicy.attachToRole(this.role); + } + } + + /** + * Create the AgentCore Runtime execution role with comprehensive permissions + */ + private createExecutionRole(props: AgentExecutionRoleProps): iam.Role { + const statements = [ + this.createECRPermissions(), + this.createCloudWatchLogsPermissions(), + this.createXRayPermissions(), + this.createCloudWatchMetricsPermissions(), + this.createWorkloadIdentityPermissions(), // Includes OAuth2 token permissions + this.createSecretsManagerPermissions(), + this.createBedrockPermissions(), + this.createBedrockGuardrailPermissions(), + this.createDynamoDBPermissions(props.useCaseConfigTableName, props.useCasesTableName) + ]; + + return new iam.Role(this, 'AgentCoreRuntimeExecutionRole', { + assumedBy: new iam.ServicePrincipal('bedrock-agentcore.amazonaws.com'), + description: 'Execution role for AgentCore Runtime', + inlinePolicies: { + AgentCoreRuntimePolicy: new iam.PolicyDocument({ + statements: statements + }) + } + }); + } + + /** + * Create the AgentCore Runtime execution role with memory access permissions + * @param memoryId The AgentCore memory instance to be access + * @returns An IAM policy providing the permission. + */ + private createAgentCoreMemoryPolicy(memoryId: string): iam.Policy { + return new iam.Policy(this, 'AgentCoreRuntimeMemoryPolicy', { + statements: [this.createAgentCoreMemoryPermissions(memoryId)] + }); + } + + /** + * Create ECR permissions for image access + */ + private createECRPermissions(): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'ECRAccess', + effect: iam.Effect.ALLOW, + actions: ['ecr:BatchGetImage', 'ecr:GetDownloadUrlForLayer', 'ecr:GetAuthorizationToken'], + resources: [ + `arn:${cdk.Aws.PARTITION}:ecr:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:repository/gaab-agents-*/*`, + '*' // GetAuthorizationToken requires wildcard + ] + }); + } + + /** + * Create comprehensive CloudWatch Logs permissions + */ + private createCloudWatchLogsPermissions(): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'CloudWatchLogs', + effect: iam.Effect.ALLOW, + actions: [ + 'logs:CreateLogGroup', + 'logs:CreateLogStream', + 'logs:PutLogEvents', + 'logs:DescribeLogStreams', + 'logs:DescribeLogGroups' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:logs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:log-group:/aws/bedrock-agentcore/runtimes/*`, + `arn:${cdk.Aws.PARTITION}:logs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:log-group:/aws/bedrock-agentcore/runtimes/*:log-stream:*`, + `arn:${cdk.Aws.PARTITION}:logs:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:log-group:*` + ] + }); + } + + /** + * Create X-Ray tracing permissions + */ + private createXRayPermissions(): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'XRayTracing', + effect: iam.Effect.ALLOW, + actions: [ + 'xray:PutTraceSegments', + 'xray:PutTelemetryRecords', + 'xray:GetSamplingRules', + 'xray:GetSamplingTargets' + ], + resources: ['*'] + }); + } + + /** + * Create CloudWatch metrics permissions + */ + private createCloudWatchMetricsPermissions(): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'CloudWatchMetrics', + effect: iam.Effect.ALLOW, + actions: ['cloudwatch:PutMetricData'], + resources: ['*'], + conditions: { + StringEquals: { + 'cloudwatch:namespace': 'bedrock-agentcore' + } + } + }); + } + + /** + * Create AgentCore workload identity and OAuth2 token permissions + * Includes both workload identity management and OAuth2 token access + * GetResourceOauth2Token requires access to both workload-identity and token-vault resources + */ + private createWorkloadIdentityPermissions(): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'AgentCoreWorkloadIdentity', + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:CreateWorkloadIdentity', + 'bedrock-agentcore:GetWorkloadAccessToken', + 'bedrock-agentcore:GetWorkloadAccessTokenForJWT', + 'bedrock-agentcore:GetWorkloadAccessTokenForUserId', + 'bedrock-agentcore:GetResourceOauth2Token' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:workload-identity-directory/default`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:workload-identity-directory/default/workload-identity/*`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:token-vault/default`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:token-vault/default/oauth2credentialprovider/*` + ] + }); + } + + /** + * Create Secrets Manager permissions for AgentCore identity secrets + * Scoped to secrets with the bedrock-agentcore-identity! prefix for OAuth2 credentials + */ + private createSecretsManagerPermissions(): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'SecretsManagerAccess', + effect: iam.Effect.ALLOW, + actions: ['secretsmanager:GetSecretValue'], + resources: [ + `arn:${cdk.Aws.PARTITION}:secretsmanager:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:secret:bedrock-agentcore-identity!*` + ] + }); + } + + /** + * Create Bedrock model invocation permissions + * Provides baseline permissions for foundation models in the deployment region + */ + private createBedrockPermissions(): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'BedrockModelInvocation', + effect: iam.Effect.ALLOW, + actions: ['bedrock:InvokeModel', 'bedrock:InvokeModelWithResponseStream'], + resources: [ + cdk.Fn.join('', ['arn:', cdk.Aws.PARTITION, ':bedrock:', cdk.Aws.REGION, '::foundation-model/*']) + ] + }); + } + + /** + * Create Bedrock Guardrail permissions for content filtering and safety + */ + private createBedrockGuardrailPermissions(): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'BedrockGuardrailAccess', + effect: iam.Effect.ALLOW, + actions: ['bedrock:ApplyGuardrail'], + resources: [`arn:${cdk.Aws.PARTITION}:bedrock:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:guardrail/*`] + }); + } + + /** + * Create AgentCore Memory permissions for event and semantic memory operations if memoryId exists + */ + private createAgentCoreMemoryPermissions(memoryId: string): iam.PolicyStatement { + return new iam.PolicyStatement({ + sid: 'AgentCoreMemoryAccess', + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:CreateEvent', + 'bedrock-agentcore:ListEvents', + 'bedrock-agentcore:RetrieveMemoryRecords', + 'bedrock-agentcore:GetEvent' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:memory/${memoryId}` + ] + }); + } + + /** + * Create DynamoDB permissions for use case configuration access + * Scoped to the specific configuration table and optionally use cases table for workflows + */ + private createDynamoDBPermissions(useCaseConfigTableName: string, useCasesTableName?: string): iam.PolicyStatement { + const resources = [ + `arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/${useCaseConfigTableName}` + ]; + + // Add use cases table access for workflows to discover agent configurations + if (useCasesTableName) { + resources.push( + `arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/${useCasesTableName}` + ); + } + + return new iam.PolicyStatement({ + sid: 'DynamoDBConfigAccess', + effect: iam.Effect.ALLOW, + actions: ['dynamodb:GetItem', 'dynamodb:Query'], + resources: resources + }); + } + + /** + * Add enhanced security conditions to the assume role policy + */ + private addSecurityConditions(): void { + this.role.assumeRolePolicy?.addStatements( + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + principals: [new iam.ServicePrincipal('bedrock-agentcore.amazonaws.com')], + actions: ['sts:AssumeRole'], + conditions: { + StringEquals: { + 'aws:SourceAccount': cdk.Aws.ACCOUNT_ID + }, + ArnLike: { + 'aws:SourceArn': `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:*:*:*` + } + } + }) + ); + } + + /** + * Add CDK NAG suppressions for wildcard permissions + */ + private addNagSuppressions(): void { + // Add comprehensive suppressions for all wildcard permissions used by AgentCore Runtime + NagSuppressions.addResourceSuppressions( + this.role, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'AgentCore Runtime requires wildcard permissions for ECR GetAuthorizationToken, CloudWatch Logs, X-Ray tracing, and CloudWatch metrics as specified in GAAB v4.0.0 design document.', + appliesTo: ['Resource::*'] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'Bedrock Guardrail access requires wildcard to support all guardrails in the account. Required to enable single runtime workflows to invoke all underlying guardrails of the specialized agents.', + appliesTo: ['Resource::arn::bedrock:::guardrail/*'] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'Secrets Manager wildcard is scoped to bedrock-agentcore-identity! prefix for OAuth2 credentials. The full secret ID is not known at stack creation time as it is dynamically created by AgentCore.', + appliesTo: [ + 'Resource::arn::secretsmanager:::secret:bedrock-agentcore-identity!*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'AgentCore Runtime requires broad permissions for CloudWatch Logs, ECR repositories, Bedrock foundation models, Bedrock Guardrails, Bedrock AgentCore workload identity, OAuth2 token vault (including both base and credential provider paths), and AgentCore Memory.', + appliesTo: [ + 'Resource::arn::bedrock:::foundation-model/*', + 'Resource::arn::bedrock:::guardrail/*', + 'Resource::arn::logs:::log-group:*', + 'Resource::arn::logs:::log-group:/aws/bedrock-agentcore/runtimes/*', + 'Resource::arn::logs:::log-group:/aws/bedrock-agentcore/runtimes/*:log-stream:*', + 'Resource::arn::ecr:::repository/gaab-agents-*/*', + 'Resource::arn::bedrock-agentcore:::workload-identity-directory/default/workload-identity/*', + 'Resource::arn::bedrock-agentcore:::token-vault/default/oauth2credentialprovider/*', + 'Resource::arn::bedrock-agentcore:::memory/*' + ] + } + ], + true // Apply to children + ); + } + + /** + * Add inference profile support for cross-region model access + * + * This method creates a custom resource that resolves specific model ARNs from the + * inference profile configuration stored in DynamoDB. If an inference profile is configured, + * it adds additional permissions for the specific models (which may be in different regions). + * + * The custom resource and policy are conditionally created based on the provided condition. + * + * @param customResourceLambda - Lambda function for custom resource operations + * @param customResourceRole - IAM role for the custom resource lambda + * @param useCaseConfigTableName - DynamoDB table containing use case configuration + * @param useCaseConfigRecordKey - Key for the specific use case configuration record + * @param condition - CloudFormation condition to control whether resources are created + * @returns Custom resource that resolves inference profile model ARNs + */ + public addInferenceProfileSupport( + customResourceLambda: lambda.IFunction, + customResourceRole: iam.IRole, + useCaseConfigTableName: string, + useCaseConfigRecordKey: string, + condition: cdk.CfnCondition + ): cdk.CustomResource { + // Grant custom resource permissions to get inference profile information + const getInferenceProfilePolicy = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['bedrock:GetInferenceProfile'], + resources: [`arn:${cdk.Aws.PARTITION}:bedrock:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:inference-profile/*`] + }); + + // Grant custom resource permissions to read use case configuration from DynamoDB + const customResourceUseCaseTablePolicy = new iam.PolicyStatement({ + actions: ['dynamodb:GetItem'], + resources: [ + `arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/${useCaseConfigTableName}` + ], + conditions: { + 'ForAllValues:StringEquals': { + 'dynamodb:LeadingKeys': [useCaseConfigRecordKey] + } + }, + effect: iam.Effect.ALLOW + }); + + // Attach policies to custom resource role + // Cast to Role to access addToPolicy method + const role = customResourceRole as iam.Role; + role.addToPolicy(getInferenceProfilePolicy); + role.addToPolicy(customResourceUseCaseTablePolicy); + + // Add CDK Nag suppression for the custom resource role's inference profile permissions + NagSuppressions.addResourceSuppressions( + role, + [ + { + id: 'AwsSolutions-IAM5', + reason: 'Custom resource requires GetInferenceProfile permission with wildcard to resolve inference profile model ARNs. The specific inference profile IDs are not known at deployment time and are configured by users.', + appliesTo: [ + 'Resource::arn::bedrock:::inference-profile/*' + ] + } + ], + true // Apply to children (including the DefaultPolicy) + ); + + // Create custom resource to resolve model ARNs from inference profile + const inferenceProfileArnsForPolicy = new cdk.CustomResource(this, 'GetModelResourceArns', { + resourceType: 'Custom::GetModelResourceArns', + serviceToken: customResourceLambda.functionArn, + properties: { + Resource: 'GET_MODEL_RESOURCE_ARNS', + USE_CASE_CONFIG_TABLE_NAME: useCaseConfigTableName, + USE_CASE_CONFIG_RECORD_KEY: useCaseConfigRecordKey + } + }); + + // Create policy with resolved model ARNs from inference profile + // This allows cross-region model access when using inference profiles + const inferenceProfileModelPolicy = new iam.Policy(this, 'InferenceProfileModelPolicy', { + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['bedrock:InvokeModelWithResponseStream', 'bedrock:InvokeModel'], + resources: cdk.Fn.split(',', inferenceProfileArnsForPolicy.getAttString('Arns')) + }) + ] + }); + + // Apply the condition to both the custom resource and the policy + // They are only created when UseInferenceProfile parameter is set to 'Yes' + (inferenceProfileModelPolicy.node.defaultChild as cdk.CfnResource).cfnOptions.condition = condition; + (inferenceProfileArnsForPolicy.node.defaultChild as cdk.CfnResource).cfnOptions.condition = condition; + + this.role.attachInlinePolicy(inferenceProfileModelPolicy); + inferenceProfileArnsForPolicy.node.addDependency(this.role); + + // Add NAG suppressions for the inference profile policy + NagSuppressions.addResourceSuppressions(inferenceProfileModelPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Inference profile model ARNs are dynamically resolved from the configured inference profile and may include cross-region foundation models.', + appliesTo: ['Resource::*'] + } + ]); + + return inferenceProfileArnsForPolicy; + } +} diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-invocation-lambda.ts b/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-invocation-lambda.ts new file mode 100644 index 00000000..a24f9d9a --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-invocation-lambda.ts @@ -0,0 +1,140 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; + +import { Construct } from 'constructs'; +import { NagSuppressions } from 'cdk-nag'; +import { ApplicationAssetBundler } from '../../../framework/bundler/asset-options-factory'; +import { createDefaultLambdaRole } from '../../../utils/common-utils'; +import { + CHAT_LAMBDA_PYTHON_RUNTIME, + LAMBDA_TIMEOUT_MINS, + LANGCHAIN_LAMBDA_PYTHON_RUNTIME, + USE_CASE_UUID_ENV_VAR +} from '../../../utils/constants'; + +/** + * Properties for Agent Invocation Lambda + */ +export interface AgentInvocationLambdaProps { + /** + * AgentCore Runtime ARN + */ + agentRuntimeArn: string; + + /** + * Use case UUID for logging and identification + */ + useCaseUUID: string; +} + +/** + * Helper class to create and configure the Agent Invocation Lambda + * with streaming support and proper IAM permissions + * + * Note: As of dev of GAAB v4.0.0, Amazon Bedrock AgentCore does not support + * VPC mode. This is a known limitation and will be addressed in future releases. + */ +export class AgentInvocationLambda extends Construct { + public readonly function: lambda.Function; + public readonly role: iam.Role; + + constructor(scope: Construct, id: string, props: AgentInvocationLambdaProps) { + super(scope, id); + + this.role = this.createInvocationRole(); + this.function = this.createLambdaFunction(props); + this.addAgentCorePermissions(); + this.addNagSuppressions(); + } + + /** + * Create IAM role for Agent invocation using the common utility function + * Note: Agent Core v4.0.0 runs in non-VPC mode only + */ + private createInvocationRole(): iam.Role { + return createDefaultLambdaRole(this, 'AgentInvocationLambdaRole'); + } + + /** + * Add AgentCore-specific permissions to the Lambda role + */ + private addAgentCorePermissions(): void { + // AgentCore Runtime invocation permissions (both sync and streaming) + this.role.addToPolicy( + new iam.PolicyStatement({ + sid: 'AgentCoreRuntimeInvocation', + effect: iam.Effect.ALLOW, + actions: ['bedrock-agentcore:InvokeAgentRuntime', 'bedrock-agentcore:InvokeAgentRuntimeForUser'], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:runtime/*` + ] + }) + ); + + // WebSocket connection management permissions (wildcard since API ID not available at this time) + this.role.addToPolicy( + new iam.PolicyStatement({ + sid: 'WebSocketManagement', + effect: iam.Effect.ALLOW, + actions: ['execute-api:ManageConnections'], + resources: [`arn:${cdk.Aws.PARTITION}:execute-api:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:*/*/*`] + }) + ); + } + + /** + * Create the Lambda function for agent invocation + */ + private createLambdaFunction(props: AgentInvocationLambdaProps): lambda.Function { + return new lambda.Function(this, 'AgentInvocationLambda', { + code: lambda.Code.fromAsset( + '../lambda/agentcore-invocation', + ApplicationAssetBundler.assetBundlerFactory() + .assetOptions(CHAT_LAMBDA_PYTHON_RUNTIME) + .options(this, '../lambda/agentcore-invocation') + ), + role: this.role, + runtime: LANGCHAIN_LAMBDA_PYTHON_RUNTIME, + handler: 'handler.lambda_handler', + timeout: cdk.Duration.minutes(LAMBDA_TIMEOUT_MINS), + memorySize: 1024, + environment: { + POWERTOOLS_SERVICE_NAME: 'AGENT_CORE_INVOCATION', + AGENT_RUNTIME_ARN: props.agentRuntimeArn, + [USE_CASE_UUID_ENV_VAR]: props.useCaseUUID + }, + description: 'Lambda for AgentCore Runtime invocation via WebSocket with streaming support' + }); + } + + /** + * Add CDK NAG suppressions for AgentCore-specific permissions + * Note: Basic Lambda permissions are already suppressed by createDefaultLambdaRole + */ + private addNagSuppressions(): void { + NagSuppressions.addResourceSuppressions(this.role.node.tryFindChild('DefaultPolicy') as iam.Policy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Lambda requires permissions to invoke AgentCore Runtime with wildcard for any agent runtime instance', + appliesTo: ['Resource::arn::bedrock-agentcore:::runtime/*'] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'Lambda requires permissions to manage WebSocket connections across all API Gateway WebSocket APIs', + appliesTo: ['Resource::arn::execute-api:::*/*/*'] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'Lambda requires permissions to manage WebSocket connections for specific API Gateway endpoint', + appliesTo: [ + 'Resource::arn::execute-api:::/*/*/@connections/*' + ] + } + ]); + } +} diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-memory-deployment.ts b/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-memory-deployment.ts new file mode 100644 index 00000000..2dd60dd2 --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-memory-deployment.ts @@ -0,0 +1,119 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { Construct } from 'constructs'; +import { NagSuppressions } from 'cdk-nag'; + +/** + * Properties for AgentCore Memory deployment + */ +export interface AgentMemoryDeploymentProps { + /** + * Custom resource Lambda function for deployment operations + */ + customResourceLambda: lambda.Function; + + /** + * Agent runtime name + */ + agentRuntimeName: string; + + /** + * Enable long-term memory flag + */ + enableLongTermMemory: string; +} + +/** + * Helper class to manage AgentCore Memory deployment via custom resource + */ +export class AgentMemoryDeployment extends Construct { + public readonly customResource: cdk.CustomResource; + public readonly managementPolicy: iam.Policy; + + constructor(scope: Construct, id: string, props: AgentMemoryDeploymentProps) { + super(scope, id); + + this.managementPolicy = this.createMemoryManagementPolicy(); + this.attachPolicyToCustomResourceLambda(props.customResourceLambda); + this.customResource = this.createMemoryCustomResource(props); + this.addNagSuppressions(); + } + + /** + * Create IAM policy for AgentCore Memory management operations + */ + private createMemoryManagementPolicy(): iam.Policy { + return new iam.Policy(this, 'AgentCoreMemoryManagementPolicy', { + statements: [ + new iam.PolicyStatement({ + sid: 'AgentCoreMemoryManagement', + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:CreateMemory', + 'bedrock-agentcore:UpdateMemory', + 'bedrock-agentcore:DeleteMemory', + 'bedrock-agentcore:GetMemory', + 'bedrock-agentcore:ListMemories' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:memory/*` + ] + }) + ] + }); + } + + /** + * Attach the management policy to the custom resource Lambda + */ + private attachPolicyToCustomResourceLambda(customResourceLambda: lambda.Function): void { + this.managementPolicy.attachToRole(customResourceLambda.role!); + } + + /** + * Create the custom resource for AgentCore Memory deployment + */ + private createMemoryCustomResource(props: AgentMemoryDeploymentProps): cdk.CustomResource { + const customResource = new cdk.CustomResource(this, 'AgentCoreMemory', { + resourceType: 'Custom::AgentCoreMemory', + serviceToken: props.customResourceLambda.functionArn, + properties: { + Resource: 'DEPLOY_AGENT_CORE_MEMORY', + AgentRuntimeName: props.agentRuntimeName, + EnableLongTermMemory: props.enableLongTermMemory + } + }); + + customResource.node.addDependency(this.managementPolicy); + return customResource; + } + + /** + * Add CDK NAG suppressions + */ + private addNagSuppressions(): void { + NagSuppressions.addResourceSuppressions(this.managementPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Custom resource requires permissions to manage AgentCore Memory resources with wildcard for resource instances', + appliesTo: ['Resource::arn::bedrock-agentcore:::memory/*'] + } + ]); + } + + /** + * Get the Memory ID from the custom resource + */ + public getMemoryId(): string { + return this.customResource.getAttString('MemoryId'); + } + + public getMemoryStrategyId(): string { + return this.customResource.getAttString('MemoryStrategyId'); + } +} diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-runtime-deployment.ts b/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-runtime-deployment.ts new file mode 100644 index 00000000..cfeb233b --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/components/agent-runtime-deployment.ts @@ -0,0 +1,227 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { Construct } from 'constructs'; +import { NagSuppressions } from 'cdk-nag'; + +/** + * Properties for AgentCore Runtime deployment + */ +export interface AgentRuntimeDeploymentProps { + /** + * Custom resource Lambda function for deployment operations + */ + customResourceLambda: lambda.Function; + + /** + * AgentCore execution role + */ + agentExecutionRole: iam.Role; + + /** + * Agent runtime name + */ + agentRuntimeName: string; + + /** + * Agent image URI + */ + agentImageUri: string; + + /** + * Use case UUID + */ + useCaseUUID: string; + + /** + * Use case configuration table name + */ + useCaseConfigTableName: string; + + /** + * Use case configuration record key + */ + useCaseConfigRecordKey: string; + + /** + * Cognito user pool ID for authorizer configuration + */ + cognitoUserPoolId: string; + + /** + * Additional properties to pass to the custom resource + * This allows flexibility for different use cases (Agent, Workflow, etc.) + */ + additionalProperties?: Record; +} + +/** + * Helper class to manage AgentCore Runtime deployment via custom resource + */ +export class AgentRuntimeDeployment extends Construct { + public customResource: cdk.CustomResource; + public readonly managementPolicy: iam.Policy; + + constructor(scope: Construct, id: string, props: AgentRuntimeDeploymentProps) { + super(scope, id); + + this.managementPolicy = this.createManagementPolicy(props.agentExecutionRole, props.useCaseConfigTableName); + this.attachPolicyToCustomResourceLambda(props.customResourceLambda); + this.customResource = this.createCustomResource(props); + + // Ensure the custom resource depends on the policy being attached to the Lambda role + this.customResource.node.addDependency(this.managementPolicy); + + this.addNagSuppressions(); + } + + /** + * Create IAM policy for AgentCore management operations + */ + private createManagementPolicy(agentExecutionRole: iam.Role, useCaseConfigTableName: string): iam.Policy { + return new iam.Policy(this, 'AgentCoreManagementPolicy', { + statements: [ + // AgentCore Runtime management permissions + new iam.PolicyStatement({ + sid: 'AgentCoreRuntimeManagement', + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:CreateAgentRuntime', + 'bedrock-agentcore:CreateAgentRuntimeEndpoint', + 'bedrock-agentcore:CreateWorkloadIdentity', + 'bedrock-agentcore:UpdateAgentRuntime', + 'bedrock-agentcore:DeleteAgentRuntime', + 'bedrock-agentcore:GetAgentRuntime', + 'bedrock-agentcore:ListAgentRuntimes', + 'bedrock-agentcore:ListAgentRuntimeEndpoints', + 'bedrock-agentcore:ListAgentRuntimeVersions' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:runtime/*`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:workload-identity-directory/*` + ] + }), + // ECR permissions for pull-through cache triggering + new iam.PolicyStatement({ + sid: 'ECRPullThroughCache', + effect: iam.Effect.ALLOW, + actions: [ + 'ecr:DescribeRepositories', + 'ecr:BatchGetImage', + 'ecr:DescribeImages', + 'ecr:CreateRepository', + 'ecr:BatchImportUpstreamImage' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:ecr:*:${cdk.Aws.ACCOUNT_ID}:repository/*` + ] + }), + // DynamoDB permissions to update use case config with memory ID + new iam.PolicyStatement({ + sid: 'DynamoDBConfigUpdate', + effect: iam.Effect.ALLOW, + actions: ['dynamodb:UpdateItem', 'dynamodb:GetItem'], + resources: [ + `arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/${useCaseConfigTableName}` + ] + }), + // Permission to pass the execution role to AgentCore + new iam.PolicyStatement({ + sid: 'PassRoleToAgentCore', + effect: iam.Effect.ALLOW, + actions: ['iam:PassRole'], + resources: [agentExecutionRole.roleArn] + }) + ] + }); + } + + /** + * Attach the management policy to the custom resource Lambda + */ + private attachPolicyToCustomResourceLambda(customResourceLambda: lambda.Function): void { + this.managementPolicy.attachToRole(customResourceLambda.role!); + } + + /** + * Create the custom resource for AgentCore Runtime deployment + */ + private createCustomResource(props: AgentRuntimeDeploymentProps): cdk.CustomResource { + // Build base properties + const baseProperties = { + Resource: 'DEPLOY_AGENT_CORE', + AgentRuntimeName: props.agentRuntimeName, + AgentImageUri: props.agentImageUri, + ExecutionRoleArn: props.agentExecutionRole.roleArn, + UseCaseUUID: props.useCaseUUID, + UseCaseConfigTableName: props.useCaseConfigTableName, + UseCaseConfigRecordKey: props.useCaseConfigRecordKey, + CognitoUserPoolId: props.cognitoUserPoolId, + // Initialize multimodal properties as empty strings - will be updated later if multimodal setup is available + MultimodalDataMetadataTable: '', + MultimodalDataBucket: '' + }; + + // Merge with additional properties if provided + const allProperties = { + ...baseProperties, + ...(props.additionalProperties || {}) + }; + + const customResource = new cdk.CustomResource(this, 'AgentCoreRuntimeCustomResource', { + resourceType: 'Custom::AgentCoreRuntime', + serviceToken: props.customResourceLambda.functionArn, + properties: allProperties + }); + + // Ensure the policy is attached before the custom resource is created + customResource.node.addDependency(this.managementPolicy); + + return customResource; + } + + /** + * Add CDK NAG suppressions + */ + private addNagSuppressions(): void { + NagSuppressions.addResourceSuppressions(this.managementPolicy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'Custom resource requires permissions to manage AgentCore Runtime resources with wildcard for resource instances', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::runtime/*', + 'Resource::arn::bedrock-agentcore:::workload-identity-directory/*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'Custom resource requires permissions to trigger ECR pull-through cache with wildcard for repository names and regions', + appliesTo: [ + 'Resource::arn::ecr:*::repository/*' + ] + } + ]); + } + + /** + * Get the AgentCore Runtime ARN from the custom resource + */ + public getAgentRuntimeArn(): string { + return this.customResource.getAttString('AgentRuntimeArn'); + } + + /** + * Update multimodal properties after multimodal setup is created + * This method should be called after the multimodal setup is complete + */ + public updateMultimodalProperties(multimodalDataMetadataTable: string, multimodalDataBucket: string): void { + const cfnCustomResource = this.customResource.node.defaultChild as cdk.CfnCustomResource; + + cfnCustomResource.addPropertyOverride('MultimodalDataMetadataTable', multimodalDataMetadataTable); + cfnCustomResource.addPropertyOverride('MultimodalDataBucket', multimodalDataBucket); + } +} diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/components/ecr-pull-through-cache.ts b/source/infrastructure/lib/use-case-stacks/agent-core/components/ecr-pull-through-cache.ts new file mode 100644 index 00000000..c264a786 --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/components/ecr-pull-through-cache.ts @@ -0,0 +1,144 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as ecr from 'aws-cdk-lib/aws-ecr'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { Construct } from 'constructs'; +import { + ECR_UPSTREAM_REGISTRY, + GAAB_STRANDS_AGENT_IMAGE_NAME, + GAAB_STRANDS_WORKFLOW_IMAGE_NAME +} from '../../../utils/constants'; +import { + resolveUpstreamRegistryUrl, + resolveUpstreamRepositoryPrefix, + constructPullThroughCacheImageUri +} from '../utils/image-uri-resolver'; + +/** + * Properties for ECR Pull-Through Cache setup + */ +export interface ECRPullThroughCacheProps { + /** + * GAAB version for image tagging + */ + gaabVersion: string; + + /** + * Custom resource Lambda function for generating repository prefix + * Always required - ensures all prefixes are generated at deployment time + */ + customResourceLambda: lambda.Function; + + /** + * Use case short ID for standalone deployments + * When provided, generates gaab-agents-{useCaseShortId} prefix + * When not provided, uses stack name for prefix generation (shared cache) + */ + useCaseShortId?: string; +} + +/** + * Helper class to manage ECR Pull-Through Cache for AgentCore images + * + * This component creates pull-through cache rules with unique prefixes to avoid conflicts. + * Supports both shared caches (deployment platform) and standalone caches (individual use cases). + * + * The cache is configured with namespace isolation using the 'aws-solutions' upstream repository prefix, + * ensuring only images from the aws-solutions namespace are cached for security and cost optimization. + * + * Enhanced with CI/CD integration support for environment variable overrides: + * - PUBLIC_ECR_REGISTRY: Override upstream registry URL + * - PUBLIC_ECR_TAG: Override image tag resolution + */ +export class ECRPullThroughCache extends Construct { + public readonly pullThroughCacheRule: ecr.CfnPullThroughCacheRule; + private readonly cacheRepositoryPrefix: string; + private readonly gaabVersion: string; + private readonly isSharedCache: boolean; + + constructor(scope: Construct, id: string, props: ECRPullThroughCacheProps) { + super(scope, id); + + this.gaabVersion = props.gaabVersion; + // Determine if this is a shared cache based on whether useCaseShortId is provided + this.isSharedCache = !props.useCaseShortId; + + this.cacheRepositoryPrefix = this.createEcrRepoPrefixCustomResource( + props.customResourceLambda, + props.useCaseShortId + ); + + this.pullThroughCacheRule = this.createPullThroughCacheRule(); + } + + /** + * Create custom resource to generate ECR repository prefix + * Supports both stack name-based (deployment platform) and UUID-based (standalone) prefixes + */ + private createEcrRepoPrefixCustomResource(customResourceLambda: lambda.Function, useCaseShortId?: string): string { + const properties: { [key: string]: any } = { + Resource: 'GEN_ECR_REPO_PREFIX' + }; + + if (useCaseShortId) { + // For standalone deployments: use UUID-based prefix + properties.UseCaseShortId = useCaseShortId; + } else { + // For deployment platform: use stack name-based prefix + properties.StackName = cdk.Aws.STACK_NAME; + } + + const customResource = new cdk.CustomResource(this, 'EcrRepoPrefixGenerator', { + resourceType: 'Custom::GenEcrRepoPrefix', + serviceToken: customResourceLambda.functionArn, + properties + }); + + return customResource.getAttString('EcrRepoPrefix'); + } + + /** + * Create the ECR Pull-Through Cache rule + */ + private createPullThroughCacheRule(): ecr.CfnPullThroughCacheRule { + const cacheRuleName = this.isSharedCache ? 'SharedAgentImageCache' : 'GaabAgentImageCache'; + + return new ecr.CfnPullThroughCacheRule(this, cacheRuleName, { + ecrRepositoryPrefix: this.cacheRepositoryPrefix, + upstreamRegistry: ECR_UPSTREAM_REGISTRY, + upstreamRegistryUrl: resolveUpstreamRegistryUrl(), + upstreamRepositoryPrefix: resolveUpstreamRepositoryPrefix() + }); + } + + /** + * Get the cached image URI for the agent image + */ + public getCachedImageUri(): string { + return this.constructCachedImageUri(GAAB_STRANDS_AGENT_IMAGE_NAME); + } + + /** + * Get the cached image URI for the workflow image + */ + public getCachedWorkflowImageUri(): string { + return this.constructCachedImageUri(GAAB_STRANDS_WORKFLOW_IMAGE_NAME); + } + + /** + * Get the repository prefix used for caching + */ + public getRepositoryPrefix(): string { + return this.cacheRepositoryPrefix; + } + + /** + * Construct cached image URI for a specific image type + */ + private constructCachedImageUri(imageType: string): string { + return constructPullThroughCacheImageUri(this.cacheRepositoryPrefix, imageType, this.gaabVersion); + } +} diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/utils/image-uri-resolver.ts b/source/infrastructure/lib/use-case-stacks/agent-core/utils/image-uri-resolver.ts new file mode 100644 index 00000000..052daf7b --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/utils/image-uri-resolver.ts @@ -0,0 +1,466 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import { Construct } from 'constructs'; +import { + ECR_UPSTREAM_REGISTRY_URL, + ECR_UPSTREAM_REPOSITORY_PREFIX, + GAAB_STRANDS_AGENT_IMAGE_NAME, + GAAB_STRANDS_WORKFLOW_IMAGE_NAME, + StackDeploymentSource +} from '../../../utils/constants'; + +/** + * Error class for ECR image resolution failures + */ +export class ECRImageError extends Error { + constructor( + message: string, + public category: 'build' | 'push' | 'resolution' | 'validation', + public context: Record + ) { + super(message); + this.name = 'ECRImageError'; + } +} + +/** + * Interface for image resolution context + */ +export interface ImageResolutionContext { + deploymentMode: 'local' | 'pipeline'; + customImageUri?: string; + sharedEcrCachePrefix?: string; + useCaseShortId?: string; + gaabVersion: string; // Should be in format like 'v4.0.0' or 'v4.0.0-local' (already sanitized) +} + +/** + * Interface for image resolution result + */ +export interface ImageResolutionResult { + imageUri: string; + resolutionStrategy: 'custom' | 'local-ecr' | 'pull-through-cache'; + metadata: { + version: string; + registry: string; + repository: string; + tag: string; + }; +} + +/** + * Resolves the solution version from multiple sources + * Priority: VERSION environment variable > CDK context (which includes cdk.json) + * + * @param construct - CDK construct for accessing context + * @returns resolved version string + */ +export function resolveSolutionVersion(construct: Construct): string { + try { + // Priority 1: VERSION environment variable (CI/CD pipelines) + if (process.env.VERSION) { + return process.env.VERSION; + } + + // Priority 2: CDK context (automatically includes cdk.json context values) + const contextVersion = construct.node.tryGetContext('solution_version'); + if (contextVersion) { + return contextVersion; + } + + throw new ECRImageError('Unable to resolve solution version from environment or CDK context', 'resolution', { + envVersion: process.env.VERSION, + contextVersion + }); + } catch (error) { + if (error instanceof ECRImageError) { + throw error; + } + throw new ECRImageError( + `Failed to resolve solution version: ${error instanceof Error ? error.message : String(error)}`, + 'resolution', + { originalError: error } + ); + } +} + +/** + * Sanitizes version tag to handle CI/CD pipeline version formats and local deployments + * Removes double 'v' prefix that can occur in CI/CD environments and adds local suffix + */ +export function sanitizeVersionTag(versionTag: string, deploymentMode?: 'local' | 'pipeline'): string { + if (!versionTag || typeof versionTag !== 'string') { + return 'latest'; + } + + // Remove all leading 'v' characters to handle double/triple v prefixes + let cleanVersion = versionTag.replace(/^v+/, ''); + + // Add local suffix for local deployments if not already present + if (deploymentMode === 'local' && !cleanVersion.includes('-local')) { + cleanVersion = `${cleanVersion}-local`; + } + + // Add single 'v' prefix back for consistency + return `v${cleanVersion}`; +} + +/** + * Resolve upstream registry URL from environment variable or default + * Extracts the registry domain from the full registry URL (e.g., 'public.ecr.aws' from 'public.ecr.aws/prefix') + */ +export function resolveUpstreamRegistryUrl(): string { + try { + const envRegistry = process.env.PUBLIC_ECR_REGISTRY; + if (envRegistry) { + const registryDomain = envRegistry.split('/')[0]; + return registryDomain || ECR_UPSTREAM_REGISTRY_URL; + } + } catch (error) { + // Fall through to default on any error + console.warn( + `Failed to resolve upstream registry URL from PUBLIC_ECR_REGISTRY environment variable: ${ + error instanceof Error ? error.message : String(error) + }. Using default: ${ECR_UPSTREAM_REGISTRY_URL}` + ); + } + return ECR_UPSTREAM_REGISTRY_URL; +} + +/** + * Validates repository prefix format, skipping validation for CDK tokens + */ +function validateRepositoryPrefix(prefix: string): void { + // Skip validation if the string contains unresolved CDK tokens + if (cdk.Token.isUnresolved(prefix)) { + console.log(`Skipping validation for unresolved CDK token in repository prefix`); + return; + } + + // Basic validation for ECR repository prefix format + // ECR prefixes are typically alphanumeric with hyphens, underscores, or forward slashes + const validPrefixPattern = /^[a-z0-9][a-z0-9\-_\/]*$/i; + + if (!validPrefixPattern.test(prefix)) { + throw new Error(`Repository prefix contains invalid characters: ${prefix}`); + } +} + +/** + * Resolve upstream repository prefix from environment variable or default + */ +export function resolveUpstreamRepositoryPrefix(): string { + // Priority 1: Extract from PUBLIC_ECR_REGISTRY environment variable + const registryUrl = process.env.PUBLIC_ECR_REGISTRY; + if (registryUrl) { + // Parse URL to extract prefix + // Format: public.ecr.aws/prefix or public.ecr.aws + const parts = registryUrl.split('/'); + if (parts.length > 1) { + const prefix = parts[1]; + try { + validateRepositoryPrefix(prefix); + console.log(`Extracted repository prefix from PUBLIC_ECR_REGISTRY: ${prefix}`); + return prefix; + } catch (validationError) { + // Log warning but don't throw - fall back to default + console.warn( + `Invalid repository prefix extracted from PUBLIC_ECR_REGISTRY: ${ + validationError instanceof Error ? validationError.message : String(validationError) + }. Using default: ${ECR_UPSTREAM_REPOSITORY_PREFIX}` + ); + return ECR_UPSTREAM_REPOSITORY_PREFIX; + } + } + console.log(`PUBLIC_ECR_REGISTRY has no prefix, using default: ${ECR_UPSTREAM_REPOSITORY_PREFIX}`); + return ECR_UPSTREAM_REPOSITORY_PREFIX; + } + + // Priority 2: Default constant + console.log(`Using default repository prefix: ${ECR_UPSTREAM_REPOSITORY_PREFIX}`); + return ECR_UPSTREAM_REPOSITORY_PREFIX; +} + +/** + * Resolve image tag from environment variable or GAAB version + * Note: gaabVersion is already sanitized by the calling stack + */ +export function resolveImageTag(gaabVersion: string): string { + try { + if (process.env.PUBLIC_ECR_TAG) { + return sanitizeVersionTag(process.env.PUBLIC_ECR_TAG, 'pipeline'); + } + + // gaabVersion is already sanitized by agent-builder-stack, use as-is + return gaabVersion; + } catch (error) { + throw new ECRImageError( + `Failed to resolve image tag: ${error instanceof Error ? error.message : String(error)}`, + 'resolution', + { gaabVersion, envTag: process.env.PUBLIC_ECR_TAG, originalError: error } + ); + } +} + +/** + * Determines deployment mode based on environment variables + * + * @returns 'local' if DIST_OUTPUT_BUCKET is not set, 'pipeline' otherwise + */ +export function determineDeploymentMode(): 'local' | 'pipeline' { + return process.env.DIST_OUTPUT_BUCKET ? 'pipeline' : 'local'; +} + +/** + * Constructs local ECR image URI for development deployments + */ +export function constructLocalEcrImageUri(imageName: string, version: string): string { + if (!imageName || !version) { + throw new ECRImageError('Image name and version are required for local ECR URI construction', 'validation', { + imageName, + version + }); + } + + const versionTag = resolveImageTag(version); + return `${cdk.Aws.ACCOUNT_ID}.dkr.ecr.${cdk.Aws.REGION}.amazonaws.com/${imageName}:${versionTag}`; +} + +/** + * Constructs pull-through cache image URI for pipeline deployments + */ +export function constructPullThroughCacheImageUri( + repositoryPrefix: string, + imageName: string, + version: string +): string { + if (!repositoryPrefix || !imageName || !version) { + throw new ECRImageError( + 'Repository prefix, image name, and version are required for pull-through cache URI construction', + 'validation', + { repositoryPrefix, imageName, version } + ); + } + + const resolvedTag = resolveImageTag(version); + + return cdk.Fn.sub( + '${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${RepositoryPrefix}/${ImageName}:${Tag}', + { + RepositoryPrefix: repositoryPrefix, + ImageName: imageName, + Tag: resolvedTag + } + ); +} + +/** + * Resolves image URI for local deployments only + * This is a simplified version used by workflow-stack for local ECR resolution + * + * @param construct - CDK construct for accessing context + * @param imageName - The ECR image name + * @param context - Image resolution context + * @returns resolved image URI result + */ +export function resolveImageUri( + construct: Construct, + imageName: string, + context: ImageResolutionContext +): ImageResolutionResult { + try { + // Validate inputs + if (!imageName) { + throw new ECRImageError('Image name is required for URI resolution', 'validation', { imageName, context }); + } + + // This simplified version only handles local deployments + if (context.deploymentMode === 'local') { + const resolvedVersion = resolveSolutionVersion(construct); + const localUri = constructLocalEcrImageUri(imageName, resolvedVersion); + + return { + imageUri: localUri, + resolutionStrategy: 'local-ecr', + metadata: { + version: resolvedVersion, + registry: `${cdk.Aws.ACCOUNT_ID}.dkr.ecr.${cdk.Aws.REGION}.amazonaws.com`, + repository: imageName, + tag: resolveImageTag(resolvedVersion) + } + }; + } + + throw new ECRImageError( + `This simplified resolver only supports local deployments. Use resolveImageUriWithConditions for pipeline deployments.`, + 'resolution', + { imageName, context } + ); + } catch (error) { + if (error instanceof ECRImageError) { + throw error; + } + throw new ECRImageError( + `Image URI resolution failed: ${error instanceof Error ? error.message : String(error)}`, + 'resolution', + { imageName, context, originalError: error } + ); + } +} + +/** + * Resolves image URI with CloudFormation conditions for deployment-time parameter handling + * This function creates the proper CloudFormation conditional logic for image URI resolution + * + * Priority logic: + * 1. Local deployment: Always use local ECR (highest priority) + * 2. Pipeline deployment with custom URI: Use custom URI parameter if provided + * 3. Pipeline deployment fallback: Use pull-through cache (standalone vs shared) + * + * @param construct - CDK construct for creating conditions + * @param imageName - The ECR image name + * @param context - Image resolution context with CloudFormation parameters + * @param customImageUriParam - CloudFormation parameter for custom image URI + * @param sharedEcrCachePrefixParam - CloudFormation parameter for shared cache prefix + * @param stackDeploymentSource - The deployment source for this use case stack + * @param pullThroughCacheUri - Pre-built pull-through cache URI for standalone deployments + * @returns CloudFormation-compatible image URI string + */ +export function resolveImageUriWithConditions( + construct: Construct, + imageName: string, + context: ImageResolutionContext, + customImageUriParam: cdk.CfnParameter, + sharedEcrCachePrefixParam: cdk.CfnParameter, + stackDeploymentSource: string, + pullThroughCacheUri: string +): string { + try { + // Priority 1: Local deployment (highest priority) + if (context.deploymentMode === 'local') { + // For local deployments, always use local ECR regardless of parameters + return cdk.Fn.sub('${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${ImageName}:${Version}', { + ImageName: imageName, + Version: context.gaabVersion + }); + } + + // For pipeline deployments, create CloudFormation conditions + const isStandaloneDeploymentCondition = new cdk.CfnCondition( + construct, + 'IsStandaloneDeploymentConditionForImageUri', + { + expression: cdk.Fn.conditionEquals(stackDeploymentSource, StackDeploymentSource.STANDALONE_USE_CASE) + } + ); + + // Check if custom image URI is provided (not empty) + const hasCustomImageCondition = new cdk.CfnCondition(construct, 'HasCustomAgentImageCondition', { + expression: cdk.Fn.conditionNot(cdk.Fn.conditionEquals(customImageUriParam?.valueAsString ?? '', '')) + }); + + // Shared deployment image URI (shared pull-through cache) + const sharedImageUri = cdk.Fn.sub( + '${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${RepositoryPrefix}/${ImageName}:${Version}', + { + RepositoryPrefix: sharedEcrCachePrefixParam?.valueAsString, + ImageName: imageName, + Version: context.gaabVersion + } + ); + + // Default image URI based on deployment type (standalone vs shared) + const defaultImageUri = cdk.Fn.conditionIf( + isStandaloneDeploymentCondition.logicalId, + pullThroughCacheUri, + sharedImageUri + ); + + // Final URI: Custom image if provided, otherwise default + return cdk.Fn.conditionIf( + hasCustomImageCondition.logicalId, + customImageUriParam?.valueAsString ?? '', + defaultImageUri + ).toString(); + } catch (error) { + throw new ECRImageError( + `CloudFormation image URI resolution failed: ${error instanceof Error ? error.message : String(error)}`, + 'resolution', + { imageName, context, originalError: error } + ); + } +} + +/** + * Convenience function to resolve workflow image URI + * + * @param construct - CDK construct for accessing context + * @param context - Image resolution context + * @returns resolved workflow image URI result + */ +export function resolveWorkflowImageUri(construct: Construct, context: ImageResolutionContext): ImageResolutionResult { + return resolveImageUri(construct, GAAB_STRANDS_WORKFLOW_IMAGE_NAME, context); +} + +/** + * Convenience function to resolve agent image URI with CloudFormation conditions + * + * @param construct - CDK construct for creating conditions + * @param context - Image resolution context with CloudFormation parameters + * @param customImageUriParam - CloudFormation parameter for custom image URI + * @param sharedEcrCachePrefixParam - CloudFormation parameter for shared cache prefix + * @param stackDeploymentSource - The deployment source for this use case stack + * @param pullThroughCacheUri - Pre-built pull-through cache URI for standalone deployments + * @returns CloudFormation-compatible agent image URI string + */ +export function resolveAgentImageUriWithConditions( + construct: Construct, + context: ImageResolutionContext, + customImageUriParam: cdk.CfnParameter, + sharedEcrCachePrefixParam: cdk.CfnParameter, + stackDeploymentSource: string, + pullThroughCacheUri: string +): string { + return resolveImageUriWithConditions( + construct, + GAAB_STRANDS_AGENT_IMAGE_NAME, + context, + customImageUriParam, + sharedEcrCachePrefixParam, + stackDeploymentSource, + pullThroughCacheUri + ); +} + +/** + * Convenience function to resolve workflow image URI with CloudFormation conditions + * + * @param construct - CDK construct for creating conditions + * @param context - Image resolution context with CloudFormation parameters + * @param customImageUriParam - CloudFormation parameter for custom workflow image URI + * @param sharedEcrCachePrefixParam - CloudFormation parameter for shared cache prefix + * @param stackDeploymentSource - The deployment source for this use case stack + * @param pullThroughCacheUri - Pre-built pull-through cache URI for standalone deployments + * @returns CloudFormation-compatible workflow image URI string + */ +export function resolveWorkflowImageUriWithConditions( + construct: Construct, + context: ImageResolutionContext, + customImageUriParam: cdk.CfnParameter, + sharedEcrCachePrefixParam: cdk.CfnParameter, + stackDeploymentSource: string, + pullThroughCacheUri: string +): string { + return resolveImageUriWithConditions( + construct, + GAAB_STRANDS_WORKFLOW_IMAGE_NAME, + context, + customImageUriParam, + sharedEcrCachePrefixParam, + stackDeploymentSource, + pullThroughCacheUri + ); +} diff --git a/source/infrastructure/lib/use-case-stacks/agent-core/workflow-stack.ts b/source/infrastructure/lib/use-case-stacks/agent-core/workflow-stack.ts new file mode 100644 index 00000000..1a8d98ed --- /dev/null +++ b/source/infrastructure/lib/use-case-stacks/agent-core/workflow-stack.ts @@ -0,0 +1,214 @@ +#!/usr/bin/env node +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; + +import { Construct } from 'constructs'; + +import { BaseStack, BaseStackProps } from '../../framework/base-stack'; +import { + CHAT_PROVIDERS, + ECR_URI_PATTERN, + GAAB_STRANDS_WORKFLOW_IMAGE_NAME, + USE_CASE_TYPES +} from '../../utils/constants'; +import { AgentCoreBaseParameters, AgentCoreBaseStack } from './agent-core-base-stack'; +import { VPCSetup } from '../../vpc/vpc-setup'; + +/** + * CloudFormation parameters specific to multi-agent workflow deployment + * Extends the base AgentCoreBaseParameters with workflow-specific configuration + */ +export class WorkflowParameters extends AgentCoreBaseParameters { + /** + * Use cases table name for workflow agent discovery (for dashboard deployments) + */ + public useCasesTableName: cdk.CfnParameter; + + /** + * Optional custom ECR image URI for workflows + */ + public customWorkflowImageUri: cdk.CfnParameter; + + constructor(stack: BaseStack) { + super(stack); + } + + /** + * Create workflow-specific CloudFormation parameters + */ + protected createUseCaseSpecificParameters(stack: BaseStack): void { + this.createWorkflowSpecificParameters(stack); + this.createCustomImageParameters(stack); + } + + /** + * Create workflow-specific parameters + */ + private createWorkflowSpecificParameters(stack: BaseStack): void { + this.useCasesTableName = new cdk.CfnParameter(stack, 'UseCasesTableName', { + type: 'String', + description: + 'Internal parameter - Use cases table name for workflow agent discovery, automatically provided by deployment platform', + default: '', + constraintDescription: + 'Internal parameter - automatically populated by deployment platform for dashboard deployments' + }); + } + + /** + * Create custom image URI parameters + */ + private createCustomImageParameters(stack: BaseStack): void { + this.customWorkflowImageUri = new cdk.CfnParameter(stack, 'CustomWorkflowImageUri', { + type: 'String', + description: + 'Optional custom ECR image URI for workflows. If provided, overrides default image resolution.', + default: '', + allowedPattern: ECR_URI_PATTERN + '|^$', + constraintDescription: this.getCustomImageConstraintDescription(USE_CASE_TYPES.WORKFLOW) + }); + } + + /** + * Get the custom image parameter for this use case type + */ + public getCustomImageParameter(): cdk.CfnParameter { + return this.customWorkflowImageUri; + } + + /** + * Update CloudFormation parameter groups to include workflow configuration + */ + protected getBaseConfigurationGroupLabel(): string { + return 'Workflow Configuration'; + } + + /** + * Get workflow-specific parameter group configuration + */ + protected getUseCaseSpecificParameterGroup(): { Label: { default: string }; Parameters: string[] } | undefined { + return { + Label: { default: 'Workflow Agent Discovery (Advanced)' }, + Parameters: [this.useCasesTableName.logicalId] + }; + } + + /** + * Get workflow-specific parameter labels for better CloudFormation console UX + */ + protected getUseCaseSpecificParameterLabels(): Record { + return { + [this.useCasesTableName.logicalId]: 'Use Cases Table Name', + [this.customWorkflowImageUri.logicalId]: 'Custom Workflow Image URI' + }; + } +} + +/** + * The main stack creating the multi-agent workflow infrastructure + * + * This stack orchestrates the deployment of multi-agent workflows using the "Agents as Tools" pattern, + * where a supervisor agent coordinates with specialized sub-agents to handle complex workflows. + * It extends the AgentCoreBaseStack to inherit common AgentCore functionality while providing + * workflow-specific behavior through abstract method implementations. + */ +export class WorkflowStack extends AgentCoreBaseStack { + constructor(scope: Construct, id: string, props: BaseStackProps) { + super(scope, id, props); + } + + /** + * Get the image name for workflow deployments + */ + public getImageName(): string { + return GAAB_STRANDS_WORKFLOW_IMAGE_NAME; + } + + /** + * Get the use case type for workflow deployments + */ + public getUseCaseType(): USE_CASE_TYPES { + return USE_CASE_TYPES.WORKFLOW; + } + + /** + * Get the WebSocket route name for workflow invocation + */ + public getWebSocketRouteName(): string { + return 'invokeWorkflow'; + } + + /** + * Get the LLM provider name for workflow deployments + * Appends 'Workflow' to distinguish from single agents + */ + public getLlmProviderName(): CHAT_PROVIDERS { + return `${CHAT_PROVIDERS.AGENT_CORE}Workflow` as CHAT_PROVIDERS; + } + + /** + * Get the agent runtime name pattern for workflow deployments + */ + public getAgentRuntimeName(): string { + return `gaab_workflow_${this.stackParameters.useCaseShortId}`; + } + + /** + * Determine if workflow deployments should include inference profile support + * Initially set to false as workflows may not need inference profiles + */ + public shouldIncludeInferenceProfileSupport(): boolean { + return true; + } + + /** + * Initialize CloudFormation parameters + */ + protected initializeCfnParameters(): void { + this.stackParameters = new WorkflowParameters(this); + } + + /** + * Override to provide workflow-specific execution role permissions + * Workflows need access to the use cases table for agent discovery + */ + protected getAdditionalAgentExecutionRolePermissions(): iam.PolicyStatement[] | undefined { + const workflowBuilderParams = this.stackParameters as WorkflowParameters; + const useCasesTableName = workflowBuilderParams.useCasesTableName.valueAsString; + + // Only add permissions if use cases table name is provided (dashboard deployments) + if (useCasesTableName && useCasesTableName !== '') { + return [ + new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['dynamodb:Query', 'dynamodb:GetItem', 'dynamodb:Scan'], + resources: [ + `arn:${cdk.Aws.PARTITION}:dynamodb:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:table/${useCasesTableName}` + ] + }) + ]; + } + + return undefined; + } + + /** + * Set up VPC configuration for Workflow stack + * Note: Amazon Bedrock AgentCore (preview) does not support VPC deployments. + * The VPC setup will create minimal infrastructure for future compatibility. + */ + protected setupVPC(): VPCSetup { + return new VPCSetup(this, 'VPC', { + stackType: 'workflow', + deployVpcCondition: this.deployVpcCondition, + customResourceLambdaArn: this.applicationSetup.customResourceLambda.functionArn, + customResourceRoleArn: this.applicationSetup.customResourceLambda.role!.roleArn, + iPamPoolId: this.iPamPoolId.valueAsString, + accessLogBucket: this.applicationSetup.accessLoggingBucket, + ...this.baseStackProps + }); + } +} diff --git a/source/infrastructure/lib/utils/app-registry-aspects.ts b/source/infrastructure/lib/utils/app-registry-aspects.ts deleted file mode 100644 index 9ae1159e..00000000 --- a/source/infrastructure/lib/utils/app-registry-aspects.ts +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env node -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import * as appreg from '@aws-cdk/aws-servicecatalogappregistry-alpha'; -import * as cdk from 'aws-cdk-lib'; -import { CfnResourceAssociation } from 'aws-cdk-lib/aws-servicecatalogappregistry'; -import { Construct, IConstruct } from 'constructs'; -import { hashValues } from './common-utils'; - -export interface AppRegistryProps { - /** - * Name of the solution as set through from cdk.json - */ - solutionName: string; - - /** - * Name of the application used to create an entry in AppRegistry as set through cdk.json - */ - applicationName: string; - - /** - * Solution ID associated with the application - */ - solutionID: string; - /** - * Solution version of the application - */ - solutionVersion: string; - /** - * An application type attribute initialized in the constructor of this class - */ - applicationType: string; -} - -/** - * A CDK Aspect to add App Registry constructs - */ -export class AppRegistry extends Construct implements cdk.IAspect { - /** - * Name of the solution as set through from cdk.json - */ - private solutionName: string; - - /** - * Name of the application used to create an entry in AppRegistry as set through cdk.json - */ - private applicationName: string; - - /** - * Solution ID as set through cdk.json - */ - private solutionID: string; - - /** - * Solution version as set through cdk.json - */ - private solutionVersion: string; - - /** - * An application type attribute initialized in the constructor of this class - */ - private applicationType: string; - - /** - * The instance of application that the solution stacks should be associated with - */ - private application: Map; - - /** - * The instance of attribute group that the solution stacks should be associated with - */ - private attributeGroup: appreg.AttributeGroup; - - constructor(scope: Construct, id: string, props: AppRegistryProps) { - super(scope, id); - this.solutionName = props.solutionName; - this.applicationName = `App-${props.applicationName}`; - this.solutionID = props.solutionID; - this.solutionVersion = props.solutionVersion; - this.applicationType = props.applicationType; - this.application = new Map(); - } - - /** - * Method invoked as a `Visitor` pattern to inject aspects during cdk synthesis - * - * @param node - */ - public visit(node: IConstruct): void { - if (node instanceof cdk.Stack) { - if (!node.nested) { - // parent stack - if (!this.application.get(node.stackId)) { - this.createAppForAppRegistry(node); - } - const stack = node; - this.application.get(node.stackId)!.associateApplicationWithStack(stack); - if (!this.attributeGroup) { - this.createAttributeGroup(node); - } - this.addTagsforApplication(node); - } else { - if (!this.application.get(node.nestedStackParent!.stackId)) { - this.createAppForAppRegistry(node.nestedStackParent!); - } - - const nestedStack = node; - - // prettier-ignore - new CfnResourceAssociation( - nestedStack, - `ResourceAssociation${hashValues(cdk.Names.nodeUniqueId(nestedStack.node))}`, - { - application: this.application.get(node.nestedStackParent!.stackId)!.applicationId, - resource: node.stackId, - resourceType: 'CFN_STACK' - } - ); - - (nestedStack.node.defaultChild as cdk.CfnResource).addDependency( - this.application.get(node.nestedStackParent!.stackId)!.node.defaultChild as cdk.CfnResource - ); - } - } - } - - /** - * Method to initialize an Application in AppRegistry service - * - * @returns - Instance of AppRegistry's Application class - */ - private createAppForAppRegistry(stack: cdk.Stack): void { - this.application.set( - stack.stackId, - new appreg.Application(stack, `RegistrySetup`, { - applicationName: this.applicationNameValue, - description: `Service Catalog application to track and manage all your resources for the solution ${this.solutionName}` - }) - ); - } - - private get applicationNameValue(): string { - return `${this.applicationName}-${cdk.Aws.STACK_NAME}`; - } - - /** - * Method to add tags to the AppRegistry's Application instance - * - */ - private addTagsforApplication(node: cdk.Stack): void { - if (!this.application.get(node.stackId)) { - this.createAppForAppRegistry(node); - } - - cdk.Tags.of(this.application.get(node.stackId)!).add('Solutions:SolutionID', this.solutionID); - cdk.Tags.of(this.application.get(node.stackId)!).add('Solutions:SolutionName', this.solutionName); - cdk.Tags.of(this.application.get(node.stackId)!).add('Solutions:SolutionVersion', this.solutionVersion); - cdk.Tags.of(this.application.get(node.stackId)!).add('Solutions:ApplicationType', this.applicationType); - } - - /** - * Method to create AttributeGroup to be associated with the Application's instance in AppRegistry - * - */ - private createAttributeGroup(node: cdk.Stack): void { - if (!this.application.get(node.stackId)) { - this.createAppForAppRegistry(node); - } - this.attributeGroup = new appreg.AttributeGroup(node, `AppAttributes`, { - attributeGroupName: `AttrGrp-${cdk.Aws.STACK_NAME}`, - description: 'Attributes for Solutions Metadata', - attributes: { - applicationType: this.applicationType, - version: this.solutionVersion, - solutionID: this.solutionID, - solutionName: this.solutionName - } - }); - this.attributeGroup.associateWith(this.application.get(node.stackId)!); - } -} diff --git a/source/infrastructure/lib/utils/common-utils.ts b/source/infrastructure/lib/utils/common-utils.ts index c5cf9eb3..395bb58d 100644 --- a/source/infrastructure/lib/utils/common-utils.ts +++ b/source/infrastructure/lib/utils/common-utils.ts @@ -190,7 +190,7 @@ export function getResourceProperties( asset: s3_asset.Asset, customResourceLambda?: lambda.Function, customResourceRole?: iam.IRole -): { properties: { [key: string]: any }, policy: iam.Policy } { +): { properties: { [key: string]: any }; policy: iam.Policy } { let assetReadPolicy: iam.Policy; let resourcePropertiesJson; @@ -250,7 +250,7 @@ export function getResourceProperties( return { properties: resourcePropertiesJson, policy: assetReadPolicy - } + }; } /** * Generates the CFN template URL to add it to the IAM policy condition. The intent is to restrict the policy to only @@ -516,3 +516,101 @@ export function createCustomResourceForLambdaLogRetention( } }); } + +/** + * Setup Bedrock Agent Core permissions for the custom resource lambda + */ +export function setupAgentCorePermissions(role: iam.Role): iam.PolicyStatement { + const runtimePolicyStatement = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 'bedrock-agentcore:CreateAgentRuntime', + 'bedrock-agentcore:UpdateAgentRuntime', + 'bedrock-agentcore:DeleteAgentRuntime', + 'bedrock-agentcore:GetAgentRuntime', + 'bedrock-agentcore:ListAgentRuntimes', + 'bedrock-agentcore:CreateAgentRuntimeEndpoint', + 'bedrock-agentcore:UpdateAgentRuntimeEndpoint', + 'bedrock-agentcore:DeleteAgentRuntimeEndpoint', + 'bedrock-agentcore:GetAgentRuntimeEndpoint', + 'bedrock-agentcore:ListAgentRuntimeEndpoints', + 'bedrock-agentcore:ListAgentRuntimeVersions', + 'bedrock-agentcore:GetGateway', + 'bedrock-agentcore:UpdateGateway' + ], + resources: [ + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:runtime/*`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:runtime/*/runtime-endpoint/*`, + `arn:${cdk.Aws.PARTITION}:bedrock-agentcore:${cdk.Aws.REGION}:${cdk.Aws.ACCOUNT_ID}:gateway/*` + ] + }, +); + + const serviceRolePolicyStatement = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['iam:CreateServiceLinkedRole'], + resources: [ + `arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:role/aws-service-role/runtime-identity.bedrock-agentcore.amazonaws.com/AWSServiceRoleForBedrockAgentCoreRuntimeIdentity` + ], + conditions: { + StringEquals: { 'iam:AWSServiceName': 'runtime-identity.bedrock-agentcore.amazonaws.com' } + } + }); + + role.addToPolicy(runtimePolicyStatement); + role.addToPolicy(serviceRolePolicyStatement); + + NagSuppressions.addResourceSuppressions(role.node.tryFindChild('DefaultPolicy') as iam.Policy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to manage Bedrock AgentCore runtime resources for MCP server operations', + appliesTo: ['Resource::arn::bedrock-agentcore:::runtime/*'] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to manage Bedrock AgentCore runtime endpoint resources for MCP server operations', + appliesTo: [ + 'Resource::arn::bedrock-agentcore:::runtime/*/runtime-endpoint/*' + ] + }, + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role allows the custom resource lambda to manage Bedrock AgentCore gateway endpoint resources for permission manipulation', + appliesTo: ['Resource::arn::bedrock-agentcore:::gateway/*'] + } + ]); + + return runtimePolicyStatement; +} +/** + * Setup Agent Core permissions for the custom resource lambda with optional PassRole policy + */ +export function setupAgentCorePermissionsWithPassRole(customResourceRole: iam.Role, executionRoleArn?: string): void { + // Setup the basic Agent Core Runtime permissions + setupAgentCorePermissions(customResourceRole); + + // Add PassRole policy if execution role ARN is provided + if (executionRoleArn) { + const passRolePolicy = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['iam:PassRole'], + resources: [executionRoleArn], + conditions: { + StringEquals: { + 'iam:PassedToService': 'bedrock-agentcore.amazonaws.com' + } + } + }); + + customResourceRole.addToPolicy(passRolePolicy); + + // Add CDK NAG suppression for the PassRole policy + NagSuppressions.addResourceSuppressions(customResourceRole.node.tryFindChild('DefaultPolicy') as iam.Policy, [ + { + id: 'AwsSolutions-IAM5', + reason: 'The IAM role needs PassRole permission to allow Bedrock AgentCore to assume the execution role for MCP runtime operations', + appliesTo: [`Resource::${executionRoleArn}`] + } + ]); + } +} diff --git a/source/infrastructure/lib/utils/constants.ts b/source/infrastructure/lib/utils/constants.ts index 91e7c8c7..484366b0 100644 --- a/source/infrastructure/lib/utils/constants.ts +++ b/source/infrastructure/lib/utils/constants.ts @@ -6,7 +6,7 @@ import * as lambda from 'aws-cdk-lib/aws-lambda'; import * as logs from 'aws-cdk-lib/aws-logs'; import { Duration } from 'aws-cdk-lib'; -export const ANONYMOUS_METRICS_SCHEDULE = Duration.hours(3); +export const METRICS_SCHEDULE = Duration.hours(3); export const PLACEHOLDER_EMAIL = 'placeholder@example.com'; export const INTERNAL_EMAIL_DOMAIN = 'amazon'; @@ -44,7 +44,8 @@ export enum CloudWatchNamespace { USE_CASE_DEPLOYMENTS = 'Solution/UseCaseDeployments', USE_CASE_DETAILS = 'Solution/UseCaseDetails', FEEDBACK_MANAGEMENT = 'Solution/FeedbackManagement', - COLD_STARTS = 'Solution/ColdStarts' + COLD_STARTS = 'Solution/ColdStarts', + FILE_HANDLING = 'Solution/FileHandling' } export enum LLMStopReasons { @@ -119,7 +120,20 @@ export enum CloudWatchMetrics { INACCURATE_FEEDBACK_COUNT = 'InaccurateFeedbackCount', INCOMPLETE_OR_INSUFFICIENT_FEEDBACK_COUNT = 'IncompleteOrInsufficientFeedbackCount', HARMFUL_FEEDBACK_COUNT = 'HarmfulFeedbackCount', - OTHER_NEGATIVE_FEEDBACK_COUNT = 'OtherNegativeFeedbackCount' + OTHER_NEGATIVE_FEEDBACK_COUNT = 'OtherNegativeFeedbackCount', + + // Multimodal File Metrics + FILES_UPLOADED = 'FilesUploaded', + FILE_UPLOAD_FAILURE = 'FileUploadFailure', + FILE_ACCESS_FAILURES = 'FileAccessFailures', + FILE_DELETE = 'FileDelete', + FILE_DOWNLOAD = 'FileDownload', + FILE_EXTENSION = 'FileExtension', + FILE_SIZE = 'FileSize', + METADATA_UPDATE_FAILURE = 'MetadataUpdateFailure', + METADATA_VALIDATION_FAILURE = 'MetadataValidationFailure', + MULTIMODAL_DISABLED_ERROR = 'MultimodalDisabledError', + FILES_UPLOADED_WITH_EXTENSION = 'FilesExtUploaded' } export const ADDITIONAL_LLM_LIBRARIES = 'AdditionalLLMLibraries'; @@ -136,19 +150,45 @@ export enum LLM_LIBRARY_LAYER_TYPES { export const OPTIONAL_EMAIL_REGEX_PATTERN = "^$|[A-Za-z0-9_!#$%&'*+/=?`{|}~^.-]+@[A-Za-z0-9.-]+$"; export const MANDATORY_EMAIL_REGEX_PATTERN = "[A-Za-z0-9_!#$%&'*+/=?`{|}~^.-]+@[A-Za-z0-9.-]+$"; +// AWS Resource ARN patterns +export const LAMBDA_ARN_PATTERN = + '^arn:aws[a-zA-Z-]*:lambda:[a-z0-9-]+:[0-9]{12}:function:[a-zA-Z0-9-_]+(?::[a-zA-Z0-9-_]+)?$'; +export const BEDROCK_AGENTCORE_OAUTH_ARN_PATTERN = + '^arn:aws[a-zA-Z-]*:bedrock-agentcore:[a-z0-9-]+:[0-9]{12}:token-vault\\/[A-Za-z0-9._-]+\\/oauth2credentialprovider\\/[A-Za-z0-9._-]+$'; +export const BEDROCK_AGENTCORE_API_KEY_ARN_PATTERN = + '^arn:aws[a-zA-Z-]*:bedrock-agentcore:[a-z0-9-]+:[0-9]{12}:token-vault\\/[A-Za-z0-9._-]+\\/apikeycredentialprovider\\/[A-Za-z0-9._-]+$'; export enum UseCaseNames { CHAT = 'chat' } export enum USE_CASE_TYPES { TEXT = 'Text', - AGENT = 'Agent' + AGENT = 'Agent', + AGENT_BUILDER = 'AgentBuilder', + MCP_SERVER = 'MCPServer', + WORKFLOW = 'Workflow' } +export enum AGENTCORE_INSTANCE_TYPES { + RUNTIME = 'Runtime', + GATEWAY = 'Gateway' +} + +// Use case types that support chat functionality (conversation storage) +export const CHAT_ENABLED_USE_CASE_TYPES = [USE_CASE_TYPES.TEXT, USE_CASE_TYPES.AGENT]; + export enum CONVERSATION_MEMORY_TYPES { DYNAMODB = 'DynamoDB' } export const SUPPORTED_CONVERSATION_MEMORY_TYPES = [CONVERSATION_MEMORY_TYPES.DYNAMODB]; + +// Workflow-specific constants +export const WORKFLOW_MAX_SELECTED_AGENTS = 10; +export enum WORKFLOW_ORCHESTRATION_PATTERNS { + AGENT_AS_TOOLS = 'agents-as-tools' +} +export const SUPPORTED_WORKFLOW_ORCHESTRATION_PATTERNS: string[] = Object.values(WORKFLOW_ORCHESTRATION_PATTERNS); + export const DEFAULT_CONVERSATION_MEMORY_TYPE = CONVERSATION_MEMORY_TYPES.DYNAMODB; export enum KNOWLEDGE_BASE_TYPES { @@ -167,9 +207,14 @@ export enum BEDROCK_INFERENCE_TYPES { QUICK_START = 'QUICK_START', OTHER_FOUNDATION = 'OTHER_FOUNDATION', INFERENCE_PROFILE = 'INFERENCE_PROFILE', - PROVISIONED = 'PROVISIONED', + PROVISIONED = 'PROVISIONED' } -export const SUPPORTED_BEDROCK_INFERENCE_TYPES = [BEDROCK_INFERENCE_TYPES.QUICK_START, BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION, BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE, BEDROCK_INFERENCE_TYPES.PROVISIONED]; +export const SUPPORTED_BEDROCK_INFERENCE_TYPES = [ + BEDROCK_INFERENCE_TYPES.QUICK_START, + BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION, + BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE, + BEDROCK_INFERENCE_TYPES.PROVISIONED +]; export enum DynamoDBAttributes { CONVERSATION_TABLE_PARTITION_KEY = 'UserId', @@ -190,12 +235,13 @@ export enum DynamoDBAttributes { export const enum CHAT_PROVIDERS { BEDROCK = 'Bedrock', SAGEMAKER = 'SageMaker', - BEDROCK_AGENT = 'BedrockAgent' + BEDROCK_AGENT = 'BedrockAgent', + AGENT_CORE = 'AgentCore' } export const enum AUTHENTICATION_PROVIDERS { COGNITO = 'Cognito' } -export const SUPPORTED_CHAT_PROVIDERS = [CHAT_PROVIDERS.BEDROCK, CHAT_PROVIDERS.SAGEMAKER]; +export const SUPPORTED_CHAT_PROVIDERS = [CHAT_PROVIDERS.BEDROCK, CHAT_PROVIDERS.SAGEMAKER, CHAT_PROVIDERS.AGENT_CORE]; export const SUPPORTED_AUTHENTICATION_PROVIDERS = [AUTHENTICATION_PROVIDERS.COGNITO]; export const KENDRA_EDITIONS = ['DEVELOPER_EDITION', 'ENTERPRISE_EDITION']; @@ -203,6 +249,7 @@ export const DEFAULT_KENDRA_EDITION = 'DEVELOPER_EDITION'; export const DEFAULT_KNOWLEDGE_BASE_TYPE = KNOWLEDGE_BASE_TYPES.BEDROCK; // Environment variables used for configuring lambdas +export const POWERTOOLS_SERVICE_NAME_ENV_VAR = 'POWERTOOLS_SERVICE_NAME'; export const USE_CASE_CONFIG_RECORD_KEY_ENV_VAR = 'USE_CASE_CONFIG_RECORD_KEY'; export const USE_CASE_CONFIG_TABLE_NAME_ENV_VAR = 'USE_CASE_CONFIG_TABLE_NAME'; export const CONVERSATION_TABLE_NAME_ENV_VAR = 'CONVERSATION_TABLE_NAME'; @@ -222,9 +269,16 @@ export const TEMPLATE_FILE_EXTN_ENV_VAR = 'TEMPLATE_FILE_EXTN'; export const USE_CASE_API_KEY_SUFFIX_ENV_VAR = 'API_KEY_SUFFIX'; export const USE_CASE_UUID_ENV_VAR = 'USE_CASE_UUID'; export const WEBSOCKET_API_ID_ENV_VAR = 'WEBSOCKET_API_ID'; -export const FEEDBACK_ENABLED_ENV_VAR = 'FEEDBACK_ENABLED' +export const FEEDBACK_ENABLED_ENV_VAR = 'FEEDBACK_ENABLED'; export const REST_API_NAME_ENV_VAR = 'REST_API_NAME'; export const IS_INTERNAL_USER_ENV_VAR = 'IS_INTERNAL_USER'; +export const GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR = 'GAAB_DEPLOYMENTS_BUCKET'; +export const DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR = 'DEPLOYMENT_PLATFORM_STACK_NAME'; +export const SHARED_ECR_CACHE_PREFIX_ENV_VAR = 'SHARED_ECR_CACHE_PREFIX'; +export const STRANDS_TOOLS_SSM_PARAM_ENV_VAR = 'STRANDS_TOOLS_SSM_PARAM'; +export const MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR = 'MULTIMODAL_METADATA_TABLE_NAME'; +export const MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR = 'MULTIMODAL_DATA_BUCKET'; +export const MULTIMODAL_ENABLED_ENV_VAR = 'MULTIMODAL_ENABLED'; // values defining defaults and requirements for parameters export const DEFAULT_NEW_KENDRA_INDEX_NAME = 'GAABKnowledgeBaseIndex'; @@ -266,8 +320,92 @@ export const INVALID_REQUEST_HEADER_RESPONSE_CODE = 403; export const CUSTOM_RULE_PRIORITY = 7; export const HEADERS_NOT_ALLOWED_KEY = 'HeadersNotAllowed'; +// ECR Pull-Through Cache constants +export const ECR_REPOSITORY_PREFIX_MAX_LENGTH = 30; +export const GAAB_AGENTS_PREFIX = 'gaab-agents-'; +export const GAAB_AGENTS_PREFIX_LENGTH = GAAB_AGENTS_PREFIX.length; // 12 characters +export const ECR_HASH_LENGTH = 6; +export const ECR_UPSTREAM_REGISTRY = 'ecr-public'; +export const ECR_UPSTREAM_REGISTRY_URL = 'public.ecr.aws'; +export const ECR_UPSTREAM_REPOSITORY_PREFIX = 'aws-solutions'; +export const GAAB_STRANDS_AGENT_IMAGE_NAME = 'gaab-strands-agent'; +export const GAAB_STRANDS_WORKFLOW_IMAGE_NAME = 'gaab-strands-workflow-agent'; + +export const DEPLOYMENTS_BUCKET_ENV_VAR = 'DEPLOYMENTS_BUCKET'; + // Feedback related constants export const FEEDBACK_REASON_OPTIONS = ['Inaccurate', 'Incomplete or insufficient', 'Harmful', 'Other']; export const MAX_REPHRASED_QUERY_LENGTH = 1000; export const MAX_COMMENT_LENGTH = 500; export const FEEDBACK_VALUES = ['positive', 'negative']; + +// File upload related constants - for MCP schema files and multimodal support +export const UPLOADED_FILE_NAME_MIN_LENGTH = 1; +export const UPLOADED_FILE_NAME_MAX_LENGTH = 255; + +// MCP (Model Context Protocol) related constants for validations & AWS Service limits +// Reference: https://docs.aws.amazon.com/bedrock-agentcore-control/latest/APIReference/API_CreateGatewayTarget.html +export const MCP_INACTIVE_SCHEMA_EXPIRATION_DAYS = 1; +export const MCP_GATEWAY_MAX_TARGETS_PER_GATEWAY = 10; +export const MCP_GATEWAY_TARGET_NAME_MAX_LENGTH = 100; +export const MCP_GATEWAY_TARGET_DESCRIPTION_MAX_LENGTH = 200; +export const MCP_GATEWAY_API_KEY_MAX_LENGTH = 65536; +export const MCP_GATEWAY_OAUTH_CLIENT_ID_MAX_LENGTH = 256; +export const MCP_GATEWAY_OAUTH_CLIENT_SECRET_MAX_LENGTH = 2048; +export const MCP_GATEWAY_TARGET_NAME_PATTERN = '([0-9a-zA-Z][-]?){1,100}'; +export const MCP_GATEWAY_TARGET_TYPES = ['openApiSchema', 'smithyModel', 'lambda']; +export const MCP_ALLOWED_FILE_EXTENSIONS = ['json', 'yaml', 'yml', 'smithy']; +export const MCP_SCHEMA_FILE_NAME_PATTERN = `^.+\\.(${MCP_ALLOWED_FILE_EXTENSIONS.join('|')})$`; +export const MCP_GATEWAY_AUTH_TYPES = ['OAUTH', 'API_KEY']; + +// OAuth configuration validation +export const OAUTH_SCOPE_MAX_LENGTH = 64; +export const OAUTH_SCOPES_MAX_COUNT = 100; +export const OAUTH_CUSTOM_PARAM_KEY_MAX_LENGTH = 256; +export const OAUTH_CUSTOM_PARAM_VALUE_MAX_LENGTH = 2048; +export const OAUTH_CUSTOM_PARAMS_MAX_COUNT = 10; + +// API Key configuration validation +export const API_KEY_PARAM_NAME_MAX_LENGTH = 64; +export const API_KEY_PREFIX_MAX_LENGTH = 64; + +// MCP Runtime environment variables validation +export const MCP_RUNTIME_ENV_VARS_MAX_COUNT = 50; + +// MCP Schema key pattern - validates paths like: mcp/schemas/smithy/e9b1801d-2516-40fe-859e-a0c7d81da2f3.smithy +export const MCP_SCHEMA_KEY_PATTERN = + '^mcp/schemas/(lambda|openApiSchema|smithyModel)/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}\\.(json|yaml|yml|smithy)$'; + +// ECR image URI pattern +export const ECR_URI_PATTERN = + '^(\\d{12})\\.dkr\\.ecr\\.([a-z\\d-]+)\\.amazonaws\\.com\\/(?=.{2,256}:)((?:[a-z\\d]+(?:[._-][a-z\\d]+)*\\/)*[a-z\\d]+(?:[._-][a-z\\d]+)*):([a-zA-Z\\d._-]{1,300})$'; + +// Agent-specific limits for schema validation +// Note: System prompt length limit not explicitly documented in AWS AgentCore docs +// This is a reasonable application-level limit for UI validation +export const AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH = 60000; + +// Multimodal file operations constants +export const MAX_FILE_UPLOADS_PER_BATCH = 25; // Up to 20 images and 5 documents allowed by Converse API +export const MAX_FILE_DELETES_PER_BATCH = 25; // DDB Batch delete can include up to 25 individual delete operations +export const UUID_PATTERN = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'; +export const SUPPORTED_MULTIMODAL_IMAGE_EXTENSIONS = ['png', 'jpeg', 'jpg', 'gif', 'webp']; +export const SUPPORTED_MULTIMODAL_DOCUMENT_EXTENSIONS = [ + 'pdf', + 'csv', + 'doc', + 'docx', + 'xls', + 'xlsx', + 'html', + 'txt', + 'md' +]; +export const SUPPORTED_MULTIMODAL_FILE_EXTENSIONS = [ + ...SUPPORTED_MULTIMODAL_IMAGE_EXTENSIONS, + ...SUPPORTED_MULTIMODAL_DOCUMENT_EXTENSIONS +]; +// Pattern that allows safe file names while preventing path traversal attacks +// Must end with a supported file extension and cannot contain path separators (/ or \) +export const MULTIMODAL_FILENAME_PATTERN = `^[a-zA-Z0-9](?:[a-zA-Z0-9_-]|[\x20](?=[a-zA-Z0-9_-]))*\.(${SUPPORTED_MULTIMODAL_FILE_EXTENSIONS.join('|')})$`; +export const MULTIMODAL_FILE_EXPIRATION_DAYS = 2; diff --git a/source/infrastructure/lib/utils/custom-infra-setup.ts b/source/infrastructure/lib/utils/custom-infra-setup.ts index 2b0be1fd..ccc93889 100644 --- a/source/infrastructure/lib/utils/custom-infra-setup.ts +++ b/source/infrastructure/lib/utils/custom-infra-setup.ts @@ -11,7 +11,7 @@ import { Construct } from 'constructs'; import { ApplicationAssetBundler } from '../framework/bundler/asset-options-factory'; import * as cfn_guard from '../utils/cfn-guard-suppressions'; import { createCustomResourceForLambdaLogRetention, createDefaultLambdaRole } from './common-utils'; -import { ANONYMOUS_METRICS_SCHEDULE, COMMERCIAL_REGION_LAMBDA_PYTHON_RUNTIME } from './constants'; +import { METRICS_SCHEDULE, COMMERCIAL_REGION_LAMBDA_PYTHON_RUNTIME } from './constants'; export interface CustomInfraProps { /** @@ -24,11 +24,6 @@ export interface CustomInfraProps { */ solutionVersion: string; - /** - * Condition to determine if anonymous metrics should be collected - */ - sendAnonymousMetricsCondition: cdk.CfnCondition; - /** * Use case UUID passed as CFN parameter */ @@ -92,7 +87,7 @@ export class CustomInfraSetup extends Construct { const scheduledMetricsRole = createDefaultLambdaRole(scope, 'ScheduledMetricsLambdaRole'); - this.scheduledMetricsLambda = new lambda.Function(this, 'ScheduledAnonymousMetrics', { + this.scheduledMetricsLambda = new lambda.Function(this, 'ScheduledMetrics', { code: lambda.Code.fromAsset( '../lambda/custom-resource', ApplicationAssetBundler.assetBundlerFactory() @@ -105,7 +100,7 @@ export class CustomInfraSetup extends Construct { tracing: lambda.Tracing.ACTIVE, description: 'A lambda function that runs as per defined schedule to publish metrics', environment: { - POWERTOOLS_SERVICE_NAME: 'ANONYMOUS-CW-METRICS', + POWERTOOLS_SERVICE_NAME: 'CW-METRICS', SOLUTION_ID: props.solutionID, SOLUTION_VERSION: props.solutionVersion, ...(props.useCaseUUID && { USE_CASE_UUID_ENV_VAR: props.useCaseUUID }) @@ -113,7 +108,7 @@ export class CustomInfraSetup extends Construct { timeout: cdk.Duration.minutes(15) }); - const logRetentionForSchedule = createCustomResourceForLambdaLogRetention( + createCustomResourceForLambdaLogRetention( this, 'ScheduleLogRetention', this.scheduledMetricsLambda.functionName, @@ -131,33 +126,14 @@ export class CustomInfraSetup extends Construct { this.scheduledMetricsLambda.role!.attachInlinePolicy(getMetricsDataPolicy); - (this.scheduledMetricsLambda.node.tryFindChild('Resource') as cdk.CfnCustomResource).cfnOptions.condition = - props.sendAnonymousMetricsCondition; - (logRetentionForSchedule.node.defaultChild as cdk.CfnCustomResource).cfnOptions.condition = - props.sendAnonymousMetricsCondition; - - // eventbridge rule to the default event-bus to push anonymous metrics + // eventbridge rule to the default event-bus to push metrics const rule = new events.Rule(this, 'MetricsPublishFrequency', { - schedule: events.Schedule.rate(ANONYMOUS_METRICS_SCHEDULE) + schedule: events.Schedule.rate(METRICS_SCHEDULE) }); - (rule.node.tryFindChild('Resource') as cdk.CfnCustomResource).cfnOptions.condition = - props.sendAnonymousMetricsCondition; const ruleTarget = new LambdaFunction(this.scheduledMetricsLambda); rule.addTarget(ruleTarget); - if ( - rule.node.tryFindChild( - 'AllowEventRuleDeploymentPlatformStackDeploymentPlatformSetupInfraSetupScheduledAnonymousMetricsCE3BF485' - ) - ) { - ( - rule.node.tryFindChild( - 'AllowEventRuleDeploymentPlatformStackDeploymentPlatformSetupInfraSetupScheduledAnonymousMetricsCE3BF485' - ) as cdk.CfnCustomResource - ).cfnOptions.condition = props.sendAnonymousMetricsCondition; - } - NagSuppressions.addResourceSuppressions(getMetricsDataPolicy, [ { id: 'AwsSolutions-IAM5', diff --git a/source/infrastructure/lib/utils/solution-helper.ts b/source/infrastructure/lib/utils/solution-helper.ts index 8a349158..cc0b5d7c 100644 --- a/source/infrastructure/lib/utils/solution-helper.ts +++ b/source/infrastructure/lib/utils/solution-helper.ts @@ -9,7 +9,7 @@ import { Construct } from 'constructs'; export interface SolutionHelperProps { /** - * The custom resource lambda function to be used for pushing anonymous metrics data + * The custom resource lambda function to be used for pushing metrics data */ customResource: lambda.Function; @@ -23,11 +23,6 @@ export interface SolutionHelperProps { */ version: string; - /** - * Condition to determine if anonymous metrics should be collected - */ - sendAnonymousMetricsCondition: cdk.CfnCondition; - /** * additional resource properties that should be passed to the solution helper */ @@ -35,18 +30,18 @@ export interface SolutionHelperProps { } /** - * This construct creates the custom resource required to publish anonymous metrics data to the solution builder + * This construct creates the custom resource required to publish metrics data to the solution builder * endpoint */ export class SolutionHelper extends Construct { constructor(scope: Construct, id: string, props: SolutionHelperProps) { super(scope, id); - const anonymousData = new cdk.CustomResource(this, 'AnonymousData', { - resourceType: 'Custom::AnonymousData', + new cdk.CustomResource(this, 'Data', { + resourceType: 'Custom::Data', serviceToken: props.customResource.functionArn, properties: { - Resource: 'ANONYMOUS_METRIC', + Resource: 'METRIC', SolutionId: props.solutionID, Version: props.version, ...(props.resourceProperties && props.resourceProperties) // NOSONAR - use of `&&` in conjunction with spread operator. @@ -67,8 +62,5 @@ export class SolutionHelper extends Construct { ddbPolicy.attachToRole(props.customResource.role!); } - - (anonymousData.node.tryFindChild('Default') as cdk.CfnCustomResource).cfnOptions.condition = - props.sendAnonymousMetricsCondition; } } diff --git a/source/infrastructure/lib/vpc/agent-builder-vpc.ts b/source/infrastructure/lib/vpc/agent-builder-vpc.ts new file mode 100644 index 00000000..f46f5e11 --- /dev/null +++ b/source/infrastructure/lib/vpc/agent-builder-vpc.ts @@ -0,0 +1,55 @@ +#!/usr/bin/env node + +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { CustomVPC, CustomVPCProps } from './custom-vpc'; + +/** + * VPC for AgentBuilder use case deployment + * + * IMPORTANT: Amazon Bedrock AgentCore (preview service) does not currently support VPC deployments. + * This class creates minimal VPC infrastructure for future compatibility only. + * No actual VPC resources are created to avoid unnecessary costs and complexity. + * VPC support will be added in future releases of Amazon Bedrock AgentCore. + */ +export class AgentBuilderVPC extends CustomVPC { + constructor(scope: any, id: string, props: CustomVPCProps) { + super(scope, id, props); + + // Agent Core v4.0.0 does not support VPC deployments + // Create minimal infrastructure for future compatibility + this.createMinimalInfrastructure(); + this.setMinimalOutputs(); + } + + /** + * Create minimal infrastructure for Amazon Bedrock AgentCore + * Since Amazon Bedrock AgentCore (preview) doesn't support VPC deployments, we create no actual resources + */ + private createMinimalInfrastructure(): void { + // No VPC resources created for Amazon Bedrock AgentCore preview + // This method exists for future compatibility when VPC support is added + // Future releases will create: + // - VPC with subnets + // - Security groups + // - VPC endpoints for bedrock-agentcore service + // - Private connectivity to Amazon Bedrock AgentCore Runtime + } + + /** + * Set minimal outputs for Agent Core v4.0.0 + * No outputs needed since Agent Core runs in non-VPC mode + */ + private setMinimalOutputs(): void { + // No outputs needed for Agent Core v4.0.0 + // Agent Core components run in non-VPC mode and don't require VPC configuration + } + + /** + * Returns the stack type that the VPC is being used in + */ + public getStackType(): string { + return 'agent-builder'; + } +} diff --git a/source/infrastructure/lib/vpc/vpc-setup.ts b/source/infrastructure/lib/vpc/vpc-setup.ts index a7e965b7..3a87b033 100644 --- a/source/infrastructure/lib/vpc/vpc-setup.ts +++ b/source/infrastructure/lib/vpc/vpc-setup.ts @@ -6,6 +6,7 @@ import * as cdk from 'aws-cdk-lib'; import * as s3 from 'aws-cdk-lib/aws-s3'; import { Construct } from 'constructs'; import { BaseStackProps } from '../framework/base-stack'; +import { AgentBuilderVPC } from './agent-builder-vpc'; import { BedrockAgentVPC } from './bedrock-agent-vpc'; import { BedrockUseCaseVPC } from './bedrock-vpc'; import { CustomVPC } from './custom-vpc'; @@ -125,6 +126,22 @@ export class VPCSetup extends Construct { }); break; } + case 'agent-builder': { + // Create minimal VPC stack for interface compatibility only + this.nestedVPCStack = new AgentBuilderVPC(this, 'AgentBuilderVPC', { + description: `Minimal VPC stack for AgentBuilder (Amazon Bedrock AgentCore runs in non-VPC mode) - Version ${props.solutionVersion}`, + parameters: coreParameters + }); + break; + } + case 'workflow': { + // Create minimal VPC stack for Workflow for interface compatibility only + this.nestedVPCStack = new AgentBuilderVPC(this, 'WorkflowVPC', { + description: `Minimal VPC stack for Workflow (Amazon Bedrock AgentCore runs in non-VPC mode) - Version ${props.solutionVersion}`, + parameters: coreParameters + }); + break; + } default: { throw new Error('Invalid VPC config'); } diff --git a/source/infrastructure/package-lock.json b/source/infrastructure/package-lock.json index a9f8cc1b..e89f55ec 100644 --- a/source/infrastructure/package-lock.json +++ b/source/infrastructure/package-lock.json @@ -1,18 +1,18 @@ { "name": "@amzn/gen-ai-app-builder-on-aws-infrastructure", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/gen-ai-app-builder-on-aws-infrastructure", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "dependencies": { - "@aws-cdk/aws-servicecatalogappregistry-alpha": "2.167.2-alpha.0", "@aws-solutions-constructs/aws-apigateway-lambda": "^2.74.0", "@aws-solutions-constructs/aws-apigatewayv2websocket-sqs": "^2.74.0", "@aws-solutions-constructs/aws-cloudfront-s3": "^2.74.0", + "@aws-solutions-constructs/aws-constructs-factories": "^2.92.1", "@aws-solutions-constructs/aws-lambda-dynamodb": "^2.74.0", "@aws-solutions-constructs/aws-sqs-lambda": "^2.74.0", "@aws-solutions-constructs/aws-wafwebacl-apigateway": "^2.74.0", @@ -42,7 +42,7 @@ "jsonschema": "^1.4.1", "mock-fs": "^5.4.1", "portfinder": "1.0.32", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.4", "ts-node": "^10.9.2", "typescript": "^5.7.2" @@ -72,18 +72,6 @@ "resolved": "https://registry.npmjs.org/@aws-cdk/asset-node-proxy-agent-v6/-/asset-node-proxy-agent-v6-2.1.0.tgz", "integrity": "sha512-7bY3J8GCVxLupn/kNmpPc5VJz8grx+4RKfnnJiO1LG+uxkZfANZG3RMHhE+qQxxwkyQ9/MfPtTpf748UhR425A==" }, - "node_modules/@aws-cdk/aws-servicecatalogappregistry-alpha": { - "version": "2.167.2-alpha.0", - "resolved": "https://registry.npmjs.org/@aws-cdk/aws-servicecatalogappregistry-alpha/-/aws-servicecatalogappregistry-alpha-2.167.2-alpha.0.tgz", - "integrity": "sha512-g10nDQwePk5xKf8fRvF4QcgjfLwvqfjFN8K2eQl7G3qqNOPLGzfO7NFKxE2puPbZ6kyUON5ADjI6Ly81d+XzMQ==", - "engines": { - "node": ">= 14.15.0" - }, - "peerDependencies": { - "aws-cdk-lib": "^2.167.2", - "constructs": "^10.0.0" - } - }, "node_modules/@aws-cdk/cloud-assembly-schema": { "version": "48.3.0", "resolved": "https://registry.npmjs.org/@aws-cdk/cloud-assembly-schema/-/cloud-assembly-schema-48.3.0.tgz", @@ -179,6 +167,69 @@ "constructs": "^10.0.0" } }, + "node_modules/@aws-solutions-constructs/aws-constructs-factories": { + "version": "2.92.1", + "resolved": "https://registry.npmjs.org/@aws-solutions-constructs/aws-constructs-factories/-/aws-constructs-factories-2.92.1.tgz", + "integrity": "sha512-06WrdVlKu1qPVZBMP8zM6W9tiTGROVw/0/QLyoYRI68G/1vQPBwTOa5IyLk4AXQZfnQk29sGhpVINdf+0w7tGA==", + "license": "Apache-2.0", + "dependencies": { + "@aws-solutions-constructs/core": "2.92.1", + "constructs": "^10.0.0" + }, + "peerDependencies": { + "@aws-solutions-constructs/core": "2.92.1", + "aws-cdk-lib": "^2.212.0", + "constructs": "^10.0.0" + } + }, + "node_modules/@aws-solutions-constructs/aws-constructs-factories/node_modules/@aws-solutions-constructs/core": { + "version": "2.92.1", + "resolved": "https://registry.npmjs.org/@aws-solutions-constructs/core/-/core-2.92.1.tgz", + "integrity": "sha512-qLWlGV+VcQ0BMzt0ADKz8T1burzBMEJvjY62JTTQaa9VhOkqxRqK8Yk9xtinS5mbf+d7EkV9OJCKpVJ7QcwngQ==", + "bundleDependencies": [ + "deepmerge", + "npmlog", + "deep-diff" + ], + "license": "Apache-2.0", + "dependencies": { + "constructs": "^10.0.0", + "deep-diff": "^1.0.2", + "deepmerge": "^4.0.0", + "npmlog": "^7.0.0" + }, + "peerDependencies": { + "aws-cdk-lib": "^2.212.0", + "constructs": "^10.0.0" + } + }, + "node_modules/@aws-solutions-constructs/aws-constructs-factories/node_modules/@aws-solutions-constructs/core/node_modules/deep-diff": { + "version": "1.0.2", + "inBundle": true, + "license": "MIT" + }, + "node_modules/@aws-solutions-constructs/aws-constructs-factories/node_modules/@aws-solutions-constructs/core/node_modules/deepmerge": { + "version": "4.3.1", + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@aws-solutions-constructs/aws-constructs-factories/node_modules/@aws-solutions-constructs/core/node_modules/npmlog": { + "version": "7.0.1", + "inBundle": true, + "license": "ISC", + "dependencies": { + "are-we-there-yet": "^4.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^5.0.0", + "set-blocking": "^2.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, "node_modules/@aws-solutions-constructs/aws-lambda-dynamodb": { "version": "2.74.0", "resolved": "https://registry.npmjs.org/@aws-solutions-constructs/aws-lambda-dynamodb/-/aws-lambda-dynamodb-2.74.0.tgz", @@ -1189,10 +1240,11 @@ } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -2153,9 +2205,9 @@ } }, "node_modules/aws-cdk-lib": { - "version": "2.210.0", - "resolved": "https://registry.npmjs.org/aws-cdk-lib/-/aws-cdk-lib-2.210.0.tgz", - "integrity": "sha512-TSXaZLHKak1bk144PblhqBHZwyh9lvlN2yM4g52aR/HCuxlw2nkn+eupjuOtiNe0JJFwSudvO/qke65dN2WlEg==", + "version": "2.212.0", + "resolved": "https://registry.npmjs.org/aws-cdk-lib/-/aws-cdk-lib-2.212.0.tgz", + "integrity": "sha512-7vy3/fSwmkJe6hmPpX2DXeDIr/VhMjhOPRH4Y0IUjC0c+W6S4XwQU2urRq3DFJRKRWXDwKidqMZlF1m0ZY1wMw==", "bundleDependencies": [ "@balena/dockerignore", "case", @@ -2173,10 +2225,10 @@ "dependencies": { "@aws-cdk/asset-awscli-v1": "2.2.242", "@aws-cdk/asset-node-proxy-agent-v6": "^2.1.0", - "@aws-cdk/cloud-assembly-schema": "^48.2.0", + "@aws-cdk/cloud-assembly-schema": "^48.3.0", "@balena/dockerignore": "^1.0.2", "case": "1.6.3", - "fs-extra": "^11.3.0", + "fs-extra": "^11.3.1", "ignore": "^5.3.2", "jsonschema": "^1.5.0", "mime-types": "^2.1.35", @@ -2312,7 +2364,7 @@ "license": "BSD-3-Clause" }, "node_modules/aws-cdk-lib/node_modules/fs-extra": { - "version": "11.3.0", + "version": "11.3.1", "inBundle": true, "license": "MIT", "dependencies": { @@ -2351,7 +2403,7 @@ "license": "MIT" }, "node_modules/aws-cdk-lib/node_modules/jsonfile": { - "version": "6.1.0", + "version": "6.2.0", "inBundle": true, "license": "MIT", "dependencies": { @@ -4553,9 +4605,9 @@ "dev": true }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -5151,9 +5203,9 @@ } }, "node_modules/prettier": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", - "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, "bin": { "prettier": "bin/prettier.cjs" diff --git a/source/infrastructure/package.json b/source/infrastructure/package.json index 9936ed2c..777a6d1f 100644 --- a/source/infrastructure/package.json +++ b/source/infrastructure/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/gen-ai-app-builder-on-aws-infrastructure", - "version": "3.0.7", + "version": "4.0.0", "bin": { "infrastructure": "bin/gen-ai-app-builder.js" }, @@ -11,10 +11,13 @@ "test-debug": "jest --coverage --runInBand", "unit-test": "jest --coverage --silent --verbose --runInBand --testPathIgnorePatterns=integration", "integ-test": "jest --coverage --silent --verbose --runInBand -- integration", - "cdk": "./node_modules/cdk/bin/cdk", + "test:changed": "jest $(git status --porcelain | grep -E '\\.(test|spec)\\.(js|ts|jsx|tsx)$' | awk '{print $2}' | tr '\\n' ' ')", + "cdk": "cdk", + "synth": "cdk synth", "clean": "rm -rf node_modules", "code-linter": "./node_modules/eslint/bin/eslint.js . -c .eslintrc.js --ext .ts", - "code-formatter": "./node_modules/prettier/bin/prettier.cjs . --config ../.prettierrc.yml --ignore-path ../.prettierignore '**/*.{ts,json,css,md}' !**/*.js --write" + "code-formatter": "git -C ../.. diff --name-only --diff-filter=AM HEAD | grep -E '\\.(js|ts|json|css|md)$' | grep '^source/infrastructure/' | sed 's|^source/infrastructure/||' | xargs -r prettier --config ../../.prettierrc.yml --ignore-path ../../.prettierignore --write", + "code-formatter-changed": "git -C ../.. diff --name-only --diff-filter=AM HEAD | grep -E '\\.(js|ts|json|css|md)$' | grep '^source/infrastructure/' | sed 's|^source/infrastructure/||' | xargs -r prettier --config ../../.prettierrc.yml --ignore-path ../../.prettierignore --write" }, "devDependencies": { "@types/jest": "^29.5.14", @@ -26,16 +29,16 @@ "jsonschema": "^1.4.1", "mock-fs": "^5.4.1", "portfinder": "1.0.32", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.4", "ts-node": "^10.9.2", "typescript": "^5.7.2" }, "dependencies": { - "@aws-cdk/aws-servicecatalogappregistry-alpha": "2.167.2-alpha.0", "@aws-solutions-constructs/aws-apigateway-lambda": "^2.74.0", "@aws-solutions-constructs/aws-apigatewayv2websocket-sqs": "^2.74.0", "@aws-solutions-constructs/aws-cloudfront-s3": "^2.74.0", + "@aws-solutions-constructs/aws-constructs-factories": "^2.92.1", "@aws-solutions-constructs/aws-lambda-dynamodb": "^2.74.0", "@aws-solutions-constructs/aws-sqs-lambda": "^2.74.0", "@aws-solutions-constructs/aws-wafwebacl-apigateway": "^2.74.0", diff --git a/source/infrastructure/test/api/deployment-platform-rest-api-helper.test.ts b/source/infrastructure/test/api/deployment-platform-rest-api-helper.test.ts new file mode 100644 index 00000000..2355066d --- /dev/null +++ b/source/infrastructure/test/api/deployment-platform-rest-api-helper.test.ts @@ -0,0 +1,461 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as api from 'aws-cdk-lib/aws-apigateway'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { JsonSchemaType } from 'aws-cdk-lib/aws-apigateway'; + +import { Template } from 'aws-cdk-lib/assertions'; + +import { + DeploymentRestApiHelper, + DeploymentApiContext, + DeploymentSchema +} from '../../lib/api/deployment-platform-rest-api-helper'; +import { COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME } from '../../lib/utils/constants'; + +describe('DeploymentRestApiHelper', () => { + let stack: cdk.Stack; + let restApi: api.RestApi; + let parentResource: api.Resource; + let mockLambda: lambda.Function; + let mockAuthorizer: api.IAuthorizer; + let requestValidator: api.RequestValidator; + let context: DeploymentApiContext; + + beforeEach(() => { + stack = new cdk.Stack(); + + // Create mock lambda + mockLambda = new lambda.Function(stack, 'MockLambda', { + code: lambda.Code.fromAsset('../infrastructure/test/mock-lambda-func/node-lambda'), + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'index.handler' + }); + + // Create REST API + restApi = new api.RestApi(stack, 'TestApi', { + restApiName: 'test-api' + }); + + // Create parent resource + parentResource = restApi.root.addResource('test'); + + // Add a dummy method to make the REST API valid for CDK synthesis + parentResource.addMethod( + 'GET', + new api.MockIntegration({ + integrationResponses: [{ statusCode: '200' }], + passthroughBehavior: api.PassthroughBehavior.NEVER, + requestTemplates: { + 'application/json': '{"statusCode": 200}' + } + }), + { + methodResponses: [{ statusCode: '200' }] + } + ); + + // Create request validator + requestValidator = new api.RequestValidator(stack, 'MockValidator', { + restApi: restApi, + validateRequestBody: true, + validateRequestParameters: true + }); + + // Create mock authorizer (using interface to avoid CDK validation issues in tests) + mockAuthorizer = { + authorizerId: 'test-authorizer-id', + authorizationType: api.AuthorizationType.CUSTOM + } as api.IAuthorizer; + + // Create context + context = { + scope: stack, + requestValidator: requestValidator, + authorizer: mockAuthorizer, + integration: new api.LambdaIntegration(mockLambda) + }; + }); + + describe('configureCors', () => { + it('should configure CORS with specified methods', () => { + const resource = parentResource.addResource('cors-test'); + const allowedMethods = ['GET', 'POST', 'OPTIONS']; + + // This should not throw an error + expect(() => { + DeploymentRestApiHelper.configureCors(resource, allowedMethods); + }).not.toThrow(); + + // Verify the resource has CORS configured by checking it has the addCorsPreflight method called + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'OPTIONS' + }); + }); + }); + + describe('createModel', () => { + it('should create an API Gateway model with correct properties', () => { + const modelName = 'TestModel'; + const description = 'Test model description'; + const schema = { + type: JsonSchemaType.OBJECT, + properties: { + name: { type: JsonSchemaType.STRING } + } + }; + + const model = DeploymentRestApiHelper.createModel(context, restApi, modelName, description, schema); + + expect(model).toBeDefined(); + expect(model).toBeInstanceOf(api.Model); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::ApiGateway::Model', { + ContentType: 'application/json', + Description: description, + Name: `${modelName}Model`, + Schema: schema + }); + }); + }); + + describe('createResourceStructure', () => { + it('should create collection and item resources with CORS', () => { + const collectionPath = 'items'; + const singularName = 'item-id'; + + const { collectionResource, itemResource } = DeploymentRestApiHelper.createResourceStructure( + parentResource, + collectionPath, + singularName + ); + + expect(collectionResource).toBeDefined(); + expect(itemResource).toBeDefined(); + expect(collectionResource).toBeInstanceOf(api.Resource); + expect(itemResource).toBeInstanceOf(api.Resource); + + const template = Template.fromStack(stack); + + // Verify collection resource + template.hasResourceProperties('AWS::ApiGateway::Resource', { + PathPart: collectionPath + }); + + // Verify item resource with path parameter + template.hasResourceProperties('AWS::ApiGateway::Resource', { + PathPart: '{item-id}' + }); + + // Verify CORS methods are created + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'OPTIONS' + }); + }); + }); + + describe('createCrudModels', () => { + it('should create all CRUD models when schemas provided', () => { + const operationPrefix = 'TestEntity'; + const schemas: DeploymentSchema = { + deploy: { type: JsonSchemaType.OBJECT, properties: { name: { type: JsonSchemaType.STRING } } }, + deployResponse: { type: JsonSchemaType.OBJECT, properties: { id: { type: JsonSchemaType.STRING } } }, + update: { type: JsonSchemaType.OBJECT, properties: { name: { type: JsonSchemaType.STRING } } }, + updateResponse: { type: JsonSchemaType.OBJECT, properties: { id: { type: JsonSchemaType.STRING } } } + }; + + const models = DeploymentRestApiHelper.createCrudModels(context, restApi, operationPrefix, schemas); + + expect(models.createRequestModel).toBeDefined(); + expect(models.createResponseModel).toBeDefined(); + expect(models.updateRequestModel).toBeDefined(); + expect(models.updateResponseModel).toBeDefined(); + + // Verify models are created in CloudFormation + const template = Template.fromStack(stack); + template.resourceCountIs('AWS::ApiGateway::Model', 4); + }); + + it('should return empty object when no schemas provided', () => { + const operationPrefix = 'TestEntity'; + + const models = DeploymentRestApiHelper.createCrudModels(context, restApi, operationPrefix); + + expect(Object.keys(models)).toHaveLength(0); + }); + + it('should create only specified models when partial schemas provided', () => { + const operationPrefix = 'TestEntity'; + const schemas: DeploymentSchema = { + deploy: { type: JsonSchemaType.OBJECT, properties: { name: { type: JsonSchemaType.STRING } } } + }; + + const models = DeploymentRestApiHelper.createCrudModels(context, restApi, operationPrefix, schemas); + + expect(models.createRequestModel).toBeDefined(); + expect(models.createResponseModel).toBeUndefined(); + expect(models.updateRequestModel).toBeUndefined(); + expect(models.updateResponseModel).toBeUndefined(); + + const template = Template.fromStack(stack); + template.resourceCountIs('AWS::ApiGateway::Model', 1); + }); + }); + + describe('addCrudOperations', () => { + let collectionResource: api.Resource; + let itemResource: api.Resource; + + beforeEach(() => { + const structure = DeploymentRestApiHelper.createResourceStructure(parentResource, 'entities', 'entity-id'); + collectionResource = structure.collectionResource; + itemResource = structure.itemResource; + }); + + it('should create all CRUD methods with correct operation names', () => { + const operationPrefix = 'Entity'; + + const resources = DeploymentRestApiHelper.addCrudOperations( + context, + collectionResource, + itemResource, + operationPrefix + ); + + expect(resources).toHaveLength(2); + expect(resources[0]).toBe(collectionResource); + expect(resources[1]).toBe(itemResource); + + // Verify GET /entities (list) + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + OperationName: 'GetEntitys', + RequestParameters: { + 'method.request.header.authorization': true, + 'method.request.querystring.pageNumber': true, + 'method.request.querystring.searchFilter': false + } + }); + + // Verify POST /entities (create) + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'DeployEntity' + }); + + // Verify GET /entities/{id} (get) + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + OperationName: 'GetEntity' + }); + + // Verify PATCH /entities/{id} (update) + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'PATCH', + OperationName: 'UpdateEntity' + }); + + // Verify DELETE /entities/{id} (delete) + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'DELETE', + OperationName: 'DeleteEntity', + RequestParameters: { + 'method.request.header.authorization': true, + 'method.request.querystring.permanent': false + } + }); + }); + + it('should create CRUD operations with models when schemas provided', () => { + const operationPrefix = 'Entity'; + const schemas: DeploymentSchema = { + deploy: { type: JsonSchemaType.OBJECT, properties: { name: { type: JsonSchemaType.STRING } } }, + deployResponse: { type: JsonSchemaType.OBJECT, properties: { id: { type: JsonSchemaType.STRING } } } + }; + + DeploymentRestApiHelper.addCrudOperations( + context, + collectionResource, + itemResource, + operationPrefix, + restApi, + schemas + ); + + // Verify models are created + const template = Template.fromStack(stack); + template.resourceCountIs('AWS::ApiGateway::Model', 2); + + // Check what models were created + const models = template.findResources('AWS::ApiGateway::Model'); + const modelNames = Object.keys(models).map((key) => models[key].Properties?.Name); + expect(modelNames).toContain('DeployEntityApiBodyModel'); + expect(modelNames).toContain('DeployEntityResponseModel'); + + // Check how many methods are created and what they are + const methods = template.findResources('AWS::ApiGateway::Method'); + + // We should have 5 CRUD methods + OPTIONS methods + expect(Object.keys(methods)).toHaveLength(8); // 5 CRUD + 3 OPTIONS (collection, item, and one more) + + // Find the POST method and check its properties + const postMethodKey = Object.keys(methods).find( + (key) => + methods[key].Properties?.HttpMethod === 'POST' && + methods[key].Properties?.OperationName === 'DeployEntity' + ); + + expect(postMethodKey).toBeDefined(); + + const postMethod = methods[postMethodKey!]; + + // Check if it has request models + expect(postMethod.Properties?.RequestModels).toBeDefined(); + expect(postMethod.Properties?.RequestModels?.['application/json']).toBeDefined(); + + // Verify the request model reference + const requestModelRef = postMethod.Properties?.RequestModels?.['application/json']?.Ref; + expect(requestModelRef).toMatch(/DeployEntityApiBodyModel/); + }); + }); + + describe('addCustomEndpoint', () => { + it('should add method directly to resource when no customPath provided', () => { + const testResource = parentResource.addResource('endpoint-test'); + const resource = DeploymentRestApiHelper.addCustomEndpoint(context, testResource, 'POST', 'TestOperation'); + + expect(resource).toBe(testResource); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'TestOperation', + RequestParameters: { + 'method.request.header.authorization': true + } + }); + }); + + it('should create new sub-resource when customPath provided', () => { + const customPath = 'custom-endpoint'; + + const resource = DeploymentRestApiHelper.addCustomEndpoint( + context, + parentResource, + 'POST', + 'CustomOperation', + customPath + ); + + expect(resource).not.toBe(parentResource); + + // Verify new resource is created + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::ApiGateway::Resource', { + PathPart: customPath + }); + + // Verify method is added to new resource + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'CustomOperation' + }); + }); + + it('should support additional parameters', () => { + const testResource = parentResource.addResource('params-test'); + const additionalParams = { + 'method.request.querystring.filter': true, + 'method.request.header.custom': false + }; + + DeploymentRestApiHelper.addCustomEndpoint( + context, + testResource, + 'GET', + 'TestWithParams', + undefined, + additionalParams + ); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + RequestParameters: { + 'method.request.header.authorization': true, + 'method.request.querystring.filter': true, + 'method.request.header.custom': false + } + }); + }); + }); + + describe('collectResourcePaths', () => { + it('should collect resource paths correctly', () => { + const resource1 = parentResource.addResource('path1'); + const resource2 = resource1.addResource('path2'); + const resource3 = parentResource.addResource('path3'); + + const resources = [resource1, resource2, resource3]; + const paths = DeploymentRestApiHelper.collectResourcePaths(resources); + + expect(paths).toEqual(['test/path1', 'test/path1/path2', 'test/path3']); + }); + + it('should handle empty resource array', () => { + const paths = DeploymentRestApiHelper.collectResourcePaths([]); + expect(paths).toEqual([]); + }); + }); + + describe('integration tests', () => { + it('should create a complete API structure with CRUD and custom endpoints', () => { + // Create resource structure + const { collectionResource, itemResource } = DeploymentRestApiHelper.createResourceStructure( + parentResource, + 'products', + 'product-id' + ); + + // Add CRUD operations + const crudResources = DeploymentRestApiHelper.addCrudOperations( + context, + collectionResource, + itemResource, + 'Product' + ); + + // Add custom endpoints + const uploadResource = DeploymentRestApiHelper.addCustomEndpoint( + context, + collectionResource, + 'POST', + 'UploadProducts', + 'upload' + ); + + const exportResource = DeploymentRestApiHelper.addCustomEndpoint( + context, + collectionResource, + 'GET', + 'ExportProducts', + 'export' + ); + + // Verify all resources are created + expect(crudResources).toHaveLength(2); + expect(uploadResource).toBeDefined(); + expect(exportResource).not.toBe(collectionResource); + + // Verify complete API structure + const template = Template.fromStack(stack); + template.resourceCountIs('AWS::ApiGateway::Resource', 5); // root + test + products + {product-id} + upload + export + template.resourceCountIs('AWS::ApiGateway::Method', 12); // 5 CRUD + 2 custom + 1 dummy + 4 OPTIONS + }); + }); +}); diff --git a/source/infrastructure/test/api/deployment-platform-rest-endpoint.test.ts b/source/infrastructure/test/api/deployment-platform-rest-endpoint.test.ts index f40cc3d3..53250473 100644 --- a/source/infrastructure/test/api/deployment-platform-rest-endpoint.test.ts +++ b/source/infrastructure/test/api/deployment-platform-rest-endpoint.test.ts @@ -3,7 +3,6 @@ import * as cdk from 'aws-cdk-lib'; import * as api from 'aws-cdk-lib/aws-apigateway'; -import * as cognito from 'aws-cdk-lib/aws-cognito'; import * as lambda from 'aws-cdk-lib/aws-lambda'; import { Capture, Match, Template } from 'aws-cdk-lib/assertions'; @@ -36,6 +35,13 @@ describe('When creating rest endpoints', () => { new DeploymentPlatformRestEndpoint(stack, 'TestEndpointCreation', { useCaseManagementAPILambda: new lambda.Function(stack, 'MockGetRequestFunction', mockLambdaFuncProps), modelInfoApiLambda: new lambda.Function(stack, 'MockModelInfoFunction', mockLambdaFuncProps), + mcpManagementAPILambda: new lambda.Function(stack, 'MockMCPManagementFunction', mockLambdaFuncProps), + agentManagementAPILambda: new lambda.Function(stack, 'MockAgentManagementFunction', mockLambdaFuncProps), + workflowManagementAPILambda: new lambda.Function( + stack, + 'MockWorkflowManagementFunction', + mockLambdaFuncProps + ), deploymentPlatformAuthorizer: testAuthorizer }); @@ -114,7 +120,7 @@ describe('When creating rest endpoints', () => { const restApiStageCapture = new Capture(); const lambdaCapture = new Capture(); - template.resourceCountIs('AWS::Lambda::Permission', 19); + template.resourceCountIs('AWS::Lambda::Permission', 51); template.hasResourceProperties('AWS::Lambda::Permission', { Action: 'lambda:InvokeFunction', FunctionName: { @@ -603,10 +609,11 @@ describe('When creating rest endpoints', () => { 'OrStatement': { 'Statements': [ { - 'ByteMatchStatement': { - 'FieldToMatch': { 'UriPath': {} }, - 'PositionalConstraint': 'ENDS_WITH', - 'SearchString': '/deployments', + 'RegexMatchStatement': { + 'FieldToMatch': { + 'UriPath': {} + }, + 'RegexString': '/deployments(/mcp|/agents|/workflows)?$', 'TextTransformations': [{ 'Priority': 0, 'Type': 'NONE' }] } }, @@ -616,7 +623,7 @@ describe('When creating rest endpoints', () => { 'UriPath': {} }, 'RegexString': - '/deployments/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', + '/deployments(/mcp|/agents|/workflows)?/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', 'TextTransformations': [{ 'Priority': 0, 'Type': 'NONE' }] } } @@ -679,7 +686,7 @@ describe('When creating rest endpoints', () => { it('should create deployments path based resources', () => { const restApiCapture = new Capture(); - template.resourceCountIs('AWS::ApiGateway::Resource', 8); + template.resourceCountIs('AWS::ApiGateway::Resource', 15); template.hasResourceProperties('AWS::ApiGateway::Resource', { ParentId: { @@ -747,7 +754,7 @@ describe('When creating rest endpoints', () => { it('should create model-info path based resources', () => { const restApiCapture = new Capture(); - template.resourceCountIs('AWS::ApiGateway::Resource', 8); + template.resourceCountIs('AWS::ApiGateway::Resource', 15); template.hasResourceProperties('AWS::ApiGateway::Resource', { ParentId: { @@ -831,7 +838,7 @@ describe('When creating rest endpoints', () => { const authorizerCapture = new Capture(); const validatorCapture = new Capture(); - template.resourceCountIs('AWS::ApiGateway::Method', 15); + template.resourceCountIs('AWS::ApiGateway::Method', 38); template.hasResourceProperties('AWS::ApiGateway::Method', { AuthorizationType: 'CUSTOM', @@ -975,3 +982,323 @@ describe('When creating rest endpoints', () => { }); }); }); + +describe('When creating rest endpoints with MCP lambda', () => { + let template: Template; + + beforeAll(() => { + const stack = new cdk.Stack(); + const mockLambdaFuncProps = { + code: lambda.Code.fromAsset('../infrastructure/test/mock-lambda-func/node-lambda'), + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'index.handler' + }; + + const testAuthorizer = new api.RequestAuthorizer(stack, 'CustomRequestAuthorizers', { + handler: new lambda.Function(stack, 'MockAuthorizerFunction', mockLambdaFuncProps), + identitySources: [api.IdentitySource.header('Authorization')], + resultsCacheTtl: cdk.Duration.seconds(0) + }); + + new DeploymentPlatformRestEndpoint(stack, 'TestEndpointCreation', { + useCaseManagementAPILambda: new lambda.Function(stack, 'MockGetRequestFunction', mockLambdaFuncProps), + modelInfoApiLambda: new lambda.Function(stack, 'MockModelInfoFunction', mockLambdaFuncProps), + mcpManagementAPILambda: new lambda.Function(stack, 'MockMCPFunction', mockLambdaFuncProps), + agentManagementAPILambda: new lambda.Function(stack, 'MockAgentFunction2', mockLambdaFuncProps), + workflowManagementAPILambda: new lambda.Function(stack, 'MockWorkflowFunction2', mockLambdaFuncProps), + deploymentPlatformAuthorizer: testAuthorizer + }); + + template = Template.fromStack(stack); + }); + + it('should create MCP API resources when MCP lambda is provided', () => { + const restApiCapture = new Capture(); + + // Should have additional resources for MCP endpoints + template.resourceCountIs('AWS::ApiGateway::Resource', 15); // 8 base + 3 MCP + 2 agents + 2 workflows + + // Check MCP collection resource + template.hasResourceProperties('AWS::ApiGateway::Resource', { + ParentId: { + Ref: Match.stringLikeRegexp('TestEndpointCreationDeploymentRestEndPointLambdaRestApideployments*') + }, + PathPart: 'mcp', + RestApiId: { + Ref: restApiCapture + } + }); + + // Check MCP item resource + template.hasResourceProperties('AWS::ApiGateway::Resource', { + ParentId: { + Ref: Match.stringLikeRegexp('.*mcp.*') + }, + PathPart: '{useCaseId}', + RestApiId: { + Ref: restApiCapture.asString() + } + }); + + // Check MCP custom endpoints + template.hasResourceProperties('AWS::ApiGateway::Resource', { + ParentId: { + Ref: Match.stringLikeRegexp('.*mcp.*') + }, + PathPart: 'upload-schemas', + RestApiId: { + Ref: restApiCapture.asString() + } + }); + }); + + it('should create MCP CRUD methods with correct operation names', () => { + // Check MCP collection methods + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + OperationName: 'GetMCPs' + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'DeployMCP' + }); + + // Check MCP item methods + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + OperationName: 'GetMCP' + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'PATCH', + OperationName: 'UpdateMCP' + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'DELETE', + OperationName: 'DeleteMCP' + }); + + // Check MCP custom endpoints + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'UploadMCPSchemas' + }); + }); + + it('should create MCP upload schemas endpoint with request and response models', () => { + // Verify request model exists + template.hasResourceProperties('AWS::ApiGateway::Model', { + RestApiId: { + Ref: Match.anyValue() + }, + Name: 'UploadMCPSchemasApiRequestModel', + Description: 'Defines the required JSON structure for uploading MCP schemas' + }); + + // Verify response model exists + template.hasResourceProperties('AWS::ApiGateway::Model', { + RestApiId: { + Ref: Match.anyValue() + }, + Name: 'UploadMCPSchemasResponseModel', + Description: 'Defines the response structure for MCP schema upload requests' + }); + + // Verify method uses the request model + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'UploadMCPSchemas', + RequestModels: { + 'application/json': { + Ref: Match.stringLikeRegexp('.*UploadMCPSchemasApiRequestModel.*') + } + } + }); + + // Verify method uses the response model + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'UploadMCPSchemas', + MethodResponses: [ + { + ResponseModels: { + 'application/json': { + Ref: Match.stringLikeRegexp('.*UploadMCPSchemasResponseModel.*') + } + }, + StatusCode: '200' + } + ] + }); + }); +}); + +describe('When creating rest endpoints with Agent lambda', () => { + let template: Template; + + beforeAll(() => { + const stack = new cdk.Stack(); + const mockLambdaFuncProps = { + code: lambda.Code.fromAsset('../infrastructure/test/mock-lambda-func/node-lambda'), + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'index.handler' + }; + + const testAuthorizer = new api.RequestAuthorizer(stack, 'CustomRequestAuthorizers', { + handler: new lambda.Function(stack, 'MockAuthorizerFunction', mockLambdaFuncProps), + identitySources: [api.IdentitySource.header('Authorization')], + resultsCacheTtl: cdk.Duration.seconds(0) + }); + + new DeploymentPlatformRestEndpoint(stack, 'TestEndpointCreation', { + useCaseManagementAPILambda: new lambda.Function(stack, 'MockGetRequestFunction', mockLambdaFuncProps), + modelInfoApiLambda: new lambda.Function(stack, 'MockModelInfoFunction', mockLambdaFuncProps), + agentManagementAPILambda: new lambda.Function(stack, 'MockAgentFunction', mockLambdaFuncProps), + mcpManagementAPILambda: new lambda.Function(stack, 'MockMCPFunction', mockLambdaFuncProps), + workflowManagementAPILambda: new lambda.Function(stack, 'MockWorkflowFunction3', mockLambdaFuncProps), + deploymentPlatformAuthorizer: testAuthorizer + }); + + template = Template.fromStack(stack); + }); + + it('should create Agent API resources when Agent lambda is provided', () => { + // Should have additional resources for Agent endpoints + // Base has 8 resources, agents add 2 more (agents, {agent-id}), MCP adds 3, workflows adds 2 + template.resourceCountIs('AWS::ApiGateway::Resource', 15); + + // Check Agent collection resource exists + template.hasResourceProperties('AWS::ApiGateway::Resource', { + PathPart: 'agents' + }); + + // Check Agent item resource exists + template.hasResourceProperties('AWS::ApiGateway::Resource', { + PathPart: '{useCaseId}' + }); + }); + + it('should create Agent CRUD methods with correct operation names', () => { + // Check Agent collection methods + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + OperationName: 'GetAgents' + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'DeployAgent' + }); + + // Check Agent item methods + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + OperationName: 'GetAgent' + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'PATCH', + OperationName: 'UpdateAgent' + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'DELETE', + OperationName: 'DeleteAgent' + }); + }); +}); + +describe('When creating rest endpoints with Workflow lambda', () => { + let template: Template; + + beforeAll(() => { + const stack = new cdk.Stack(); + const mockLambdaFuncProps = { + code: lambda.Code.fromAsset('../infrastructure/test/mock-lambda-func/node-lambda'), + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'index.handler' + }; + + const testAuthorizer = new api.RequestAuthorizer(stack, 'CustomRequestAuthorizers', { + handler: new lambda.Function(stack, 'MockAuthorizerFunction', mockLambdaFuncProps), + identitySources: [api.IdentitySource.header('Authorization')], + resultsCacheTtl: cdk.Duration.seconds(0) + }); + + new DeploymentPlatformRestEndpoint(stack, 'TestEndpointCreation', { + useCaseManagementAPILambda: new lambda.Function(stack, 'MockGetRequestFunction', mockLambdaFuncProps), + modelInfoApiLambda: new lambda.Function(stack, 'MockModelInfoFunction', mockLambdaFuncProps), + mcpManagementAPILambda: new lambda.Function(stack, 'MockMCPFunction', mockLambdaFuncProps), + agentManagementAPILambda: new lambda.Function(stack, 'MockAgentFunction', mockLambdaFuncProps), + workflowManagementAPILambda: new lambda.Function(stack, 'MockWorkflowFunction', mockLambdaFuncProps), + deploymentPlatformAuthorizer: testAuthorizer + }); + + template = Template.fromStack(stack); + }); + + it('should create Workflow API resources when Workflow lambda is provided', () => { + const restApiCapture = new Capture(); + + template.hasResourceProperties('AWS::ApiGateway::RestApi', { + Name: { + 'Fn::Join': [ + '', + [ + { + Ref: 'AWS::StackName' + }, + '-UseCaseManagementAPI' + ] + ] + } + }); + + // Check workflows collection resource + template.hasResourceProperties('AWS::ApiGateway::Resource', { + PathPart: 'workflows', + RestApiId: { + Ref: restApiCapture + } + }); + + // Check workflow item resource + template.hasResourceProperties('AWS::ApiGateway::Resource', { + PathPart: '{useCaseId}', + RestApiId: { + Ref: restApiCapture.asString() + } + }); + + // Check that CRUD methods are created for workflows + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + ResourceId: { + Ref: Match.anyValue() + } + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + ResourceId: { + Ref: Match.anyValue() + } + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'PATCH', + ResourceId: { + Ref: Match.anyValue() + } + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'DELETE', + ResourceId: { + Ref: Match.anyValue() + } + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deploy-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deploy-usecase-body.test.ts deleted file mode 100644 index 6053676c..00000000 --- a/source/infrastructure/test/api/model-schema/deploy-usecase-body.test.ts +++ /dev/null @@ -1,1737 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import { deployUseCaseBodySchema } from '../../../lib/api/model-schema/deploy-usecase-body'; -import { checkValidationSucceeded, checkValidationFailed } from './utils'; -import { Validator } from 'jsonschema'; -import { - AGENT_TYPES, - AUTHENTICATION_PROVIDERS, - BEDROCK_INFERENCE_TYPES, - CHAT_PROVIDERS, - CONVERSATION_MEMORY_TYPES, - DEFAULT_KENDRA_EDITION, - KNOWLEDGE_BASE_TYPES, - MAX_KENDRA_NUMBER_OF_DOCS, - MAX_SCORE_THRESHOLD, - MIN_KENDRA_NUMBER_OF_DOCS, - MIN_SCORE_THRESHOLD, - USE_CASE_TYPES -} from '../../../lib/utils/constants'; - -describe('Testing API schema validation', () => { - let schema: any; - let validator: Validator; - const testKendraIndexId = '11111111-1111-1111-1111-111111111111'; - - beforeAll(() => { - schema = deployUseCaseBodySchema; - validator = new Validator(); - }); - - describe('LlmParamsValidations', () => { - describe('Bedrock deployments', () => { - it('Test Bedrock deployment', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment with a provisioned model', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelArn: 'arn:aws:bedrock:us-east-1:111111111111:custom-model/test.1/111111111111', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.PROVISIONED - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment with a guardrail', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: 'fakeid', - GuardrailVersion: 'DRAFT', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment with an InferenceProfileId', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - InferenceProfileId: 'fakeprofile', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, missing ModelId for QUICK_START', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, missing ModelId for OTHER_FOUNDATION', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, missing InferenceProfileId for INFERENCE_PROFILE', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, missing ModelArn for PROVISIONED', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.PROVISIONED - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, missing BedrockInferenceType', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, missing params', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, bad arn', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - ModelArn: 'garbage' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, no guardrail version', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: 'fakeid' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, no guardrail id', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailVersion: 'DRAFT' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, provided ModelId and InferenceProfileId', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - InferenceProfileId: 'fakeprofile' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, bad InferenceProfileId', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - InferenceProfileId: '_garbage' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, bad guardrail version', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: 'fakeid', - GuardrailVersion: 'garbage' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment failed, bad guardrail id', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: '_garbage', - GuardrailVersion: 'DRAFT' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment, FeedbackEnabled passes', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - FeedbackParams: { - FeedbackEnabled: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment, FeedbackParams additional fields fail', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - FeedbackParams: { - FeedbackEnabled: true, - FeedbackParameters: { 'key': 'value' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Bedrock deployment, RestApi Id resources pass', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - ExistingRestApiId: 'test-id', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - }); - - describe('SageMaker deployments', () => { - it('Test SageMaker deployment', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: 'fake-endpoint', - ModelInputPayloadSchema: {}, - ModelOutputJSONPath: '$[0].generated_text' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test SageMaker deployment failed, missing EndpointName', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - ModelInputPayloadSchema: {}, - ModelOutputJSONPath: '$[0].generated_text' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test SageMaker deployment failed, invalid EndpointName', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: '$%', - ModelInputPayloadSchema: {}, - ModelOutputJSONPath: '$[0].generated_text' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test SageMaker deployment failed, missing ModelInputPayloadSchema', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: 'fake-endpoint', - ModelOutputJSONPath: '$[0].generated_text' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test SageMaker deployment failed, invalid ModelInputPayloadSchema', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: 'fake-endpoint', - ModelInputPayloadSchema: 'garbage', - ModelOutputJSONPath: '$[0].generated_text' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test SageMaker deployment failed, invalid ModelOutputJSONPath', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: 'fake-endpoint', - ModelInputPayloadSchema: 'garbage', - ModelOutputJSONPath: '{}' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test SageMaker deployment failed, missing ModelOutputJSONPath', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: 'fake-endpoint', - ModelInputPayloadSchema: {} - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('Advanced model params', () => { - it('Succeeds with advanced model params of all compatible types', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - ModelParams: { - Param1: { Value: 'hello', Type: 'string' }, - Param2: { Value: '1', Type: 'integer' }, - Param3: { Value: '1.0', Type: 'float' }, - Param4: { Value: 'true', Type: 'boolean' }, - Param5: { Value: JSON.stringify(['hello', 'world']), Type: 'list' }, - Param6: { Value: JSON.stringify({ 'hello': 'world' }), Type: 'dictionary' } - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Fails with advanced model params of incompatible types', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - ModelParams: { - Param1: { Value: 'hello', Type: 'othertype' } - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Fails with advanced model params with non-string value', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - ModelParams: { - Param1: { Value: 1.0, Type: 'float' } - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - }); - - describe('KnowledgeBaseParams validations', () => { - describe('Kendra validations', () => { - it('New Kendra index succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - KendraIndexName: 'test' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('New Kendra index fails for no name', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - QueryCapacityUnits: 2, - StorageCapacityUnits: 1, - KendraIndexEdition: DEFAULT_KENDRA_EDITION - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('New Kendra index succeeds with additional params', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - KendraIndexName: 'test', - QueryCapacityUnits: 2, - StorageCapacityUnits: 1, - KendraIndexEdition: DEFAULT_KENDRA_EDITION - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Existing Kendra index succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - ExistingKendraIndexId: testKendraIndexId - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Existing Kendra index fails when providing extra params', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - ExistingKendraIndexId: testKendraIndexId, - StorageCapacityUnits: 1 - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Kendra index fails when providing bad index id', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - ExistingKendraIndexId: 'garbage' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Kendra index fails when providing both new and existing params', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - KendraIndexName: 'test', - ExistingKendraIndexId: testKendraIndexId - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Kendra index succeeds when RBAC enabled flag is provided', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - KendraIndexName: 'test', - RoleBasedAccessControlEnabled: true - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('fails when NoDocsFoundResponse is empty', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - NoDocsFoundResponse: '', - KendraKnowledgeBaseParams: { - KendraIndexName: 'test' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('passes when NoDocsFoundResponse has valid string', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - KendraIndexName: 'test' - }, - NoDocsFoundResponse: 'test message' - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - }); - - describe('Bedrock knowledge base validations', () => { - it('Bedrock succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Bedrock with optional params', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid', - RetrievalFilter: {}, - OverrideSearchType: 'SEMANTIC' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Bedrock fails for missing id', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: {} - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Bedrock fails for bad id', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: '?!' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Bedrock fails for a bad retrieval filter type', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid', - RetrievalFilter: 'garbage' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Bedrock fails for a bad OverrideSearchType', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid', - OverrideSearchType: 'garbage' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('fails when NoDocsFoundResponse is empty', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - NoDocsFoundResponse: '', - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('passes when NoDocsFoundResponse has valid string', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - }, - NoDocsFoundResponse: 'test message' - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - }); - - describe('General knowledge base validations', () => { - it('setting misc parameters succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - }, - NumberOfDocs: 3, - ScoreThreshold: 0.5, - ReturnSourceDocs: true - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('setting NumberOfDocs below range fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - }, - NumberOfDocs: MIN_KENDRA_NUMBER_OF_DOCS - 1 - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('setting NumberOfDocs above range fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - }, - NumberOfDocs: MAX_KENDRA_NUMBER_OF_DOCS + 1 - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('setting ScoreThreshold below range fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - }, - ScoreThreshold: MIN_SCORE_THRESHOLD - 1 - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('setting ScoreThreshold above range fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - }, - ScoreThreshold: MAX_SCORE_THRESHOLD + 1 - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - it('Can not provide KnowledgeBaseParams if not using RAG', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: false - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - ExistingKendraIndexId: testKendraIndexId - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Can not provide BedrockKnowledgeBaseParams if not using Kendra', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraKnowledgeBaseParams: { - ExistingKendraIndexId: testKendraIndexId - }, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Can not provide KendraKnowledgeBaseParams if not using Bedrock', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' }, - RAGEnabled: true - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, - KendraKnowledgeBaseParams: { - ExistingKendraIndexId: testKendraIndexId - }, - BedrockKnowledgeBaseParams: { - BedrockKnowledgeBaseId: 'testid' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Can not validate a bad KnowledgeBaseType', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - RAGEnabled: false - }, - KnowledgeBaseParams: { - KnowledgeBaseType: 'garbage' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('VpcParams validations', () => { - const testVpcId = 'vpc-11111111'; - const testSubnetId = 'subnet-11111111'; - const testSgId = 'sg-11111111'; - - it('No VPC succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - }, - VpcParams: { - VpcEnabled: false - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('No VPC fails due to a mismatch of params', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - VpcParams: { - VpcEnabled: false, - CreateNewVpc: true - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Create a VPC succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: true - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Create a VPC fails due to extra params', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: true, - ExistingVpcId: testVpcId - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Using an existing VPC succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: false, - ExistingVpcId: testVpcId, - ExistingPrivateSubnetIds: [testSubnetId], - ExistingSecurityGroupIds: [testSgId] - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Using an existing VPC fails due to missing VPC ID', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: false, - ExistingPrivateSubnetIds: [testSubnetId], - ExistingSecurityGroupIds: [testSgId] - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Using an existing VPC fails due to bad VPC ID', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: false, - ExistingVpcId: 'garbage', - ExistingPrivateSubnetIds: [testSubnetId], - ExistingSecurityGroupIds: [testSgId] - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Using an existing VPC fails due to missing subnet IDs', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: false, - ExistingVpcId: testVpcId, - ExistingSecurityGroupIds: [testSgId] - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Using an existing VPC fails due to bad subnet IDs', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: false, - ExistingVpcId: testVpcId, - ExistingPrivateSubnetIds: ['garbage'], - ExistingSecurityGroupIds: [testSgId] - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Using an existing VPC fails due to missing security group IDs', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: false, - ExistingVpcId: testVpcId, - ExistingPrivateSubnetIds: [testSubnetId] - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Using an existing VPC fails due to bad security group IDs', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - VpcParams: { - VpcEnabled: true, - CreateNewVpc: false, - ExistingVpcId: testVpcId, - ExistingPrivateSubnetIds: [testSubnetId], - ExistingSecurityGroupIds: ['garbage'] - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('Email Validations', () => { - it('Email is valid succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - }, - }, - DefaultUserEmail: 'testuser@example.com' - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Email is invalid fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - DefaultUserEmail: 'garbage' - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('ConversationMemoryParamsValidation', () => { - it('ConversationMemoryParams is valid succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - }, - ConversationMemoryParams: { - ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB, - HumanPrefix: 'human', - AiPrefix: 'ai', - ChatHistoryLength: 5 - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('ConversationMemoryParams bad memory type fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - ConversationMemoryParams: { - ConversationMemoryType: 'garbage' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('ConversationMemoryParams bad param fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - UseCaseName: 'test', - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - ConversationMemoryParams: { - ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB, - HumanPrefix: 'human', - AiPrefix: 'ai', - ChatHistoryLength: -1 - }, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: { - ExistingUserPoolId: 'us-east-1_111111111111' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('AgentParams and Agent use case type validations', () => { - it('Test valid Agent use case type with AgentParams', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'abc123', - AgentAliasId: 'def456', - EnableTrace: true - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test Agent use case type without AgentParams (should fail)', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Agent use case type with invalid AgentId', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'invalid@id', - AgentAliasId: 'def456', - EnableTrace: true - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Agent use case type with invalid AgentAliasId', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'abc123', - AgentAliasId: 'toolongaliasid', - EnableTrace: true - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Agent use case type with missing EnableTrace', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'abc123', - AgentAliasId: 'def456' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Agent use case type with additional properties (should fail)', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'abc123', - AgentAliasId: 'def456', - EnableTrace: true, - ExtraProperty: 'should not be allowed' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Agent use case type with LlmParams (should fail)', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'abc123', - AgentAliasId: 'def456', - EnableTrace: true - } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Agent use case type with KnowledgeBaseParams (should fail)', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'abc123', - AgentAliasId: 'def456', - EnableTrace: true - } - }, - KnowledgeBaseParams: { - KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, - KendraIndexId: testKendraIndexId - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - it('Test agent creation failing if AgentType is not provided', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'XXXXXX', - AgentAliasId: 'XXXXXX', - EnableTrace: true - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test agent creation failing if AgentType is invalid', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: 'invalid', - BedrockAgentParams: { - AgentId: 'XXXXXX', - AgentAliasId: 'XXXXXX', - EnableTrace: true - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Agent deployment, FeedbackEnabled passes', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - FeedbackParams: { - FeedbackEnabled: true - }, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'XXXXXX', - AgentAliasId: 'XXXXXX', - EnableTrace: true - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test Agent deployment, FeedbackParams additional fields fail', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - FeedbackParams: { - FeedbackEnabled: true, - FeedbackParameters: { 'key': 'value' } - }, - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'XXXXXX', - AgentAliasId: 'XXXXXX', - EnableTrace: true - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Test Agent deployment, RestApi Id resources pass', () => { - const payload = { - UseCaseName: 'test-agent', - UseCaseType: USE_CASE_TYPES.AGENT, - ExistingRestApiId: 'test-id', - AgentParams: { - AgentType: AGENT_TYPES.BEDROCK, - BedrockAgentParams: { - AgentId: 'XXXXXX', - AgentAliasId: 'XXXXXX', - EnableTrace: true - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - describe('AuthenticationParams Validation', () => { - describe('User Pool Id provided', () => { - it('Valid User Pool Id provided', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - }, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: { - ExistingUserPoolId: 'us-east-1_111111111111' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Valid Pool Client Id provided', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - }, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: { - ExistingUserPoolId: 'us-east-1_111111111111', - ExistingUserPoolClientId: '1111111111111111111111111111' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - }); - - describe('Invalid Input provided', () => { - it('Empty Authentication Params', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel' - } - }, - AuthenticationParams: {} - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Unsupported Authentication Provider', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel' - } - }, - AuthenticationParams: { - AuthenticationProvider: 'unsupported' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Invalid User Pool Id provided', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel' - } - }, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: { - ExistingUserPoolId: 'invalid user pool' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('No CognitoParams provided', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel' - } - }, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('No User Pool provided', () => { - const payload = { - UseCaseName: 'test', - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel' - } - }, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: {} - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - }); -}); diff --git a/source/infrastructure/test/api/model-schema/deployments/agents/deploy-agent-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/agents/deploy-agent-usecase-body.test.ts new file mode 100644 index 00000000..8d9f036c --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/agents/deploy-agent-usecase-body.test.ts @@ -0,0 +1,312 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { deployAgentUseCaseBodySchema } from '../../../../../lib/api/model-schema/deployments/agents/deploy-agent-usecase-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../../shared/utils'; + +describe('Testing Deploy Agent Use Case Body schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = deployAgentUseCaseBodySchema; + validator = new Validator(); + }); + + describe('Valid Agent Use Case Deployments', () => { + it('should validate minimal agent deployment', () => { + const payload = { + UseCaseName: 'Test Agent', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant.' + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent deployment with MCP servers', () => { + const payload = { + UseCaseName: 'Test Agent with MCP', + UseCaseDescription: 'Agent with MCP server integration', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant with access to tools.', + MCPServers: [ + { + UseCaseId: 'server1-id', + UseCaseName: 'Server-1', + Url: 'https://gateway-123.bedrock-agentcore.us-east-1.amazonaws.com/mcp', + Type: 'gateway' + }, + { + UseCaseId: 'server2', + UseCaseName: 'Server-2', + Url: 'https://runtime-123.bedrock-agentcore.us-east-1.amazonaws.com/mcp', + Type: 'runtime' + } + ] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent deployment with memory enabled', () => { + const payload = { + UseCaseName: 'Test Agent with Memory', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant with memory.', + MemoryConfig: { + LongTermEnabled: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate complete agent deployment', () => { + const payload = { + UseCaseName: 'Complete Test Agent', + UseCaseDescription: 'A complete agent deployment with all features', + UseCaseType: 'AgentBuilder', + DeployUI: true, + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false, + MultimodalParams: { + MultimodalEnabled: true + } + }, + AgentParams: { + SystemPrompt: 'You are a comprehensive assistant with all capabilities.', + MCPServers: [ + { + UseCaseId: 'server1-id', + UseCaseName: 'Server-1', + Url: 'https://gateway-123.bedrock-agentcore.us-east-1.amazonaws.com/mcp', + Type: 'gateway' + } + ], + MemoryConfig: { + LongTermEnabled: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent deployment with multimodal enabled', () => { + const payload = { + UseCaseName: 'Multimodal Agent', + UseCaseDescription: 'Agent with multimodal capabilities', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false, + MultimodalParams: { + MultimodalEnabled: true + } + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant that can process images and documents.' + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Agent Use Case Deployments', () => { + it('should fail validation when UseCaseName is missing', () => { + const payload = { + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant.' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when UseCaseType is not Agent', () => { + const payload = { + UseCaseName: 'Test Agent', + UseCaseType: 'Text', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant.' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when LlmParams is missing', () => { + const payload = { + UseCaseName: 'Test Agent', + UseCaseType: 'AgentBuilder', + AgentParams: { + SystemPrompt: 'You are a helpful assistant.' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when AgentParams is missing', () => { + const payload = { + UseCaseName: 'Test Agent', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when SystemPrompt is missing from AgentParams', () => { + const payload = { + UseCaseName: 'Test Agent', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentParams: { + MCPServers: [{ McpId: 'mcp-use-case-123' }] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in root', () => { + const payload = { + UseCaseName: 'Test Agent', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant.' + }, + extraProperty: 'not-allowed' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in AgentParams', () => { + const payload = { + UseCaseName: 'Test Agent', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant.', + extraProperty: 'not-allowed' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/agents/params/agent-core-params.test.ts b/source/infrastructure/test/api/model-schema/deployments/agents/params/agent-core-params.test.ts new file mode 100644 index 00000000..94f4a974 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/agents/params/agent-core-params.test.ts @@ -0,0 +1,314 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { agentCoreParams } from '../../../../../../lib/api/model-schema/deployments/agents/params/agent-core-params'; +import { checkValidationSucceeded, checkValidationFailed } from '../../../shared/utils'; +import { AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH } from '../../../../../../lib/utils/constants'; + +describe('Testing Agent Core Parameters schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = agentCoreParams; + validator = new Validator(); + }); + + describe('Valid Agent Core Configurations', () => { + it('should validate agent with system prompt only', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent with system prompt and MCP servers', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant with access to tools.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: 'Weather Service', + Url: 'https://api.weather.example.com', + Type: 'gateway' + }, + { + UseCaseId: 'mcp-use-case-456', + UseCaseName: 'Database Service', + Url: 'https://api.database.example.com', + Type: 'runtime' + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent with system prompt and tools', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant with built-in tools.', + Tools: [{ ToolId: 'http-request' }, { ToolId: 'file-operations' }] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent with system prompt, MCP servers, and tools', () => { + const payload = { + SystemPrompt: 'You are a comprehensive assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: 'Weather Service', + Url: 'https://api.weather.example.com', + Type: 'gateway' + } + ], + Tools: [{ ToolId: 'http-request' }, { ToolId: 'json-parser' }] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent with many MCP servers', () => { + const mcpServers = Array.from({ length: 50 }, (_, i) => ({ + UseCaseId: `mcp-use-case-${i + 1}`, + UseCaseName: `Service ${i + 1}`, + Url: `https://api.service${i + 1}.example.com`, + Type: i % 2 === 0 ? 'gateway' : 'runtime' + })); + + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: mcpServers + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent with many tools', () => { + const tools = Array.from({ length: 100 }, (_, i) => ({ + ToolId: `tool-${i + 1}` + })); + + const payload = { + SystemPrompt: 'You are a helpful assistant.', + Tools: tools + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent with maximum length system prompt', () => { + const longPrompt = 'A'.repeat(AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH); + const payload = { + SystemPrompt: longPrompt + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Agent Core Configurations', () => { + it('should fail validation when SystemPrompt is missing', () => { + const payload = { + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: 'Weather Service', + Url: 'https://api.weather.example.com', + Type: 'gateway' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when SystemPrompt is empty', () => { + const payload = { + SystemPrompt: '' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when SystemPrompt exceeds maximum length', () => { + const tooLongPrompt = 'A'.repeat(AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + 1); + const payload = { + SystemPrompt: tooLongPrompt + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server missing required fields', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [{}] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server missing UseCaseId', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseName: 'Weather Service', + Url: 'https://api.weather.example.com', + Type: 'gateway' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server missing UseCaseName', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + Url: 'https://api.weather.example.com', + Type: 'gateway' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server missing Url', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: 'Weather Service', + Type: 'gateway' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server missing Type', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: 'Weather Service', + Url: 'https://api.weather.example.com' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server has empty UseCaseId', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseId: '', + UseCaseName: 'Weather Service', + Url: 'https://api.weather.example.com', + Type: 'gateway' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server has empty UseCaseName', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: '', + Url: 'https://api.weather.example.com', + Type: 'gateway' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server has empty Url', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: 'Weather Service', + Url: '', + Type: 'gateway' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCP server has invalid Type', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: 'Weather Service', + Url: 'https://api.weather.example.com', + Type: 'invalid-type' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when tool missing ToolId', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + Tools: [{}] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when tool has empty ToolId', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + Tools: [{ ToolId: '' }] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + extraProperty: 'not-allowed' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in MCP server', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-123', + UseCaseName: 'Weather Service', + Url: 'https://api.weather.example.com', + Type: 'gateway', + extraProperty: 'not-allowed' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in tool', () => { + const payload = { + SystemPrompt: 'You are a helpful assistant.', + Tools: [ + { + ToolId: 'http-request', + extraProperty: 'not-allowed' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/agents/params/agent-memory-params.test.ts b/source/infrastructure/test/api/model-schema/deployments/agents/params/agent-memory-params.test.ts new file mode 100644 index 00000000..0d1d9953 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/agents/params/agent-memory-params.test.ts @@ -0,0 +1,54 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { agentMemoryParams } from '../../../../../../lib/api/model-schema/shared/agent-memory-params'; +import { checkValidationSucceeded, checkValidationFailed } from '../../../shared/utils'; + +describe('Testing Agent Memory Parameters schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = agentMemoryParams; + validator = new Validator(); + }); + + describe('Valid Agent Memory Configurations', () => { + it('should validate empty memory config', () => { + const payload = {}; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate memory config with LongTermEnabled true', () => { + const payload = { + LongTermEnabled: true + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate memory config with LongTermEnabled false', () => { + const payload = { + LongTermEnabled: false + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Agent Memory Configurations', () => { + it('should fail validation with non-boolean LongTermEnabled', () => { + const payload = { + LongTermEnabled: 'true' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties', () => { + const payload = { + LongTermEnabled: true, + extraProperty: 'not-allowed' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/agents/update-agent-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/agents/update-agent-usecase-body.test.ts new file mode 100644 index 00000000..75ae1fc2 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/agents/update-agent-usecase-body.test.ts @@ -0,0 +1,262 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { updateAgentUseCaseBodySchema } from '../../../../../lib/api/model-schema/deployments/agents/update-agent-usecase-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../../shared/utils'; +import { AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH } from '../../../../../lib/utils/constants'; + +describe('Testing Update Agent Use Case Body schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = updateAgentUseCaseBodySchema; + validator = new Validator(); + }); + + describe('Valid Agent Use Case Updates', () => { + it('should validate minimal agent update with description only', () => { + const payload = { + UseCaseDescription: 'Updated agent description' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent update with system prompt', () => { + const payload = { + AgentParams: { + SystemPrompt: 'You are an updated helpful assistant.' + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent update with MCP servers', () => { + const payload = { + AgentParams: { + MCPServers: [ + { + UseCaseId: 'mcp-use-case-789', + UseCaseName: 'MCP Server 1', + Url: 'https://example.com/mcp1', + Type: 'gateway' + }, + { + UseCaseId: 'mcp-use-case-101', + UseCaseName: 'MCP Server 2', + Url: 'https://example.com/mcp2', + Type: 'runtime' + } + ] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent update with Tools', () => { + const payload = { + AgentParams: { + Tools: [{ ToolId: 'calculator' }, { ToolId: 'current_time' }] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent update with memory config', () => { + const payload = { + AgentParams: { + MemoryConfig: { + LongTermEnabled: false + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent update with image config', () => { + const payload = { + AgentParams: { + MemoryConfig: { + LongTermEnabled: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate complete agent update', () => { + const payload = { + UseCaseDescription: 'Completely updated agent', + LlmParams: { + Temperature: 0.8, + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentParams: { + SystemPrompt: 'You are a completely updated assistant.', + MCPServers: [ + { + UseCaseId: 'mcp-use-case-new', + UseCaseName: 'New MCP Server', + Url: 'https://example.com/mcp-new', + Type: 'gateway' + } + ], + Tools: [{ ToolId: 'calculator' }, { ToolId: 'environment' }], + MemoryConfig: { + LongTermEnabled: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate partial LLM params update', () => { + const payload = { + LlmParams: { + Temperature: 0.5 + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate empty payload (no updates)', () => { + const payload = {}; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agent update with valid UseCaseType', () => { + const payload = { + UseCaseType: 'AgentBuilder', + UseCaseDescription: 'Updated agent with explicit type' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Agent Use Case Updates', () => { + it('should fail validation with invalid UseCaseType', () => { + const payload = { + UseCaseType: 'Text' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid UseCaseType (Agent instead of AgentBuilder)', () => { + const payload = { + UseCaseType: 'Agent' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid system prompt (too long)', () => { + const tooLongPrompt = 'A'.repeat(AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + 1); // Exceeds AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + const payload = { + AgentParams: { + SystemPrompt: tooLongPrompt + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid MCP server (missing required properties)', () => { + const payload = { + AgentParams: { + MCPServers: [{}] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid MCP server (partial properties)', () => { + const payload = { + AgentParams: { + MCPServers: [ + { + UseCaseId: 'mcp-123' + // Missing UseCaseName, Url, and Type + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid Tool (missing ToolId)', () => { + const payload = { + AgentParams: { + Tools: [{}] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in root', () => { + const payload = { + UseCaseDescription: 'Updated description', + extraProperty: 'not-allowed' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in AgentParams', () => { + const payload = { + AgentParams: { + SystemPrompt: 'Updated prompt', + extraProperty: 'not-allowed' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Partial Update Scenarios', () => { + it('should validate updating only system prompt', () => { + const payload = { + AgentParams: { + SystemPrompt: 'New system prompt only' + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate updating only MCP servers', () => { + const payload = { + AgentParams: { + MCPServers: [ + { + UseCaseId: 'new-mcp-server', + UseCaseName: 'New MCP Server', + Url: 'https://example.com/new-mcp', + Type: 'runtime' + } + ] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate updating only memory config', () => { + const payload = { + AgentParams: { + MemoryConfig: { + LongTermEnabled: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate updating only Tools', () => { + const payload = { + AgentParams: { + Tools: [{ ToolId: 'calculator' }] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/deploy-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/deploy-usecase-body.test.ts new file mode 100644 index 00000000..cdf643f4 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/deploy-usecase-body.test.ts @@ -0,0 +1,431 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { deployUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/deploy-usecase-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../shared/utils'; +import { Validator } from 'jsonschema'; +import { + AGENT_TYPES, + AUTHENTICATION_PROVIDERS, + BEDROCK_INFERENCE_TYPES, + CHAT_PROVIDERS, + CONVERSATION_MEMORY_TYPES, + KNOWLEDGE_BASE_TYPES, + USE_CASE_TYPES +} from '../../../../lib/utils/constants'; + +describe('Testing API schema validation', () => { + let schema: any; + let validator: Validator; + const testKendraIndexId = '11111111-1111-1111-1111-111111111111'; + + beforeAll(() => { + schema = deployUseCaseBodySchema; + validator = new Validator(); + }); + + describe('Email Validations', () => { + it('Email is valid succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + }, + DefaultUserEmail: 'testuser@example.com' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Email is invalid fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + DefaultUserEmail: 'garbage' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('ConversationMemoryParamsValidation', () => { + it('ConversationMemoryParams is valid succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + }, + ConversationMemoryParams: { + ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB, + HumanPrefix: 'human', + AiPrefix: 'ai', + ChatHistoryLength: 5 + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('ConversationMemoryParams bad memory type fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + ConversationMemoryParams: { + ConversationMemoryType: 'garbage' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('ConversationMemoryParams bad param fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + ConversationMemoryParams: { + ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB, + HumanPrefix: 'human', + AiPrefix: 'ai', + ChatHistoryLength: -1 + }, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'us-east-1_111111111111' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('AgentParams and Agent use case type validations', () => { + it('Test valid Agent use case type with AgentParams', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'abc123', + AgentAliasId: 'def456', + EnableTrace: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Agent use case type without AgentParams (should fail)', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Agent use case type with invalid AgentId', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'invalid@id', + AgentAliasId: 'def456', + EnableTrace: true + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Agent use case type with invalid AgentAliasId', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'abc123', + AgentAliasId: 'toolongaliasid', + EnableTrace: true + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Agent use case type with missing EnableTrace', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'abc123', + AgentAliasId: 'def456' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Agent use case type with additional properties (should fail)', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'abc123', + AgentAliasId: 'def456', + EnableTrace: true, + ExtraProperty: 'should not be allowed' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Agent use case type with LlmParams (should fail)', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'abc123', + AgentAliasId: 'def456', + EnableTrace: true + } + }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Agent use case type with KnowledgeBaseParams (should fail)', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'abc123', + AgentAliasId: 'def456', + EnableTrace: true + } + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraIndexId: testKendraIndexId + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + it('Test agent creation failing if AgentType is not provided', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'XXXXXX', + AgentAliasId: 'XXXXXX', + EnableTrace: true + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test agent creation failing if AgentType is invalid', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: 'invalid', + BedrockAgentParams: { + AgentId: 'XXXXXX', + AgentAliasId: 'XXXXXX', + EnableTrace: true + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Agent deployment, FeedbackEnabled passes', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + FeedbackParams: { + FeedbackEnabled: true + }, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'XXXXXX', + AgentAliasId: 'XXXXXX', + EnableTrace: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Agent deployment, FeedbackParams additional fields fail', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + FeedbackParams: { + FeedbackEnabled: true, + FeedbackParameters: { 'key': 'value' } + }, + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'XXXXXX', + AgentAliasId: 'XXXXXX', + EnableTrace: true + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Agent deployment, RestApi Id resources pass', () => { + const payload = { + UseCaseName: 'test-agent', + UseCaseType: USE_CASE_TYPES.AGENT, + ExistingRestApiId: 'test-id', + AgentParams: { + AgentType: AGENT_TYPES.BEDROCK, + BedrockAgentParams: { + AgentId: 'XXXXXX', + AgentAliasId: 'XXXXXX', + EnableTrace: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + describe('ProvisionedConcurrency validations', () => { + it('ProvisionedConcurrencyValue succeeds with valid integer', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: 5, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + it('ProvisionedConcurrencyValue succeeds with minimum value 1', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: 1, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + it('ProvisionedConcurrencyValue succeeds with value 0 (disabled)', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: 0, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + it('ProvisionedConcurrencyValue fails with value above maximum', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: 901, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + it('ProvisionedConcurrencyValue fails with non-integer value', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: '5', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + it('ProvisionedConcurrencyValue fails with decimal value', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: 5.5, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/mcp/deploy-mcp-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/mcp/deploy-mcp-usecase-body.test.ts new file mode 100644 index 00000000..76ba2ce3 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/mcp/deploy-mcp-usecase-body.test.ts @@ -0,0 +1,335 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { deployMcpUseCaseBodySchema } from '../../../../../lib/api/model-schema/deployments/mcp/deploy-mcp-usecase-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../../shared/utils'; + +describe('Testing Deploy MCP Use Case Body schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = deployMcpUseCaseBodySchema; + validator = new Validator(); + }); + + describe('Valid MCP Use Case Deployments', () => { + it('should validate gateway-type MCP server deployment', () => { + const payload = { + UseCaseName: 'Test MCP Gateway Use Case', + UseCaseDescription: 'A test MCP gateway use case', + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-lambda-target', + TargetDescription: 'Test Lambda target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate runtime-type MCP server deployment', () => { + const payload = { + UseCaseName: 'Test MCP Runtime Use Case', + UseCaseDescription: 'A test MCP runtime use case', + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate ECR URIs with standard ECR URI formats', () => { + const validEcrUris = [ + '123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server:latest', + '123456789012.dkr.ecr.us-west-2.amazonaws.com/dynamodb-mcp-server:v1.0' + ]; + + validEcrUris.forEach(ecrUri => { + const payload = { + UseCaseName: 'Test MCP Runtime Use Case', + UseCaseDescription: 'A test MCP runtime use case', + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: ecrUri + } + } + }; + const result = validator.validate(payload, schema); + expect(result.errors).toHaveLength(0); + }); + }); + + it('should validate gateway with multiple targets', () => { + const payload = { + UseCaseName: 'Multi-Target Gateway', + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'lambda-target', + TargetDescription: 'Lambda target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + }, + { + TargetName: 'openapi-target', + TargetDescription: 'OpenAPI target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f1a2b3c4-5678-40fe-859e-d4e5f67a8b9c.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate with optional UseCaseDescription', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'smithyModel', + SchemaUri: 'mcp/schemas/smithyModel/e9b1801d-2516-40fe-859e-a0c7d81da2f3.smithy' + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid MCP Use Case Deployments', () => { + it('should fail validation when UseCaseName is missing', () => { + const payload = { + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when MCPParams is missing', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Gateway Type Conditional Validation', () => { + it('should fail validation when gateway type missing GatewayParams', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: {} + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when gateway type has RuntimeParams', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }, + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Runtime Type Conditional Validation', () => { + it('should fail validation when runtime type missing RuntimeParams', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: {} + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when runtime type has GatewayParams', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + }, + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when runtime config missing EcrUri', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: {} + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid ECR image URI format', () => { + const invalidEcrUris = [ + 'invalid-image-uri', + '12345678901.dkr.ecr.us-east-1.amazonaws.com/mcp-server:tag', // 11 digits instead of 12 + '123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server', // No tag + '123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server:my tag', // Space in tag + '123456789012.dkr.ecr.us-east-1.amazonaws.com/repo@name:latest', // @ in repo name + '123456789012.dkr.ecr.us-east-1.amazonaws.com/repo#name:latest', // # in repo name + '123456789012.dkr.ecr.us-east-1.amazonaws.com/repo%name:latest', // % in repo name + '123456789012.dkr.ecr.us-east-1.amazonaws.com/a:latest', // very short repo name + '123456789012.dkr.ecr.us-east-1.amazonaws.com/../repo:latest', // path traversal + '123456789012.dkr.ecr.us-east-1.amazonaws.com//repo:latest', // double slashes + ' 123456789012.dkr.ecr.us-east-1.amazonaws.com/repo:latest', // leading space + '123456789012.dkr.ecr.us-east-1.amazonaws.com/repo:latest ', // trailing space + 'abcd56789012.dkr.ecr.us-east-1.amazonaws.com/repo:latest', // letters in account ID + '123456789012.ecr.us-east-1.amazonaws.com/repo:latest', // missing 'dkr' + '123456789012.dkr.ecr.amazonaws.com/repo:latest', // missing region + '123456789012.dkr.ecr.us-east-1.amazonaws.com/repo:', // empty tag + `123456789012.dkr.ecr.us-east-1.amazonaws.com/${'a'.repeat(300)}:latest`, // very long repo name + `123456789012.dkr.ecr.us-east-1.amazonaws.com/repo:${'a'.repeat(350)}`, // very long tag + ]; + + invalidEcrUris.forEach(ecrUri => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: ecrUri + } + } + }; + const result = validator.validate(payload, schema); + expect(result.errors.length).toBeGreaterThan(0); + }); + }); + }); + + describe('Additional Properties Validation', () => { + it('should fail validation with additional properties in root', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + } + }, + extraProperty: 'not-allowed' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in MCPParams', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }, + extraProperty: 'not-allowed' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in RuntimeParams', () => { + const payload = { + UseCaseName: 'Test Use Case', + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest', + extraProperty: 'not-allowed' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/mcp/params/mcp-gateway-params.test.ts b/source/infrastructure/test/api/model-schema/deployments/mcp/params/mcp-gateway-params.test.ts new file mode 100644 index 00000000..60b356a1 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/mcp/params/mcp-gateway-params.test.ts @@ -0,0 +1,1026 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { gatewayParams } from '../../../../../../lib/api/model-schema/deployments/mcp/params/mcp-gateway-params'; +import { checkValidationSucceeded, checkValidationFailed } from '../../../shared/utils'; +import { + MCP_GATEWAY_MAX_TARGETS_PER_GATEWAY, + MCP_GATEWAY_TARGET_NAME_MAX_LENGTH, + MCP_GATEWAY_TARGET_DESCRIPTION_MAX_LENGTH, + OAUTH_SCOPE_MAX_LENGTH, + OAUTH_SCOPES_MAX_COUNT, + OAUTH_CUSTOM_PARAM_KEY_MAX_LENGTH, + OAUTH_CUSTOM_PARAM_VALUE_MAX_LENGTH, + OAUTH_CUSTOM_PARAMS_MAX_COUNT, + API_KEY_PARAM_NAME_MAX_LENGTH, + API_KEY_PREFIX_MAX_LENGTH +} from '../../../../../../lib/utils/constants'; + +describe('Testing MCP Gateway Parameters schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = gatewayParams; + validator = new Validator(); + }); + + describe('Valid Gateway Configurations', () => { + it('should validate lambda target with required fields', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-lambda-target', + TargetDescription: 'Test Lambda target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate openApiSchema target with API_KEY auth', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-openapi-target', + TargetDescription: 'Test OpenAPI target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'HEADER', + parameterName: 'X-API-Key', + prefix: 'Bearer' + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate openApiSchema target with OAUTH auth', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + scopes: ['read', 'write'], + customParameters: [{ key: 'audience', value: 'api.example.com' }] + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate openApiSchema target with minimal auth', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-existing-auth-target', + TargetDescription: 'Test existing auth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/b4c5d6e7-8901-40fe-859e-d3eaf14ac7b8.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY' + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate smithyModel target', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-smithy-target', + TargetDescription: 'Test Smithy target', + TargetType: 'smithyModel', + SchemaUri: 'mcp/schemas/smithyModel/e9b1801d-2516-40fe-859e-a0c7d81da2f3.smithy' + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate multiple targets up to maximum', () => { + const targets = Array.from({ length: MCP_GATEWAY_MAX_TARGETS_PER_GATEWAY }, (_, i) => { + const paddedI = String(i).padStart(2, '0'); + return { + TargetName: `target-${i + 1}`, + TargetDescription: `Test target ${i + 1}`, + TargetType: 'lambda', + LambdaArn: `arn:aws:lambda:us-east-1:123456789012:function:test-function-${i + 1}`, + SchemaUri: `mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2${paddedI}.json` + }; + }); + + const payload = { TargetParams: targets }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Gateway Configurations', () => { + it('should fail validation when TargetParams array is empty', () => { + const payload = { + TargetParams: [] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when TargetParams array exceeds maximum', () => { + const targets = Array.from({ length: MCP_GATEWAY_MAX_TARGETS_PER_GATEWAY + 1 }, (_, i) => ({ + TargetName: `target-${i + 1}`, + TargetDescription: `Test target ${i + 1}`, + TargetType: 'lambda', + LambdaArn: `arn:aws:lambda:us-east-1:123456789012:function:test-function-${i + 1}`, + SchemaUri: `mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f${i.toString().padStart(2, '0')}.json` + })); + + const payload = { TargetParams: targets }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when TargetParams is missing', () => { + const payload = {}; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid target type', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'invalid-type', + SchemaUri: 'mcp/schemas/invalid/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Target Name Validation', () => { + it('should fail validation with empty target name', () => { + const payload = { + TargetParams: [ + { + TargetName: '', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with target name exceeding max length', () => { + const longName = 'a'.repeat(MCP_GATEWAY_TARGET_NAME_MAX_LENGTH + 1); // Exceeds max length limit + const payload = { + TargetParams: [ + { + TargetName: longName, + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should validate target name with valid characters', () => { + const payload = { + TargetParams: [ + { + TargetName: 'valid-target_name123', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Lambda Target Validation', () => { + it('should fail validation when lambda target missing LambdaArn', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-lambda-target', + TargetDescription: 'Test Lambda target', + TargetType: 'lambda', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid lambda ARN format', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-lambda-target', + TargetDescription: 'Test Lambda target', + TargetType: 'lambda', + LambdaArn: 'invalid-arn-format', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should validate lambda ARN with version', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-lambda-target', + TargetDescription: 'Test Lambda target with version', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function:1', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('OpenAPI Target Validation', () => { + it('should fail validation when openApiSchema target missing OutboundAuthParams', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-openapi-target', + TargetDescription: 'Test OpenAPI target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid auth provider type', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-openapi-target', + TargetDescription: 'Test OpenAPI target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'INVALID_TYPE' + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when missing OutboundAuthProviderArn', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-openapi-target', + TargetDescription: 'Test OpenAPI target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json', + OutboundAuthParams: { + OutboundAuthProviderType: 'API_KEY' + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when missing OutboundAuthProviderType', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-openapi-target', + TargetDescription: 'Test OpenAPI target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth' + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid Bedrock AgentCore ARN format for API_KEY', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-openapi-target', + TargetDescription: 'Test OpenAPI target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY' + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid Bedrock AgentCore ARN format for OAUTH', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-openapi-target', + TargetDescription: 'Test OpenAPI target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH' + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Schema Key Validation', () => { + it('should validate with valid MCP schema key pattern for smithyModel', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'smithyModel', + SchemaUri: 'mcp/schemas/smithyModel/e9b1801d-2516-40fe-859e-a0c7d81da2f3.smithy' + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate with valid MCP schema key pattern for lambda', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json' + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate with valid MCP schema key pattern for openApiSchema', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.yaml', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY' + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should fail validation with empty schema key', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: '' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid schema key format', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'invalid-schema-key-format' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('OAuth Configuration Validation', () => { + it('should validate OAuth scopes within limits', () => { + const scopes = Array.from({ length: OAUTH_SCOPES_MAX_COUNT }, (_, i) => `scope-${i + 1}`); + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + scopes: scopes + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should fail validation when OAuth scopes exceed maximum count', () => { + const scopes = Array.from({ length: OAUTH_SCOPES_MAX_COUNT + 1 }, (_, i) => `scope-${i + 1}`); + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + scopes: scopes + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when OAuth scope exceeds maximum length', () => { + const longScope = 'a'.repeat(OAUTH_SCOPE_MAX_LENGTH + 1); + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + scopes: [longScope] + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should validate OAuth scope when empty (optional)', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + scopes: [''] + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate OAuth custom parameters within limits', () => { + const customParams = Array.from({ length: OAUTH_CUSTOM_PARAMS_MAX_COUNT }, (_, i) => ({ + key: `param-key-${i + 1}`, + value: `param-value-${i + 1}` + })); + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + customParameters: customParams + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should fail validation when OAuth custom parameters exceed maximum count', () => { + const customParams = Array.from({ length: OAUTH_CUSTOM_PARAMS_MAX_COUNT + 1 }, (_, i) => ({ + key: `param-key-${i + 1}`, + value: `param-value-${i + 1}` + })); + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + customParameters: customParams + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when OAuth custom parameter key exceeds maximum length', () => { + const longKey = 'a'.repeat(OAUTH_CUSTOM_PARAM_KEY_MAX_LENGTH + 1); + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + customParameters: [{ key: longKey, value: 'test-value' }] + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when OAuth custom parameter value exceeds maximum length', () => { + const longValue = 'a'.repeat(OAUTH_CUSTOM_PARAM_VALUE_MAX_LENGTH + 1); + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + customParameters: [{ key: 'test-key', value: longValue }] + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should validate when both OAuth custom parameter key and value are provided', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + customParameters: [{ key: 'test-key', value: 'test-value' }] + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate when OAuth custom parameters array is empty', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + customParameters: [] + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('API Key Configuration Validation', () => { + it('should validate API Key parameter name within limits', () => { + const paramName = 'a'.repeat(API_KEY_PARAM_NAME_MAX_LENGTH); + const payload = { + TargetParams: [ + { + TargetName: 'test-apikey-target', + TargetDescription: 'Test API Key target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'HEADER', + parameterName: paramName + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should fail validation when API Key parameter name exceeds maximum length', () => { + const longParamName = 'a'.repeat(API_KEY_PARAM_NAME_MAX_LENGTH + 1); + const payload = { + TargetParams: [ + { + TargetName: 'test-apikey-target', + TargetDescription: 'Test API Key target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'HEADER', + parameterName: longParamName + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should validate API Key parameter name when empty (optional)', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-apikey-target', + TargetDescription: 'Test API Key target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'HEADER', + parameterName: '' + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate API Key prefix within limits', () => { + const prefix = 'a'.repeat(API_KEY_PREFIX_MAX_LENGTH); + const payload = { + TargetParams: [ + { + TargetName: 'test-apikey-target', + TargetDescription: 'Test API Key target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'HEADER', + parameterName: 'X-API-Key', + prefix: prefix + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should fail validation when API Key prefix exceeds maximum length', () => { + const longPrefix = 'a'.repeat(API_KEY_PREFIX_MAX_LENGTH + 1); + const payload = { + TargetParams: [ + { + TargetName: 'test-apikey-target', + TargetDescription: 'Test API Key target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'HEADER', + parameterName: 'X-API-Key', + prefix: longPrefix + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should validate API Key with empty prefix (optional field)', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-apikey-target', + TargetDescription: 'Test API Key target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'HEADER', + parameterName: 'X-API-Key', + prefix: '' + } + } + } + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid API Key location', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-apikey-target', + TargetDescription: 'Test API Key target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'INVALID_LOCATION', + parameterName: 'X-API-Key' + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Target Description Validation', () => { + it('should validate target description within limits', () => { + const description = 'a'.repeat(MCP_GATEWAY_TARGET_DESCRIPTION_MAX_LENGTH); + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: description, + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should fail validation when target description exceeds maximum length', () => { + const longDescription = 'a'.repeat(MCP_GATEWAY_TARGET_DESCRIPTION_MAX_LENGTH + 1); + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: longDescription, + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Additional Properties Validation', () => { + it('should fail validation with additional properties in target', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json', + extraProperty: 'not-allowed' + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in root', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ], + extraProperty: 'not-allowed' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in OAuth config', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-oauth-target', + TargetDescription: 'Test OAuth target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/a3b4c5d6-7890-40fe-859e-c2d9f03fb5a6.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + scopes: ['read'], + extraProperty: 'not-allowed' + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in API Key config', () => { + const payload = { + TargetParams: [ + { + TargetName: 'test-apikey-target', + TargetDescription: 'Test API Key target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f2c3d4e5-6789-40fe-859e-b1c8e92ea4f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY', + AdditionalConfigParams: { + ApiKeyAdditionalConfig: { + location: 'HEADER', + parameterName: 'X-API-Key', + extraProperty: 'not-allowed' + } + } + } + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/mcp/update-mcp-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/mcp/update-mcp-usecase-body.test.ts new file mode 100644 index 00000000..4335cdc4 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/mcp/update-mcp-usecase-body.test.ts @@ -0,0 +1,381 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { updateMcpUseCaseBodySchema } from '../../../../../lib/api/model-schema/deployments/mcp/update-mcp-usecase-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../../shared/utils'; + +describe('Testing Update MCP Use Case Body schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = updateMcpUseCaseBodySchema; + validator = new Validator(); + }); + + describe('Valid MCP Use Case Updates', () => { + it('should validate gateway-type MCP server update', () => { + const payload = { + UseCaseDescription: 'Updated MCP gateway use case description', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'updated-lambda-target', + TargetDescription: 'Updated Lambda target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:updated-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f4.json' + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate runtime-type MCP server update', () => { + const payload = { + UseCaseDescription: 'Updated MCP runtime use case description', + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-updated-mcp-server:v2.0' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate update with only UseCaseDescription', () => { + const payload = { + UseCaseDescription: 'Just updating the description', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'existing-target', + TargetDescription: 'Existing target', + TargetType: 'smithyModel', + SchemaUri: 'mcp/schemas/smithyModel/e9b1801d-2516-40fe-859e-a0c7d81da2f3.smithy' + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate gateway update with new target configuration', () => { + const payload = { + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'new-openapi-target', + TargetDescription: 'New OpenAPI target with OAuth', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/f1a2b3c4-5678-40fe-859e-d4e5f67a8b9d.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-oauth', + OutboundAuthProviderType: 'OAUTH', + AdditionalConfigParams: { + OAuthAdditionalConfig: { + scopes: ['read', 'write'] + } + } + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate runtime update with new image URI', () => { + const payload = { + MCPParams: { + RuntimeParams: { + EcrUri: '987654321098.dkr.ecr.eu-west-1.amazonaws.com/different-mcp-server:latest' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate runtime updates with standard ECR URI formats', () => { + const validEcrUris = [ + '123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server:latest', + '123456789012.dkr.ecr.us-west-2.amazonaws.com/dynamodb-mcp-server:v2.0' + ]; + + validEcrUris.forEach(ecrUri => { + const payload = { + MCPParams: { + RuntimeParams: { + EcrUri: ecrUri + } + } + }; + const result = validator.validate(payload, schema); + expect(result.errors).toHaveLength(0); + }); + }); + }); + + describe('Invalid MCP Use Case Updates', () => { + it('should fail validation when MCPParams is missing', () => { + const payload = { + UseCaseDescription: 'Updated description', + UseCaseType: 'MCPServer' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when both GatewayParams and RuntimeParams are missing', () => { + const payload = { + UseCaseType: 'MCPServer', + MCPParams: {} + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation when empty payload is provided', () => { + const payload = {}; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Gateway Type Conditional Validation', () => { + it('should fail validation when gateway type has RuntimeParams', () => { + const payload = { + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + }, + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should validate gateway type with only GatewayParams', () => { + const payload = { + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Runtime Type Conditional Validation', () => { + it('should fail validation when runtime type has GatewayParams', () => { + const payload = { + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + }, + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should validate runtime type with only RuntimeParams', () => { + const payload = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should fail validation with invalid ECR image URI format', () => { + // Note: Comprehensive ECR URI validation is covered in deploy-mcp-usecase-body.test.ts + const invalidEcrUris = [ + 'invalid-image-uri-format', + '12345678901.dkr.ecr.us-east-1.amazonaws.com/mcp-server:tag', // 11 digits instead of 12 + '123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server', // No tag + '123456789012.dkr.ecr.us-east-1.amazonaws.com/repo:tag:colon', // Colon in tag + ]; + + invalidEcrUris.forEach(ecrUri => { + const payload = { + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: ecrUri + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + it('should validate when RuntimeParams is empty (partial update allowed)', () => { + const payload = { + MCPParams: { + RuntimeParams: {} + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Gateway Configuration Validation', () => { + it('should fail validation with invalid gateway target configuration', () => { + const payload = { + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'invalid-target', + TargetDescription: 'Invalid target', + TargetType: 'lambda', + // Missing required LambdaArn for lambda type + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with empty TargetParams array', () => { + const payload = { + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Additional Properties Validation', () => { + it('should fail validation with additional properties in root', () => { + const payload = { + UseCaseDescription: 'Updated description', + UseCaseType: 'MCPServer', + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target', + TargetType: 'smithyModel', + SchemaUri: 'mcp/schemas/lambda/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json' + } + ] + } + }, + extraProperty: 'not-allowed' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in MCPParams', () => { + const payload = { + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + }, + extraProperty: 'not-allowed' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should fail validation with additional properties in RuntimeParams', () => { + const payload = { + UseCaseType: 'MCPServer', + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest', + extraProperty: 'not-allowed' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Partial Update Scenarios', () => { + it('should validate partial gateway config update with single target', () => { + const payload = { + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'single-updated-target', + TargetDescription: 'Single updated target', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f4.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-api-key', + OutboundAuthProviderType: 'API_KEY' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate description-only update with existing config validation', () => { + const payload = { + UseCaseDescription: 'Only updating the description, keeping existing config', + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/existing-server:v1.0' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/mcp/upload-schema-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/mcp/upload-schema-body.test.ts new file mode 100644 index 00000000..0ba211d6 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/mcp/upload-schema-body.test.ts @@ -0,0 +1,348 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { uploadMcpSchemaBodySchema } from '../../../../../lib/api/model-schema/deployments/mcp/upload-schema-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../../shared/utils'; +import { Validator } from 'jsonschema'; + +describe('Testing MCP Upload Schema API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = uploadMcpSchemaBodySchema; + validator = new Validator(); + }); + + describe('Valid Payloads', () => { + const validPayloads = [ + { + name: 'minimal valid upload payload with single file', + payload: { + files: [ + { + schemaType: 'lambda', + fileName: 'test-schema.json' + } + ] + } + }, + { + name: 'valid upload payload with multiple files', + payload: { + files: [ + { + schemaType: 'lambda', + fileName: 'lambda-schema.json' + }, + { + schemaType: 'openApiSchema', + fileName: 'api-spec.yaml' + }, + { + schemaType: 'smithyModel', + fileName: 'service.smithy' + } + ] + } + } + ]; + + test.each(validPayloads)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Required Fields Validations', () => { + const missingFieldTests = [ + { + name: 'missing files array', + payload: {} + }, + { + name: 'empty files array', + payload: { + files: [] + } + }, + { + name: 'missing schemaType', + payload: { + files: [ + { + fileName: 'test-schema.json' + } + ] + } + }, + { + name: 'missing fileName', + payload: { + files: [ + { + schemaType: 'lambda' + } + ] + } + } + ]; + + test.each(missingFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Schema Type Validations', () => { + const validSchemaTypes = [ + { schemaType: 'lambda', fileName: 'lambda-function.json' }, + { schemaType: 'openApiSchema', fileName: 'api-spec.yaml' }, + { schemaType: 'smithyModel', fileName: 'service.smithy' } + ]; + + test.each(validSchemaTypes)('$schemaType schema type succeeds', ({ schemaType, fileName }) => { + const payload = { + files: [ + { + schemaType, + fileName + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidSchemaTypes = [ + { name: 'invalid schema type', schemaType: 'invalidType', fileName: 'test.json' }, + { name: 'empty schema type', schemaType: '', fileName: 'test.json' } + ]; + + test.each(invalidSchemaTypes)('$name fails', ({ schemaType, fileName }) => { + const payload = { + files: [ + { + schemaType, + fileName + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('File Name Validations', () => { + const validFileNames = [ + { name: 'valid JSON file name', schemaType: 'lambda', fileName: 'valid-schema.json' }, + { name: 'valid YAML file name', schemaType: 'openApiSchema', fileName: 'api-spec.yaml' }, + { name: 'valid YML file name', schemaType: 'openApiSchema', fileName: 'api-spec.yml' }, + { name: 'valid Smithy file name', schemaType: 'smithyModel', fileName: 'service.smithy' }, + { + name: 'file name with numbers and special characters', + schemaType: 'lambda', + fileName: 'schema_v1.2-final.json' + }, + { name: 'file name with dots in name', schemaType: 'openApiSchema', fileName: 'api.v2.spec.yaml' }, + { name: 'file name with spaces', schemaType: 'lambda', fileName: 'my schema file.json' }, + { name: 'file name with parentheses', schemaType: 'openApiSchema', fileName: 'api spec (v2).yaml' }, + { + name: 'file name with mixed valid characters', + schemaType: 'smithyModel', + fileName: 'service_v1.2 (final).smithy' + }, + { name: 'file name at maximum length', schemaType: 'lambda', fileName: 'a'.repeat(250) + '.json' } + ]; + + test.each(validFileNames)('$name succeeds', ({ schemaType, fileName }) => { + const payload = { + files: [ + { + schemaType, + fileName + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidFileNames = [ + { name: 'invalid file extension', schemaType: 'lambda', fileName: 'schema.txt' }, + { name: 'empty file name', schemaType: 'lambda', fileName: '' }, + { name: 'file name without extension', schemaType: 'lambda', fileName: 'schema' }, + { name: 'extremely long file name', schemaType: 'lambda', fileName: 'a'.repeat(251) + '.json' }, + { name: 'only extension', schemaType: 'lambda', fileName: '.json' } + ]; + + test.each(invalidFileNames)('$name fails', ({ schemaType, fileName }) => { + const payload = { + files: [ + { + schemaType, + fileName + } + ] + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Edge Cases', () => { + const validEdgeCases = [ + { name: 'single character name', fileName: 'a.json' }, + { name: 'numbers only in name', fileName: '123.yaml' }, + { name: 'mixed case with spaces', fileName: 'MySchema File.json' }, + { name: 'multiple dots', fileName: 'schema.v1.2.3.json' }, + { name: 'underscores and hyphens', fileName: 'my_schema-file.yaml' }, + { name: 'parentheses with spaces', fileName: 'schema (backup copy).json' } + ]; + + test.each(validEdgeCases)('$name succeeds', ({ fileName }) => { + const payload = { + files: [ + { + schemaType: 'lambda', + fileName + } + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Mixed Schema Types Validations', () => { + const mixedSchemaTests = [ + { + name: 'mixed valid schema types', + payload: { + files: [ + { + schemaType: 'lambda', + fileName: 'lambda-function.json' + }, + { + schemaType: 'openApiSchema', + fileName: 'rest-api.yaml' + }, + { + schemaType: 'smithyModel', + fileName: 'data-model.smithy' + } + ] + }, + shouldSucceed: true + }, + { + name: 'multiple files of same type', + payload: { + files: [ + { + schemaType: 'lambda', + fileName: 'function1.json' + }, + { + schemaType: 'lambda', + fileName: 'function2.json' + } + ] + }, + shouldSucceed: true + }, + { + name: 'one valid and one invalid file', + payload: { + files: [ + { + schemaType: 'lambda', + fileName: 'valid-schema.json' + }, + { + schemaType: 'invalidType', + fileName: 'invalid-schema.json' + } + ] + }, + shouldSucceed: false + } + ]; + + test.each(mixedSchemaTests)('$name', ({ payload, shouldSucceed }) => { + const result = validator.validate(payload, schema); + if (shouldSucceed) { + checkValidationSucceeded(result); + } else { + checkValidationFailed(result); + } + }); + }); + + describe('Additional Properties and Edge Cases', () => { + const invalidCases = [ + { + name: 'additional properties in root', + payload: { + files: [ + { + schemaType: 'lambda', + fileName: 'test-schema.json' + } + ], + extraField: 'not allowed' + } + }, + { + name: 'additional properties in file object', + payload: { + files: [ + { + schemaType: 'lambda', + fileName: 'test-schema.json', + extraProperty: 'not allowed' + } + ] + } + }, + { + name: 'null files array', + payload: { + files: null + } + }, + { + name: 'files array with null item', + payload: { + files: [null] + } + }, + { + name: 'files array with non-object item', + payload: { + files: ['invalid'] + } + }, + { + name: 'schemaType as number', + payload: { + files: [ + { + schemaType: 123, + fileName: 'test.json' + } + ] + } + }, + { + name: 'fileName as number', + payload: { + files: [ + { + schemaType: 'lambda', + fileName: 123 + } + ] + } + } + ]; + + test.each(invalidCases)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/mcp/upload-schema-response.test.ts b/source/infrastructure/test/api/model-schema/deployments/mcp/upload-schema-response.test.ts new file mode 100644 index 00000000..d03a7e16 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/mcp/upload-schema-response.test.ts @@ -0,0 +1,362 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { uploadMcpSchemaResponseSchema } from '../../../../../lib/api/model-schema/deployments/mcp/upload-schema-response'; +import { checkValidationSucceeded, checkValidationFailed } from '../../shared/utils'; +import { Validator } from 'jsonschema'; + +describe('Testing MCP Upload Schema Response API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = uploadMcpSchemaResponseSchema; + validator = new Validator(); + }); + + describe('Valid Payloads', () => { + const validPayloads = [ + { + name: 'minimal valid response payload with single upload', + payload: { + uploads: [ + { + uploadUrl: 'https://example-bucket.s3.amazonaws.com/', + formFields: { + key: 'mcp/schemas/lambda/test-schema.json', + 'x-amz-meta-userid': 'user123', + 'Content-Type': 'application/json' + }, + fileName: 'test-schema.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'valid response payload with multiple uploads', + payload: { + uploads: [ + { + uploadUrl: 'https://example-bucket.s3.amazonaws.com/', + formFields: { + key: 'mcp/schemas/lambda/lambda-schema.json', + 'x-amz-meta-userid': 'user123' + }, + fileName: 'lambda-schema.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + }, + { + uploadUrl: 'https://example-bucket.s3.amazonaws.com/', + formFields: { + key: 'mcp/schemas/openApi/api-spec.yaml', + 'x-amz-meta-userid': 'user123' + }, + fileName: 'api-spec.yaml', + expiresIn: 1800, + createdAt: '2023-12-01T10:05:00.000Z' + } + ] + } + }, + { + name: 'empty uploads array', + payload: { + uploads: [] + } + }, + { + name: 'empty formFields object', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: {}, + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + } + ]; + + test.each(validPayloads)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Required Fields Validations', () => { + const missingFieldTests = [ + { + name: 'missing uploads array', + payload: {} + }, + { + name: 'missing uploadUrl', + payload: { + uploads: [ + { + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing formFields', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing fileName', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing expiresIn', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + fileName: 'test.json', + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing createdAt', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: 3600 + } + ] + } + } + ]; + + test.each(missingFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Field Type Validations', () => { + const validFieldTests = [ + { + name: 'uploadUrl with valid URI format', + payload: { + uploads: [ + { + uploadUrl: 'https://my-bucket.s3.us-east-1.amazonaws.com/', + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'formFields with multiple string values', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { + key: 'mcp/schemas/lambda/test.json', + 'x-amz-meta-userid': 'user123', + 'x-amz-meta-filename': 'test.json', + 'Content-Type': 'application/json', + tagging: 'schemaType=lambda&uploadedBy=user123' + }, + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'expiresIn with positive integer', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: 1, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'createdAt with valid ISO 8601 format', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.123Z' + } + ] + } + } + ]; + + test.each(validFieldTests)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidFieldTests = [ + { + name: 'expiresIn with zero', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: 0, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'expiresIn with negative number', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: -1, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'invalid data types', + payload: { + uploads: [ + { + uploadUrl: 123, // Should be string + formFields: 'invalid', // Should be object + fileName: 456, // Should be string + expiresIn: 'invalid', // Should be integer + createdAt: 789 // Should be string + } + ] + } + }, + { + name: 'formFields with non-string values', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { + key: 'test', + invalidField: 123 // Should be string + }, + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + } + ]; + + test.each(invalidFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Additional Properties and Edge Cases', () => { + const invalidCases = [ + { + name: 'additional properties in root', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ], + extraField: 'not allowed' + } + }, + { + name: 'additional properties in upload object', + payload: { + uploads: [ + { + uploadUrl: 'https://example.com', + formFields: { key: 'test' }, + fileName: 'test.json', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + extraProperty: 'not allowed' + } + ] + } + }, + { + name: 'null uploads array', + payload: { + uploads: null + } + }, + { + name: 'uploads array with null item', + payload: { + uploads: [null] + } + }, + { + name: 'uploads array with non-object item', + payload: { + uploads: ['invalid'] + } + } + ]; + + test.each(invalidCases)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/update-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/update-usecase-body.test.ts new file mode 100644 index 00000000..2bf91563 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/update-usecase-body.test.ts @@ -0,0 +1,352 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { updateUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/update-usecase-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../shared/utils'; +import { Validator } from 'jsonschema'; +import { + BEDROCK_INFERENCE_TYPES, + CHAT_PROVIDERS, + CONVERSATION_MEMORY_TYPES, + USE_CASE_TYPES +} from '../../../../lib/utils/constants'; + +describe('Testing API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = updateUseCaseBodySchema; + validator = new Validator(); + }); + + describe('Email Validations', () => { + it('Email is valid succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + }, + DefaultUserEmail: 'testuser@example.com' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Email is invalid fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + DefaultUserEmail: 'garbage' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('ConversationMemoryParams Validation', () => { + it('ConversationMemoryParams is valid succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + }, + ConversationMemoryParams: { + ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB, + HumanPrefix: 'human', + AiPrefix: 'ai', + ChatHistoryLength: 5 + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('ConversationMemoryParams is invalid fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + ConversationMemoryParams: { + ConversationMemoryType: 'garbage' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('ConversationMemoryParams bad param fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + ConversationMemoryParams: { + ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB, + HumanPrefix: 'human', + AiPrefix: 'ai', + ChatHistoryLength: -1 + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Multiple Settings Validations', () => { + it('Multiple Settings are valid succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + }, + ConversationMemoryParams: { + ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB + }, + VpcParams: { + ExistingPrivateSubnetIds: ['subnet-11111111'] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Multiple Settings are valid succeeds, no LLM params', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + ConversationMemoryParams: { + ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB + }, + VpcParams: { + ExistingPrivateSubnetIds: ['subnet-11111111'] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Multiple Settings where 1 is invalid fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK + }, + ConversationMemoryParams: { + ConversationMemoryType: 'garbage' + }, + VpcParams: { + ExistingVpcId: 'garbage' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Agent use case update validations', () => { + it('Valid AgentParams succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456', + EnableTrace: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('AgentParams with missing optional field succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('AgentParams with missing required field fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('AgentId exceeding maxLength fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent1234567890', // 11 characters, exceeds maxLength of 10 + AgentAliasId: 'alias456' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('AgentAliasId with invalid characters fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias_456' // Contains underscore, which is not allowed + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('EnableTrace with non-boolean value fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456', + EnableTrace: 'true' // Should be a boolean, not a string + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Additional properties in BedrockAgentParams fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456', + ExtraField: 'should not be here' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Additional properties in AgentParams fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456' + }, + ExtraField: 'should not be here' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Invalid agent type leads to failure', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + AgentParams: { + AgentType: 'invalid' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Missing UseCaseType fails', () => { + const payload = { + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + it('Test Agent deployment, FeedbackEnabled passes', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + FeedbackParams: { + FeedbackEnabled: true + }, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Agent deployment, FeedbackParams additional fields fail', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + FeedbackParams: { + FeedbackEnabled: true, + FeedbackParameters: { 'key': 'value' } + }, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + describe('ProvisionedConcurrency validations', () => { + it('ProvisionedConcurrencyValue succeeds with valid integer', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: 3 + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('ProvisionedConcurrencyValue succeeds with value 0 (disabled)', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: 0 + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('ProvisionedConcurrencyValue fails with value above maximum', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: 901 + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('ProvisionedConcurrencyValue fails with non-integer value', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + ProvisionedConcurrencyValue: '3' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/workflows/deploy-workflow-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/workflows/deploy-workflow-usecase-body.test.ts new file mode 100644 index 00000000..b8ee112a --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/workflows/deploy-workflow-usecase-body.test.ts @@ -0,0 +1,802 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { deployWorkflowUseCaseBodySchema } from '../../../../../lib/api/model-schema/deployments/workflows/deploy-workflow-usecase-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../../shared/utils'; +import { AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH } from '../../../../../lib/utils/constants'; + +describe('Testing Deploy Workflow Use Case Body schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = deployWorkflowUseCaseBodySchema; + validator = new Validator(); + }); + + describe('Valid Workflow Use Case Deployments', () => { + it('should validate minimal workflow deployment', () => { + const payload = { + UseCaseName: 'Test Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + WorkflowParams: { + SystemPrompt: 'You are a customer support coordinator that routes queries to specialized agents.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Support Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a customer support agent.' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow deployment with multiple agents', () => { + const payload = { + UseCaseName: 'Multi-Agent Workflow', + UseCaseDescription: 'Workflow with multiple specialized agents', + UseCaseType: 'Workflow', + DefaultUserEmail: 'test@example.com', + DeployUI: true, + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.5, + Streaming: true, + Verbose: false, + RAGEnabled: false, + MultimodalParams: { + MultimodalEnabled: true + } + }, + WorkflowParams: { + SystemPrompt: + 'You are a multi-agent coordinator that routes tasks to specialized agents based on their capabilities.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Research Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.3, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a research specialist agent.' + } + }, + { + UseCaseId: '87654321-4321-4321-4321-210987654321', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Product Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.5, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a product specialist agent.' + } + }, + { + UseCaseId: '11111111-2222-3333-4444-555555555555', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Support Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a customer support agent.' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow deployment with multimodal enabled', () => { + const payload = { + UseCaseName: 'Multimodal Workflow', + UseCaseDescription: 'Workflow with multimodal capabilities', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false, + MultimodalParams: { + MultimodalEnabled: true + } + }, + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator that can process images and documents.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Multimodal Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false, + MultimodalParams: { + MultimodalEnabled: true + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a multimodal processing agent.' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow deployment with maximum agents', () => { + const payload = { + UseCaseName: 'Large Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + WorkflowParams: { + SystemPrompt: 'You coordinate a large team of specialized agents.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '11111111-1111-1111-1111-111111111111', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 1', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 1.' + } + }, + { + UseCaseId: '22222222-2222-2222-2222-222222222222', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 2', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 2.' + } + }, + { + UseCaseId: '33333333-3333-3333-3333-333333333333', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 3', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 3.' + } + }, + { + UseCaseId: '44444444-4444-4444-4444-444444444444', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 4', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 4.' + } + }, + { + UseCaseId: '55555555-5555-5555-5555-555555555555', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 5', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 5.' + } + }, + { + UseCaseId: '66666666-6666-6666-6666-666666666666', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 6', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 6.' + } + }, + { + UseCaseId: '77777777-7777-7777-7777-777777777777', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 7', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 7.' + } + }, + { + UseCaseId: '88888888-8888-8888-8888-888888888888', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 8', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 8.' + } + }, + { + UseCaseId: '99999999-9999-9999-9999-999999999999', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 9', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 9.' + } + }, + { + UseCaseId: '00000000-0000-0000-0000-000000000000', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent 10', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are agent 10.' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow deployment with long system prompt', () => { + const longPrompt = 'You are a sophisticated workflow coordinator. '.repeat(100); // ~4000 characters + const payload = { + UseCaseName: 'Complex Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + WorkflowParams: { + SystemPrompt: longPrompt, + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Complex Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a complex processing agent.' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Workflow Use Case Deployments', () => { + it('should reject workflow with missing required fields', () => { + const payload = { + UseCaseName: 'Incomplete Workflow' + // Missing UseCaseType, LlmParams, WorkflowParams + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject workflow with wrong use case type', () => { + const payload = { + UseCaseName: 'Wrong Type Workflow', + UseCaseType: 'Text', // Should be 'Workflow' + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + WorkflowParams: { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject workflow with empty system prompt', () => { + const payload = { + UseCaseName: 'Empty Prompt Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + WorkflowParams: { + SystemPrompt: '', // Empty string + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject workflow with system prompt exceeding max length', () => { + const tooLongPrompt = 'A'.repeat(AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + 1); // Exceeds AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + const payload = { + UseCaseName: 'Too Long Prompt Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + WorkflowParams: { + SystemPrompt: tooLongPrompt, + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject workflow with invalid orchestration pattern', () => { + const payload = { + UseCaseName: 'Invalid Pattern Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + WorkflowParams: { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'invalid-pattern', // Not in allowed enum + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject workflow with no selected agents', () => { + const payload = { + UseCaseName: 'No Agents Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + WorkflowParams: { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [] // Empty array + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject workflow with too many selected agents', () => { + const tooManyAgents = Array.from({ length: 11 }, (_, i) => ({ + UseCaseId: `${i.toString().padStart(8, '0')}-1111-1111-1111-111111111111`, + UseCaseType: 'AgentBuilder', + UseCaseName: `Agent ${i + 1}`, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: `You are agent ${i + 1}.` + } + })); + const payload = { + UseCaseName: 'Too Many Agents Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + WorkflowParams: { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: tooManyAgents // 11 agents, exceeds max of 10 + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject workflow with agent missing UseCaseId', () => { + const payload = { + UseCaseName: 'Missing Use Case ID Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + WorkflowParams: { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + // Missing UseCaseId + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent without ID', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject workflow with invalid UseCaseId format', () => { + const payload = { + UseCaseName: 'Invalid Use Case ID Workflow', + UseCaseType: 'Workflow', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + WorkflowParams: { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'invalid-uuid-format', // Invalid UUID format + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent with invalid ID', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/workflows/params/workflow-core-params.test.ts b/source/infrastructure/test/api/model-schema/deployments/workflows/params/workflow-core-params.test.ts new file mode 100644 index 00000000..bcfea786 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/workflows/params/workflow-core-params.test.ts @@ -0,0 +1,544 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { workflowCoreParams } from '../../../../../../lib/api/model-schema/deployments/workflows/params/workflow-core-params'; +import { checkValidationSucceeded, checkValidationFailed } from '../../../shared/utils'; +import { AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH } from '../../../../../../lib/utils/constants'; + +describe('Testing Workflow Core Parameters schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = workflowCoreParams; + validator = new Validator(); + }); + + describe('Valid Workflow Core Parameters', () => { + it('should validate minimal workflow parameters', () => { + const payload = { + SystemPrompt: 'You are a workflow coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow parameters with multiple agents', () => { + const payload = { + SystemPrompt: 'You are a multi-agent coordinator that routes tasks to specialized agents.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Research Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.3, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a research agent.' + } + }, + { + UseCaseId: '87654321-4321-4321-4321-210987654321', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Analysis Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.5, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are an analysis agent.' + } + }, + { + UseCaseId: '11111111-2222-3333-4444-555555555555', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Reporting Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a reporting agent.' + } + } + ] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow parameters with maximum agents', () => { + const maxAgents = Array.from({ length: 10 }, (_, i) => ({ + UseCaseId: `${i.toString().padStart(8, '0')}-1111-1111-1111-111111111111`, + UseCaseType: 'AgentBuilder', + UseCaseName: `Agent ${i + 1}`, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: `You are agent ${i + 1}.` + } + })); + const payload = { + SystemPrompt: 'You coordinate a large team of specialized agents.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: maxAgents + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow parameters with long system prompt', () => { + const longPrompt = 'You are a sophisticated workflow coordinator. '.repeat(200); // ~8000 characters + const payload = { + SystemPrompt: longPrompt, + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow parameters at maximum system prompt length', () => { + const maxLengthPrompt = 'A'.repeat(AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH); // Exactly at max length + const payload = { + SystemPrompt: maxLengthPrompt, + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Workflow Core Parameters', () => { + it('should reject parameters with missing SystemPrompt', () => { + const payload = { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with missing OrchestrationPattern', () => { + const payload = { + SystemPrompt: 'You are a coordinator.', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with missing AgentsAsToolsParams', () => { + const payload = { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with empty SystemPrompt', () => { + const payload = { + SystemPrompt: '', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with SystemPrompt exceeding max length', () => { + const tooLongPrompt = 'A'.repeat(AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + 1); // Exceeds max length + const payload = { + SystemPrompt: tooLongPrompt, + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with invalid OrchestrationPattern', () => { + const payload = { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'invalid-pattern', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with empty SelectedAgents array', () => { + const payload = { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with too many SelectedAgents', () => { + const tooManyAgents = Array.from({ length: 11 }, (_, i) => ({ + UseCaseId: `${i.toString().padStart(8, '0')}-1111-1111-1111-111111111111`, + UseCaseType: 'AgentBuilder', + UseCaseName: `Agent ${i + 1}`, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: `You are agent ${i + 1}.` + } + })); + const payload = { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: tooManyAgents + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with agent missing AgentId', () => { + const payload = { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + // Missing UseCaseId + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent without ID', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with empty AgentId', () => { + const payload = { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'invalid-uuid-format', // Invalid UUID format + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent with invalid ID', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with additional properties', () => { + const payload = { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + }, + InvalidProperty: 'This should not be allowed' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject parameters with additional properties in agent object', () => { + const payload = { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789012', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + }, + InvalidProperty: 'This should not be allowed' + } + ] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/deployments/workflows/update-workflow-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/deployments/workflows/update-workflow-usecase-body.test.ts new file mode 100644 index 00000000..8585b828 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/deployments/workflows/update-workflow-usecase-body.test.ts @@ -0,0 +1,426 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { updateWorkflowUseCaseBodySchema } from '../../../../../lib/api/model-schema/deployments/workflows/update-workflow-usecase-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../../shared/utils'; +import { AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH } from '../../../../../lib/utils/constants'; + +describe('Testing Update Workflow Use Case Body schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = updateWorkflowUseCaseBodySchema; + validator = new Validator(); + }); + + describe('Valid Workflow Use Case Updates', () => { + it('should validate empty update payload', () => { + const payload = {}; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate partial workflow parameter updates', () => { + const payload = { + WorkflowParams: { + SystemPrompt: 'Updated system prompt for the workflow coordinator.' + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate partial LLM parameter updates', () => { + const payload = { + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.8, + Streaming: false + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate orchestration pattern update', () => { + const payload = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools' + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate agents as tools params update', () => { + const payload = { + WorkflowParams: { + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '11111111-1111-1111-1111-111111111111', + UseCaseType: 'AgentBuilder', + UseCaseName: 'New Agent 123', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a new specialized agent.' + } + }, + { + UseCaseId: '22222222-2222-2222-2222-222222222222', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Updated Agent 456', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are an updated specialized agent.' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate complete workflow parameters update', () => { + const payload = { + WorkflowParams: { + SystemPrompt: 'Completely updated system prompt for multi-agent coordination.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '33333333-3333-3333-3333-333333333333', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Research Agent New', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a research agent.' + } + }, + { + UseCaseId: '44444444-4444-4444-4444-444444444444', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Analysis Agent New', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are an analysis agent.' + } + }, + { + UseCaseId: '55555555-5555-5555-5555-555555555555', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Reporting Agent New', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a reporting agent.' + } + } + ] + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate use case description update', () => { + const payload = { + UseCaseDescription: 'Updated description for the workflow use case' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate feedback parameters update', () => { + const payload = { + FeedbackParams: { + FeedbackEnabled: true + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate combined updates', () => { + const payload = { + UseCaseDescription: 'Updated multi-agent workflow with enhanced capabilities', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.6 + }, + WorkflowParams: { + SystemPrompt: 'You are an enhanced workflow coordinator with improved routing capabilities.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '12345678-1234-1234-1234-123456789abc', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Enhanced Research Agent', + UseCaseDescription: 'Agent specialized in research tasks', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a research specialist agent.' + } + }, + { + UseCaseId: '87654321-4321-4321-4321-cba987654321', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Enhanced Analysis Agent', + UseCaseDescription: 'Agent specialized in data analysis', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a data analysis specialist agent.' + } + } + ] + } + }, + FeedbackParams: { + FeedbackEnabled: true + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('should validate workflow update with valid UseCaseType', () => { + const payload = { + UseCaseType: 'Workflow', + UseCaseDescription: 'Updated workflow with explicit type' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Workflow Use Case Updates', () => { + it('should reject update with invalid UseCaseType', () => { + const payload = { + UseCaseType: 'Text' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with invalid UseCaseType (AgentBuilder instead of Workflow)', () => { + const payload = { + UseCaseType: 'AgentBuilder' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with invalid orchestration pattern', () => { + const payload = { + WorkflowParams: { + OrchestrationPattern: 'invalid-pattern' // Not in allowed enum + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with empty system prompt', () => { + const payload = { + WorkflowParams: { + SystemPrompt: '' // Empty string + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with system prompt exceeding max length', () => { + const tooLongPrompt = 'A'.repeat(AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + 1); // Exceeds AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH + const payload = { + WorkflowParams: { + SystemPrompt: tooLongPrompt + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with empty agents array', () => { + const payload = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [] // Empty array + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with too many agents', () => { + const tooManyAgents = Array.from({ length: 11 }, (_, i) => ({ + UseCaseId: `${i.toString().padStart(8, '0')}-1111-1111-1111-111111111111`, + UseCaseType: 'AgentBuilder', + UseCaseName: `Agent ${i + 1}`, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: `You are agent ${i + 1}.` + } + })); + const payload = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: tooManyAgents // 11 agents, exceeds max of 10 + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with agent missing required fields', () => { + const payload = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseName: 'Agent without required fields' // Missing UseCaseId, UseCaseType, LlmParams, AgentBuilderParams + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with invalid UseCaseId format', () => { + const payload = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'invalid-uuid-format', // Invalid UUID format + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with additional properties in WorkflowParams', () => { + const payload = { + WorkflowParams: { + SystemPrompt: 'Valid prompt', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '66666666-6666-6666-6666-666666666666', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + } + } + ] + }, + InvalidProperty: 'This should not be allowed' // Additional property + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('should reject update with additional properties in agent object', () => { + const payload = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: '77777777-7777-7777-7777-777777777777', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-haiku-20240307-v1:0', + BedrockInferenceType: 'QUICK_START' + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a test agent.' + }, + InvalidProperty: 'This should not be allowed' // Additional property + } + ] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/feedback/feedback-body.test.ts b/source/infrastructure/test/api/model-schema/feedback/feedback-body.test.ts new file mode 100644 index 00000000..3764f519 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/feedback/feedback-body.test.ts @@ -0,0 +1,198 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { feedbackRequestSchema } from '../../../../lib/api/model-schema/feedback/feedback-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../shared/utils'; +import { Validator } from 'jsonschema'; + +describe('Testing Feedback API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = feedbackRequestSchema; + validator = new Validator(); + }); + + describe('Required Fields Validations', () => { + it('Test minimal valid feedback payload', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'positive' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test missing required field fails', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + // Missing feedback field + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test invalid useCaseRecordKey format fails', () => { + const payload = { + useCaseRecordKey: 'invalid-format', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'positive' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test invalid conversationId format fails', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: 'invalid-uuid-format', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'positive' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test invalid messageId format fails', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: 'invalid-uuid-format', + feedback: 'positive' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Feedback Value Validations', () => { + it('Test positive feedback succeeds', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'positive' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test negative feedback succeeds', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'negative' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test invalid feedback value fails', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'neutral' // Not in FEEDBACK_VALUES + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Optional Fields Validations', () => { + it('Test with rephrased query succeeds', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'positive', + rephrasedQuery: 'What is the weather like today?' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test with source documents succeeds', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'negative', + sourceDocuments: [ + 's3://bucket/document1.pdf', + 's3://bucket/document2.txt' + ] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test with feedback reasons succeeds', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'negative', + feedbackReason: ['Inaccurate', 'Other'] + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test with valid comment succeeds', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'negative', + comment: 'The response was not helpful for my specific use case.' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test with all optional fields succeeds', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'negative', + rephrasedQuery: 'How do I configure AWS Lambda?', + sourceDocuments: ['s3://bucket/lambda-guide.pdf'], + feedbackReason: ['Inaccurate', 'Incomplete or insufficient'], + comment: 'The response did not include information about environment variables.' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test with invalid feedback reason fails', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'negative', + feedbackReason: ['InvalidReason'] // Not in FEEDBACK_REASON_OPTIONS + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test with invalid comment characters fails', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'negative', + comment: 'Invalid characters: @#$%^&*()' // Contains invalid characters + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Additional Properties Validation', () => { + it('Test with additional properties fails', () => { + const payload = { + useCaseRecordKey: 'abcd1234-efab5678', + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + feedback: 'positive', + extraField: 'not allowed' // Additional property not allowed + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); \ No newline at end of file diff --git a/source/infrastructure/test/api/model-schema/multimodal/files-delete-request-body.test.ts b/source/infrastructure/test/api/model-schema/multimodal/files-delete-request-body.test.ts new file mode 100644 index 00000000..624679d2 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/multimodal/files-delete-request-body.test.ts @@ -0,0 +1,212 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { filesDeleteRequestSchema } from '../../../../lib/api/model-schema/multimodal/files-delete-request-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../shared/utils'; +import { MAX_FILE_DELETES_PER_BATCH } from '../../../../lib/utils/constants'; +import { Validator } from 'jsonschema'; + +describe('Testing Multimodal Files Delete Request API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = filesDeleteRequestSchema; + validator = new Validator(); + }); + + describe('Valid Payloads', () => { + const validPayloads = [ + { + name: 'minimal valid delete payload with single file', + payload: { + fileNames: ['image.png'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'valid delete payload with multiple files', + payload: { + fileNames: ['image1.jpg', 'image2.png', 'document.pdf'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'valid delete payload with mixed file types', + payload: { + fileNames: ['photo.jpeg', 'spreadsheet.xlsx', 'notes.txt', 'presentation.html'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: `valid delete payload with maximum files (${MAX_FILE_DELETES_PER_BATCH})`, + payload: { + fileNames: Array.from({ length: MAX_FILE_DELETES_PER_BATCH }, (_, i) => `file${i}.png`), + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'valid delete payload with complex file names', + payload: { + fileNames: ['my file v2.png', 'data_export.csv', 'report-final.pdf'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + } + ]; + + test.each(validPayloads)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Required Fields Validations', () => { + const missingFieldTests = [ + { + name: 'missing fileNames array', + payload: { + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'missing conversationId', + payload: { + fileNames: ['image.png'], + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'missing messageId', + payload: { + fileNames: ['image.png'], + conversationId: '12345678-1234-1234-1234-123456789012' + } + }, + { + name: 'empty fileNames array', + payload: { + fileNames: [], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + } + ]; + + test.each(missingFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('File Name Validations', () => { + // Note: Comprehensive file name validation is covered in files-upload-request-body.test.ts + // This section only tests delete-specific edge cases + const deleteSpecificTests = [ + { name: 'single valid file name', fileName: 'image.png', shouldSucceed: true }, + { name: 'invalid extension for delete', fileName: 'malware.exe', shouldSucceed: false }, + { name: 'empty filename in delete request', fileName: '', shouldSucceed: false } + ]; + + test.each(deleteSpecificTests)('$name', ({ fileName, shouldSucceed }) => { + const payload = { + fileNames: [fileName], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + }; + const result = validator.validate(payload, schema); + if (shouldSucceed) { + checkValidationSucceeded(result); + } else { + checkValidationFailed(result); + } + }); + }); + + describe('UUID Validations', () => { + test('invalid UUID in delete request fails', () => { + const payload = { + fileNames: ['image.png'], + conversationId: 'invalid-uuid', + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('FileNames Array Validations', () => { + const validFileNamesArrays = [ + { name: 'single file', fileNames: ['image.png'] }, + { name: 'multiple files', fileNames: ['image1.png', 'image2.jpg', 'document.pdf'] }, + { + name: `maximum files (${MAX_FILE_DELETES_PER_BATCH})`, + fileNames: Array.from({ length: MAX_FILE_DELETES_PER_BATCH }, (_, i) => `file${i}.png`) + } + ]; + + test.each(validFileNamesArrays)('$name succeeds', ({ fileNames }) => { + const payload = { + fileNames, + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidFileNamesArrays = [ + { + name: `too many files (${MAX_FILE_DELETES_PER_BATCH + 1})`, + fileNames: Array.from({ length: MAX_FILE_DELETES_PER_BATCH + 1 }, (_, i) => `file${i}.png`) + }, + { name: 'null array', fileNames: null }, + { name: 'non-array value', fileNames: 'not-an-array' } + ]; + + test.each(invalidFileNamesArrays)('$name fails', ({ fileNames }) => { + const payload = { + fileNames, + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Additional Properties and Edge Cases', () => { + const invalidCases = [ + { + name: 'additional properties in root', + payload: { + fileNames: ['image.png'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + extraField: 'not allowed' + } + }, + + { + name: 'fileNames array with null item', + payload: { + fileNames: [null], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'fileNames array with non-string item', + payload: { + fileNames: [123], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + } + ]; + + test.each(invalidCases)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/multimodal/files-delete-response-body.test.ts b/source/infrastructure/test/api/model-schema/multimodal/files-delete-response-body.test.ts new file mode 100644 index 00000000..6878d189 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/multimodal/files-delete-response-body.test.ts @@ -0,0 +1,487 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { filesDeleteResponseSchema } from '../../../../lib/api/model-schema/multimodal/files-delete-response-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../shared/utils'; +import { Validator } from 'jsonschema'; + +describe('Testing Multimodal Files Delete Response API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = filesDeleteResponseSchema; + validator = new Validator(); + }); + + describe('Valid Payloads', () => { + const validPayloads = [ + { + name: 'successful single file deletion', + payload: { + deletions: [ + { + success: true, + fileName: 'image.png' + } + ], + allSuccessful: true, + failureCount: 0 + } + }, + { + name: 'failed single file deletion', + payload: { + deletions: [ + { + success: false, + fileName: 'image.png', + error: 'File not found' + } + ], + allSuccessful: false, + failureCount: 1 + } + }, + { + name: 'mixed success and failure for multiple files', + payload: { + deletions: [ + { + success: true, + fileName: 'image1.png' + }, + { + success: false, + fileName: 'image2.jpg', + error: 'Access denied' + }, + { + success: true, + fileName: 'document.pdf' + } + ], + allSuccessful: false, + failureCount: 1 + } + }, + { + name: 'all successful deletions', + payload: { + deletions: [ + { + success: true, + fileName: 'image1.png' + }, + { + success: true, + fileName: 'image2.jpg' + }, + { + success: true, + fileName: 'document.pdf' + } + ], + allSuccessful: true, + failureCount: 0 + } + }, + { + name: 'all failed deletions', + payload: { + deletions: [ + { + success: false, + fileName: 'image1.png', + error: 'File not found' + }, + { + success: false, + fileName: 'image2.jpg', + error: 'Access denied' + } + ], + allSuccessful: false, + failureCount: 2 + } + }, + { + name: 'successful deletion without optional error field', + payload: { + deletions: [ + { + success: true, + fileName: 'image.png' + } + ], + allSuccessful: true, + failureCount: 0 + } + } + ]; + + test.each(validPayloads)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Required Fields Validations', () => { + const missingFieldTests = [ + { + name: 'missing deletions array', + payload: { + allSuccessful: true, + failureCount: 0 + } + }, + { + name: 'missing allSuccessful field', + payload: { + deletions: [ + { + success: true, + fileName: 'image.png' + } + ], + failureCount: 0 + } + }, + { + name: 'missing failureCount field', + payload: { + deletions: [ + { + success: true, + fileName: 'image.png' + } + ], + allSuccessful: true + } + }, + { + name: 'empty deletions array', + payload: { + deletions: [], + allSuccessful: true, + failureCount: 0 + } + }, + { + name: 'missing success field in deletion item', + payload: { + deletions: [ + { + fileName: 'image.png' + } + ], + allSuccessful: true, + failureCount: 0 + } + }, + { + name: 'missing fileName field in deletion item', + payload: { + deletions: [ + { + success: true + } + ], + allSuccessful: true, + failureCount: 0 + } + } + ]; + + test.each(missingFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Deletion Item Field Validations', () => { + const validDeletionItems = [ + { + name: 'successful deletion without error', + success: true, + fileName: 'image.png' + }, + { + name: 'failed deletion with error message', + success: false, + fileName: 'document.pdf', + error: 'File not found in S3 bucket' + }, + { + name: 'complex file name', + success: true, + fileName: 'My Document Final Version.pdf' + }, + { + name: 'failed deletion with detailed error', + success: false, + fileName: 'data.csv', + error: 'Access denied: insufficient permissions to delete file' + }, + { + name: 'failed deletion with null error', + success: false, + fileName: 'image.png', + error: null + } + ]; + + test.each(validDeletionItems)('$name succeeds', (deletionItem) => { + const { name, ...validFields } = deletionItem; + const payload = { + deletions: [validFields], + allSuccessful: deletionItem.success, + failureCount: deletionItem.success ? 0 : 1 + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidDeletionItems = [ + { + name: 'empty fileName', + success: true, + fileName: '' + }, + { + name: 'null fileName', + success: true, + fileName: null + }, + { + name: 'non-string fileName', + success: true, + fileName: 123 + }, + { + name: 'non-boolean success', + success: 'true', + fileName: 'image.png' + }, + { + name: 'null success', + success: null, + fileName: 'image.png' + }, + { + name: 'non-string error', + success: false, + fileName: 'image.png', + error: 123 + }, + { + name: 'empty error string', + success: false, + fileName: 'image.png', + error: '' + } + ]; + + test.each(invalidDeletionItems)('$name fails', (deletionItem) => { + const { name, ...validFields } = deletionItem; + const payload = { + deletions: [validFields], + allSuccessful: false, + failureCount: 1 + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Summary Fields Validations', () => { + const validSummaryFields = [ + { + name: 'all successful with zero failure count', + allSuccessful: true, + failureCount: 0 + }, + { + name: 'not all successful with positive failure count', + allSuccessful: false, + failureCount: 2 + }, + { + name: 'not all successful with zero failure count (edge case)', + allSuccessful: false, + failureCount: 0 + } + ]; + + test.each(validSummaryFields)('$name succeeds', ({ allSuccessful, failureCount }) => { + const payload = { + deletions: [ + { + success: true, + fileName: 'image.png' + } + ], + allSuccessful, + failureCount + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidSummaryFields = [ + { + name: 'non-boolean allSuccessful', + allSuccessful: 'true', + failureCount: 0 + }, + { + name: 'null allSuccessful', + allSuccessful: null, + failureCount: 0 + }, + { + name: 'non-number failureCount', + allSuccessful: true, + failureCount: '0' + }, + { + name: 'null failureCount', + allSuccessful: true, + failureCount: null + }, + { + name: 'negative failureCount', + allSuccessful: false, + failureCount: -1 + } + ]; + + test.each(invalidSummaryFields)('$name fails', ({ allSuccessful, failureCount }) => { + const payload = { + deletions: [ + { + success: true, + fileName: 'image.png' + } + ], + allSuccessful, + failureCount + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Multiple Deletions Validations', () => { + const multipleDeletionsTests = [ + { + name: 'multiple successful deletions', + payload: { + deletions: [ + { + success: true, + fileName: 'image1.png' + }, + { + success: true, + fileName: 'image2.jpg' + } + ], + allSuccessful: true, + failureCount: 0 + }, + shouldSucceed: true + }, + { + name: 'one valid and one invalid deletion item', + payload: { + deletions: [ + { + success: true, + fileName: 'image1.png' + }, + { + success: true, + fileName: '' + } + ], + allSuccessful: true, + failureCount: 0 + }, + shouldSucceed: false + } + ]; + + test.each(multipleDeletionsTests)('$name', ({ payload, shouldSucceed }) => { + const result = validator.validate(payload, schema); + if (shouldSucceed) { + checkValidationSucceeded(result); + } else { + checkValidationFailed(result); + } + }); + }); + + describe('Additional Properties and Edge Cases', () => { + const invalidCases = [ + { + name: 'additional properties in root', + payload: { + deletions: [ + { + success: true, + fileName: 'image.png' + } + ], + allSuccessful: true, + failureCount: 0, + extraField: 'not allowed' + } + }, + { + name: 'additional properties in deletion item', + payload: { + deletions: [ + { + success: true, + fileName: 'image.png', + extraProperty: 'not allowed' + } + ], + allSuccessful: true, + failureCount: 0 + } + }, + { + name: 'null deletions array', + payload: { + deletions: null, + allSuccessful: true, + failureCount: 0 + } + }, + { + name: 'deletions array with null item', + payload: { + deletions: [null], + allSuccessful: false, + failureCount: 1 + } + }, + { + name: 'deletions array with non-object item', + payload: { + deletions: ['invalid'], + allSuccessful: false, + failureCount: 1 + } + }, + { + name: 'non-array deletions', + payload: { + deletions: 'not-an-array', + allSuccessful: true, + failureCount: 0 + } + } + ]; + + test.each(invalidCases)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/multimodal/files-get-response-body.test.ts b/source/infrastructure/test/api/model-schema/multimodal/files-get-response-body.test.ts new file mode 100644 index 00000000..bed1dfc0 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/multimodal/files-get-response-body.test.ts @@ -0,0 +1,192 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { filesGetResponseSchema } from '../../../../lib/api/model-schema/multimodal/files-get-response-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../shared/utils'; +import { Validator } from 'jsonschema'; + +describe('Testing Multimodal Files Get Response API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = filesGetResponseSchema; + validator = new Validator(); + }); + + describe('Valid Payloads', () => { + const validPayloads = [ + { + name: 'minimal valid get response', + payload: { + downloadUrl: 'https://s3.amazonaws.com/bucket/presigned-url' + } + }, + { + name: 'valid response with complex URL', + payload: { + downloadUrl: 'https://s3.amazonaws.com/bucket/presigned-url?param=value&encoded=%20space' + } + }, + { + name: 'valid response with long URL', + payload: { + downloadUrl: 'https://s3.amazonaws.com/bucket/very/long/path/to/file/presentation.html' + } + } + ]; + + test.each(validPayloads)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Required Fields Validations', () => { + const missingFieldTests = [ + { + name: 'missing downloadUrl', + payload: {} + }, + { + name: 'empty payload', + payload: {} + } + ]; + + test.each(missingFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Download URL Validations', () => { + const validDownloadUrls = [ + { name: 'simple S3 URL', url: 'https://s3.amazonaws.com/bucket' }, + { name: 'URL with encoded characters', url: 'https://s3.amazonaws.com/bucket/my%20file.jpg' } + ]; + + test.each(validDownloadUrls)('$name succeeds', ({ url }) => { + const payload = { + downloadUrl: url + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidDownloadUrls = [ + { name: 'empty URL', url: '' }, + { name: 'null URL', url: null }, + { name: 'non-string URL', url: 123 }, + { name: 'invalid URL format', url: 'not-a-url' } + ]; + + test.each(invalidDownloadUrls)('$name fails', ({ url }) => { + const payload = { + downloadUrl: url + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Download URL Format Validations', () => { + const urlFormatTests = [ + { + name: 'valid S3 URL with query params', + url: 'https://s3.amazonaws.com/bucket/file?param=value', + shouldSucceed: true + }, + { + name: 'valid S3 URL with encoded characters', + url: 'https://s3.amazonaws.com/bucket/my%20file.jpg', + shouldSucceed: true + }, + { name: 'invalid URL format', url: 'not-a-valid-url', shouldSucceed: false }, + { name: 'empty URL', url: '', shouldSucceed: false } + ]; + + test.each(urlFormatTests)('$name', ({ url, shouldSucceed }) => { + const payload = { + downloadUrl: url + }; + const result = validator.validate(payload, schema); + if (shouldSucceed) { + checkValidationSucceeded(result); + } else { + checkValidationFailed(result); + } + }); + }); + + describe('Edge Cases and Combinations', () => { + const edgeCases = [ + { + name: 'minimal valid response', + payload: { + downloadUrl: 'https://s3.amazonaws.com/b/f.png' + }, + shouldSucceed: true + }, + { + name: 'maximum length URL', + payload: { + downloadUrl: 'https://s3.amazonaws.com/bucket/' + 'a'.repeat(1000) + '.jpg' + }, + shouldSucceed: true + }, + { + name: 'special characters in URL', + payload: { + downloadUrl: 'https://s3.amazonaws.com/bucket/my%20file%20(1).jpg?param=value&encoded=%20space' + }, + shouldSucceed: true + } + ]; + + test.each(edgeCases)('$name', ({ payload, shouldSucceed }) => { + const result = validator.validate(payload, schema); + if (shouldSucceed) { + checkValidationSucceeded(result); + } else { + checkValidationFailed(result); + } + }); + }); + + describe('Additional Properties and Type Validations', () => { + const invalidCases = [ + { + name: 'additional properties in root', + payload: { + downloadUrl: 'https://s3.amazonaws.com/bucket/presigned-url', + extraField: 'not allowed' + } + }, + { + name: 'wrong type for downloadUrl', + payload: { + downloadUrl: 123 + } + }, + { + name: 'null downloadUrl', + payload: { + downloadUrl: null + } + }, + { + name: 'array instead of object', + payload: ['https://s3.amazonaws.com/bucket/presigned-url'] + }, + { + name: 'string instead of object', + payload: 'not an object' + }, + { + name: 'number instead of object', + payload: 123 + } + ]; + + test.each(invalidCases)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/multimodal/files-upload-request-body.test.ts b/source/infrastructure/test/api/model-schema/multimodal/files-upload-request-body.test.ts new file mode 100644 index 00000000..169d8b7a --- /dev/null +++ b/source/infrastructure/test/api/model-schema/multimodal/files-upload-request-body.test.ts @@ -0,0 +1,330 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { filesUploadRequestSchema } from '../../../../lib/api/model-schema/multimodal/files-upload-request-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../shared/utils'; +import { MAX_FILE_UPLOADS_PER_BATCH, MULTIMODAL_FILENAME_PATTERN } from '../../../../lib/utils/constants'; +import { Validator } from 'jsonschema'; + +describe('Testing Multimodal Files Upload Request API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = filesUploadRequestSchema; + validator = new Validator(); + }); + + describe('Valid Payloads', () => { + const validPayloads = [ + { + name: 'minimal valid upload payload with single image file', + payload: { + fileNames: ['image.png'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'valid upload payload with multiple image files', + payload: { + fileNames: ['image1.jpg', 'image2.png', 'image3.gif'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'valid upload payload with document files', + payload: { + fileNames: ['document.pdf', 'spreadsheet.xlsx', 'text.txt'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'valid upload payload with mixed file types', + payload: { + fileNames: ['image.png', 'document.pdf', 'data.csv', 'presentation.html'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'valid upload payload with maximum files', + payload: { + fileNames: Array.from({ length: MAX_FILE_UPLOADS_PER_BATCH }, (_, i) => `file${i}.png`), + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + } + ]; + + test.each(validPayloads)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Required Fields Validations', () => { + const missingFieldTests = [ + { + name: 'missing files array', + payload: { + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'empty files array', + payload: { + fileNames: [], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'missing conversationId', + payload: { + fileNames: ['image.png'], + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'missing messageId', + payload: { + fileNames: ['image.png'], + conversationId: '12345678-1234-1234-1234-123456789012' + } + } + ]; + + test.each(missingFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('File Name Validations', () => { + const validFileNames = [ + { name: 'PNG image', fileName: 'image.png' }, + { name: 'JPEG image', fileName: 'photo.jpeg' }, + { name: 'JPG image', fileName: 'picture.jpg' }, + { name: 'GIF image', fileName: 'animation.gif' }, + { name: 'WebP image', fileName: 'modern.webp' }, + { name: 'PDF document', fileName: 'document.pdf' }, + { name: 'CSV file', fileName: 'data.csv' }, + { name: 'Word document', fileName: 'report.doc' }, + { name: 'Word document (new)', fileName: 'report.docx' }, + { name: 'Excel spreadsheet', fileName: 'data.xls' }, + { name: 'Excel spreadsheet (new)', fileName: 'data.xlsx' }, + { name: 'HTML file', fileName: 'page.html' }, + { name: 'Text file', fileName: 'notes.txt' }, + { name: 'Markdown file', fileName: 'readme.md' }, + { name: 'Space (\\x20)', fileName: 'my\x20file.png' }, + { name: 'underscores', fileName: 'my_file.pdf' }, + { name: 'hyphens', fileName: 'my-file.jpg' }, + { name: 'numbers', fileName: 'file123.pdf' }, + { name: 'file at maximum length', fileName: 'a'.repeat(250) + '.png' } + ]; + + test.each(validFileNames)('$name succeeds', ({ fileName }) => { + const payload = { + fileNames: [fileName], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidFileNames = [ + { name: 'unsupported extension', fileName: 'file.exe' }, + { name: 'no extension', fileName: 'filename' }, + { name: 'empty filename', fileName: '' }, + { name: 'only extension', fileName: '.png' }, + { name: 'extremely long filename', fileName: 'a'.repeat(252) + '.png' }, + { name: 'invalid characters', fileName: 'file<>.png' }, + { name: 'path traversal attempt', fileName: '../file.png' }, + { name: 'null filename', fileName: null }, + { name: 'dots', fileName: 'file.v1.2.png' }, + { name: 'ending with space', fileName: 'file .png' }, + { name: 'parentheses', fileName: 'file(1).png' }, + { name: 'square brackets', fileName: 'file[1].png' }, + { name: 'curly braces', fileName: 'file{1}.png' }, + { name: 'single quotes', fileName: "file'test'.png" }, + { name: 'double quotes', fileName: 'file"test".png' }, + { name: 'backslash', fileName: 'file\\test.png' }, + { name: 'forward slash', fileName: 'file/test.png' }, + { name: 'multiple consecutive spaces', fileName: 'file name.png' }, + { name: 'non-breaking space', fileName: 'file\u00A0name.png' }, + { name: 'tab character', fileName: 'file\tname.png' }, + { name: 'starts with non-alphanumeric character', fileName: '_file.png' }, + { name: 'leading space', fileName: ' file.pdf' }, + { name: 'consecutive spaces', fileName: 'file name.jpg' }, + { name: 'non-breaking space (\\u00A0)', fileName: 'file\u00A0name.pdf' }, + { name: 'zero-width space (\\u200B)', fileName: 'file\u200Bname.png' } + ]; + + test.each(invalidFileNames)('$name fails', ({ fileName }) => { + const payload = { + fileNames: [fileName], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('UUID Validations', () => { + const validUUIDs = [ + '12345678-1234-1234-1234-123456789012', + 'abcdef12-3456-7890-abcd-ef1234567890', + '00000000-0000-0000-0000-000000000000', + 'ffffffff-ffff-ffff-ffff-ffffffffffff' + ]; + + test.each(validUUIDs)('valid UUID %s succeeds', (uuid) => { + const payload = { + fileNames: ['image.png'], + conversationId: uuid, + messageId: uuid + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidUUIDs = [ + { name: 'too short', uuid: '12345678-1234-1234-1234-12345678901' }, + { name: 'too long', uuid: '12345678-1234-1234-1234-1234567890123' }, + { name: 'missing hyphens', uuid: '12345678123412341234123456789012' }, + { name: 'invalid characters', uuid: '12345678-1234-1234-1234-12345678901g' }, + { name: 'uppercase letters', uuid: '12345678-1234-1234-1234-123456789ABC' }, + { name: 'empty string', uuid: '' }, + { name: 'null value', uuid: null } + ]; + + test.each(invalidUUIDs)('invalid conversationId UUID $name fails', ({ uuid }) => { + const payload = { + fileNames: ['image.png'], + conversationId: uuid, + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + test.each(invalidUUIDs)('invalid messageId UUID $name fails', ({ uuid }) => { + const payload = { + fileNames: ['image.png'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: uuid + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Files Array Validations', () => { + const validFilesArrays = [ + { name: 'single file', fileNames: ['image.png'] }, + { name: 'multiple files', fileNames: ['image1.png', 'image2.jpg', 'document.pdf'] }, + { + name: `maximum files (${MAX_FILE_UPLOADS_PER_BATCH})`, + fileNames: Array.from({ length: MAX_FILE_UPLOADS_PER_BATCH }, (_, i) => `file${i}.png`) + } + ]; + + test.each(validFilesArrays)('$name succeeds', ({ fileNames }) => { + const payload = { + fileNames, + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidFilesArrays = [ + { + name: `too many files (${MAX_FILE_UPLOADS_PER_BATCH + 1})`, + fileNames: Array.from({ length: MAX_FILE_UPLOADS_PER_BATCH + 1 }, (_, i) => `file${i}.png`) + }, + { name: 'null array', fileNames: null }, + { name: 'non-array value', fileNames: 'not-an-array' } + ]; + + test.each(invalidFilesArrays)('$name fails', ({ fileNames }) => { + const payload = { + fileNames, + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Path Traversal Security Tests', () => { + const pathTraversalAttacks = [ + { name: 'Windows path traversal with ..\\', fileName: '..\\..\\windows\\system32\\config\\sam.txt' }, + { name: 'mixed path separators', fileName: '../..\\config/secrets.pdf' }, + { name: 'absolute Unix path', fileName: '/etc/passwd.txt' }, + { name: 'absolute Windows path', fileName: 'C:\\Windows\\System32\\config\\sam.txt' }, + { name: 'URL encoded path traversal', fileName: '%2e%2e%2f%2e%2e%2fpasswd.txt' }, + { name: 'double encoded path traversal', fileName: '%252e%252e%252fpasswd.txt' }, + { name: 'current directory reference', fileName: './file.pdf' }, + { name: 'multiple path separators', fileName: 'folder//subfolder\\file.docx' }, + { name: 'path with null byte', fileName: 'file.txt\0.png' }, + { name: 'path with unicode separators', fileName: 'file\u2044name.jpg' }, + { name: 'long path traversal chain', fileName: '../../../../../../../../../etc/passwd.txt' } + ]; + + test.each(pathTraversalAttacks)('$name is blocked', ({ fileName }) => { + const payload = { + fileNames: [fileName], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('File Name Pattern Validation', () => { + test('pattern correctly validates the constant is properly formatted', () => { + const pattern = new RegExp(MULTIMODAL_FILENAME_PATTERN); + + // Test that the pattern is a valid regex + expect(pattern).toBeInstanceOf(RegExp); + expect(pattern.test('test.png')).toBe(true); + expect(pattern.test('test.exe')).toBe(false); + expect(pattern.test('../test.pdf')).toBe(false); + }); + }); + + describe('Additional Properties and Edge Cases', () => { + const invalidCases = [ + { + name: 'additional properties in root', + payload: { + fileNames: ['image.png'], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321', + extraField: 'not allowed' + } + }, + + { + name: 'fileNames array with null item', + payload: { + fileNames: [null], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + }, + { + name: 'fileNames array with non-string item', + payload: { + fileNames: [123], + conversationId: '12345678-1234-1234-1234-123456789012', + messageId: '87654321-4321-4321-4321-210987654321' + } + } + ]; + + test.each(invalidCases)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/multimodal/files-upload-response-body.test.ts b/source/infrastructure/test/api/model-schema/multimodal/files-upload-response-body.test.ts new file mode 100644 index 00000000..0a01b29c --- /dev/null +++ b/source/infrastructure/test/api/model-schema/multimodal/files-upload-response-body.test.ts @@ -0,0 +1,698 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { filesUploadResponseSchema } from '../../../../lib/api/model-schema/multimodal/files-upload-response-body'; +import { checkValidationSucceeded, checkValidationFailed } from '../shared/utils'; +import { Validator } from 'jsonschema'; + +describe('Testing Multimodal Files Upload Response API schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + schema = filesUploadResponseSchema; + validator = new Validator(); + }); + + describe('Valid Payloads', () => { + const validPayloads = [ + { + name: 'minimal valid response with single upload', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url', + formFields: { + key: 'user-123/conversation-456/message-789/file-abc', + 'x-amz-meta-userid': 'user-123' + }, + fileName: 'image.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'valid response with multiple uploads', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url-1', + formFields: { + key: 'user-123/conversation-456/message-789/file-abc1', + 'Content-Type': 'image/png' + }, + fileName: 'image1.png', + fileKey: 'user-123/conversation-456/message-789/file-abc1', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + }, + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url-2', + formFields: { + key: 'user-123/conversation-456/message-789/file-abc2', + 'Content-Type': 'application/pdf' + }, + fileName: 'document.pdf', + fileKey: 'user-123/conversation-456/message-789/file-abc2', + expiresIn: 1800, + createdAt: '2023-12-01T10:05:00.000Z' + }, + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url-3', + formFields: { + key: 'user-123/conversation-456/message-789/file-abc3', + 'Content-Type': 'text/csv' + }, + fileName: 'data.csv', + fileKey: 'user-123/conversation-456/message-789/file-abc3', + expiresIn: 7200, + createdAt: '2023-12-01T10:10:00.000Z' + } + ] + } + }, + { + name: 'valid response with complex file names and URLs', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.us-west-2.amazonaws.com/my-bucket/path/to/file', + formFields: { + key: 'user-12345678-1234-1234-1234-123456789012/conversation-87654321-4321-4321-4321-210987654321/message-11111111-2222-3333-4444-555555555555/file-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', + 'x-amz-meta-userid': 'user-12345678-1234-1234-1234-123456789012', + 'x-amz-meta-filename': 'my complex file name v2.png', + 'Content-Type': 'image/png' + }, + fileName: 'my complex file name v2.png', + fileKey: + 'user-12345678-1234-1234-1234-123456789012/conversation-87654321-4321-4321-4321-210987654321/message-11111111-2222-3333-4444-555555555555/file-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.123Z', + error: null + } + ] + } + }, + { + name: 'valid response with error field as string', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url', + formFields: { + key: 'user-123/conversation-456/message-789/file-abc' + }, + fileName: 'failed-file.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + error: 'File size exceeds maximum allowed limit' + } + ] + } + }, + { + name: 'valid response with mixed success and error uploads', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url-1', + formFields: { + key: 'user-123/conversation-456/message-789/file-abc1' + }, + fileName: 'success-file.png', + fileKey: 'user-123/conversation-456/message-789/file-abc1', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + error: null + }, + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url-2', + formFields: { + key: 'user-123/conversation-456/message-789/file-abc2' + }, + fileName: 'failed-file.png', + fileKey: 'user-123/conversation-456/message-789/file-abc2', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + error: 'Unsupported file type' + } + ] + } + } + ]; + + test.each(validPayloads)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Required Fields Validations', () => { + const missingFieldTests = [ + { + name: 'missing uploads array', + payload: {} + }, + { + name: 'missing uploadUrl in upload item', + payload: { + uploads: [ + { + formFields: { key: 'test' }, + fileName: 'image.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing formFields in upload item', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url', + fileName: 'image.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing fileName in upload item', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url', + formFields: { key: 'test' }, + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing fileKey in upload item', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url', + formFields: { key: 'test' }, + fileName: 'image.png', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing expiresIn in upload item', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url', + formFields: { key: 'test' }, + fileName: 'image.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'missing createdAt in upload item', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/presigned-url', + formFields: { key: 'test' }, + fileName: 'image.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600 + } + ] + } + } + ]; + + test.each(missingFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Error Field Validations', () => { + const errorFieldTests = [ + { + name: 'error field as null', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + error: null + } + ] + }, + shouldSucceed: true + }, + { + name: 'error field as string', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + error: 'File processing failed' + } + ] + }, + shouldSucceed: true + }, + { + name: 'error field omitted (optional)', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + }, + shouldSucceed: true + }, + { + name: 'error field as empty string', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + error: '' + } + ] + }, + shouldSucceed: false + }, + { + name: 'error field as number', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + error: 123 + } + ] + }, + shouldSucceed: false + }, + { + name: 'error field as boolean', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + error: false + } + ] + }, + shouldSucceed: false + } + ]; + + test.each(errorFieldTests)('$name', ({ payload, shouldSucceed }) => { + const result = validator.validate(payload, schema); + if (shouldSucceed) { + checkValidationSucceeded(result); + } else { + checkValidationFailed(result); + } + }); + }); + + describe('Field Type Validations', () => { + const validFieldTests = [ + { + name: 'uploadUrl with valid URI format', + payload: { + uploads: [ + { + uploadUrl: 'https://my-bucket.s3.us-east-1.amazonaws.com/', + formFields: { key: 'test' }, + fileName: 'image.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'formFields with multiple string values', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { + key: 'user-123/conversation-456/message-789/file-abc', + 'x-amz-meta-userid': 'user-123', + 'x-amz-meta-filename': 'test.json', + 'Content-Type': 'application/json', + tagging: 'fileType=multimodal&uploadedBy=user123' + }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + + { + name: 'UUID-based file key', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'document.pdf', + fileKey: + 'user-12345678-1234-1234-1234-123456789012/conversation-87654321-4321-4321-4321-210987654321/message-11111111-2222-3333-4444-555555555555/file-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'expiresIn with positive integer', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 1, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'createdAt with valid ISO 8601 format', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.123Z' + } + ] + } + } + ]; + + test.each(validFieldTests)('$name succeeds', ({ payload }) => { + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + const invalidFieldTests = [ + { + name: 'expiresIn with zero', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 0, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'expiresIn with negative number', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: -1, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'invalid data types', + payload: { + uploads: [ + { + uploadUrl: 123, // Should be string + formFields: 'invalid', // Should be object + fileName: 456, // Should be string + fileKey: 789, // Should be string + expiresIn: 'invalid', // Should be integer + createdAt: 101112 // Should be string + } + ] + } + }, + { + name: 'formFields with non-string values', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { + key: 'test', + invalidField: 123 // Should be string + }, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'empty string fields', + payload: { + uploads: [ + { + uploadUrl: '', // Should not be empty + formFields: { key: 'test' }, + fileName: '', // Should not be empty + fileKey: '', // Should not be empty + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'empty formFields object', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: {}, + fileName: 'test.json', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + } + }, + { + name: 'null values', + payload: { + uploads: [ + { + uploadUrl: null, + formFields: null, + fileName: null, + fileKey: null, + expiresIn: null, + createdAt: null + } + ] + } + } + ]; + + test.each(invalidFieldTests)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Multiple Uploads Validations', () => { + const multipleUploadsTests = [ + { + name: 'multiple valid uploads', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key1', + formFields: { key: 'test1' }, + fileName: 'image1.png', + fileKey: 'user-123/conversation-456/message-789/file-abc1', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + }, + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key2', + formFields: { key: 'test2' }, + fileName: 'image2.jpg', + fileKey: 'user-123/conversation-456/message-789/file-abc2', + expiresIn: 1800, + createdAt: '2023-12-01T10:05:00.000Z' + } + ] + }, + shouldSucceed: true + }, + { + name: 'one valid and one invalid upload', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key1', + formFields: { key: 'test1' }, + fileName: 'image1.png', + fileKey: 'user-123/conversation-456/message-789/file-abc1', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + }, + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key2', + formFields: { key: 'test2' }, + fileName: '', // Invalid empty fileName + fileKey: 'user-123/conversation-456/message-789/file-abc2', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ] + }, + shouldSucceed: false + } + ]; + + test.each(multipleUploadsTests)('$name', ({ payload, shouldSucceed }) => { + const result = validator.validate(payload, schema); + if (shouldSucceed) { + checkValidationSucceeded(result); + } else { + checkValidationFailed(result); + } + }); + }); + + describe('Additional Properties and Edge Cases', () => { + const invalidCases = [ + { + name: 'additional properties in root', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'image.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z' + } + ], + extraField: 'not allowed' + } + }, + { + name: 'additional properties in upload item', + payload: { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + formFields: { key: 'test' }, + fileName: 'image.png', + fileKey: 'user-123/conversation-456/message-789/file-abc', + expiresIn: 3600, + createdAt: '2023-12-01T10:00:00.000Z', + extraProperty: 'not allowed' + } + ] + } + }, + { + name: 'null uploads array', + payload: { + uploads: null + } + }, + { + name: 'uploads array with null item', + payload: { + uploads: [null] + } + }, + { + name: 'uploads array with non-object item', + payload: { + uploads: ['invalid'] + } + }, + { + name: 'non-array uploads', + payload: { + uploads: 'not-an-array' + } + } + ]; + + test.each(invalidCases)('$name fails', ({ payload }) => { + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); diff --git a/source/infrastructure/test/api/model-schema/shared/auth-params.test.ts b/source/infrastructure/test/api/model-schema/shared/auth-params.test.ts new file mode 100644 index 00000000..6ef75f44 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/shared/auth-params.test.ts @@ -0,0 +1,248 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { checkValidationSucceeded, checkValidationFailed } from './utils'; +import { USE_CASE_TYPES, AUTHENTICATION_PROVIDERS, BEDROCK_INFERENCE_TYPES, CHAT_PROVIDERS } from '../../../../lib/utils/constants'; +import { deployUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/deploy-usecase-body'; +import { updateUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/update-usecase-body'; + +describe('Testing AuthParams schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + validator = new Validator(); + }); + + describe('AuthenticationParams Creation Validation', () => { + + beforeAll(() => { + schema = deployUseCaseBodySchema; + }); + + describe('User Pool Id provided', () => { + it('Valid User Pool Id provided', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + }, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'us-east-1_111111111111' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Valid Pool Client Id provided', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + }, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'us-east-1_111111111111', + ExistingUserPoolClientId: '1111111111111111111111111111' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Input provided', () => { + it('Empty Authentication Params', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel' + } + }, + AuthenticationParams: {} + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Unsupported Authentication Provider', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel' + } + }, + AuthenticationParams: { + AuthenticationProvider: 'unsupported' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Invalid User Pool Id provided', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel' + } + }, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'invalid user pool' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('No CognitoParams provided', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel' + } + }, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('No User Pool provided', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel' + } + }, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: {} + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + }); + + describe('AuthenticationParams Update Validation', () => { + + beforeAll(() => { + schema = updateUseCaseBodySchema; + }); + + describe('User Pool Id provided', () => { + it('Valid User Pool Id provided', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'us-east-1_111111111111' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Valid Pool Client Id provided', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'us-east-1_111111111111', + ExistingUserPoolClientId: '1111111111111111111111111111' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('Invalid Input provided', () => { + it('Empty Authentication Params', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + AuthenticationParams: {} + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Unsupported Authentication Provider', () => { + const payload = { + AuthenticationParams: { + UseCaseType: USE_CASE_TYPES.TEXT, + AuthenticationProvider: 'unsupported' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Invalid User Pool Id provided', () => { + const payload = { + AuthenticationParams: { + UseCaseType: USE_CASE_TYPES.TEXT, + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'invalid user pool' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('No CognitoParams provided', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('No User Pool provided', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: {} + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + }); +}); \ No newline at end of file diff --git a/source/infrastructure/test/api/model-schema/update-usecase-body.test.ts b/source/infrastructure/test/api/model-schema/shared/bedrock-params.test.ts similarity index 54% rename from source/infrastructure/test/api/model-schema/update-usecase-body.test.ts rename to source/infrastructure/test/api/model-schema/shared/bedrock-params.test.ts index e3312c98..d5adcf16 100644 --- a/source/infrastructure/test/api/model-schema/update-usecase-body.test.ts +++ b/source/infrastructure/test/api/model-schema/shared/bedrock-params.test.ts @@ -1,379 +1,618 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { updateUseCaseBodySchema } from '../../../lib/api/model-schema/update-usecase-body'; -import { checkValidationSucceeded, checkValidationFailed } from './utils'; import { Validator } from 'jsonschema'; -import { - AUTHENTICATION_PROVIDERS, +import { checkValidationSucceeded, checkValidationFailed } from './utils'; +import { + USE_CASE_TYPES, + CHAT_PROVIDERS, BEDROCK_INFERENCE_TYPES, - CHAT_PROVIDERS, - CONVERSATION_MEMORY_TYPES, - KNOWLEDGE_BASE_TYPES, + KNOWLEDGE_BASE_TYPES, + DEFAULT_KENDRA_EDITION, MAX_KENDRA_NUMBER_OF_DOCS, MAX_SCORE_THRESHOLD, MIN_KENDRA_NUMBER_OF_DOCS, - MIN_SCORE_THRESHOLD, - USE_CASE_TYPES -} from '../../../lib/utils/constants'; + MIN_SCORE_THRESHOLD +} from '../../../../lib/utils/constants'; +import { deployUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/deploy-usecase-body'; +import { updateUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/update-usecase-body'; -describe('Testing API schema validation', () => { +describe('Testing KnowledgeBaseParams schema validation', () => { let schema: any; let validator: Validator; const testKendraIndexId = '11111111-1111-1111-1111-111111111111'; beforeAll(() => { - schema = updateUseCaseBodySchema; validator = new Validator(); }); - describe('LlmParamsValidations', () => { - describe('Bedrock deployments', () => { - it('Test Bedrock update', () => { + describe('KnowledgeBaseParams Create validations', () => { + + beforeAll(() => { + schema = deployUseCaseBodySchema; + }); + + describe('Kendra validations', () => { + it('New Kendra index succeeds', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + KendraIndexName: 'test' } } }; checkValidationSucceeded(validator.validate(payload, schema)); }); - it('Test Bedrock update with arn', () => { + it('New Kendra index fails for no name', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelArn: 'arn:aws:bedrock:us-east-1:111111111111:custom-model/test.1/111111111111', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.PROVISIONED + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + QueryCapacityUnits: 2, + StorageCapacityUnits: 1, + KendraIndexEdition: DEFAULT_KENDRA_EDITION } } }; - checkValidationSucceeded(validator.validate(payload, schema)); + checkValidationFailed(validator.validate(payload, schema)); }); - it('Test Bedrock update with an InferenceProfileId', () => { + it('New Kendra index succeeds with additional params', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - InferenceProfileId: 'fakeprofile', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + KendraIndexName: 'test', + QueryCapacityUnits: 2, + StorageCapacityUnits: 1, + KendraIndexEdition: DEFAULT_KENDRA_EDITION } } }; checkValidationSucceeded(validator.validate(payload, schema)); }); - it('Test Bedrock update with a guardrail', () => { + it('Existing Kendra index succeeds', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { + BedrockLlmParams: { ModelId: 'fakemodel', - GuardrailIdentifier: 'fakeid', - GuardrailVersion: 'DRAFT', BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: testKendraIndexId } } }; checkValidationSucceeded(validator.validate(payload, schema)); }); - it('Test Bedrock update to no guardrail id', () => { + it('Existing Kendra index fails when providing extra params', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: null, - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: testKendraIndexId, + StorageCapacityUnits: 1 } } }; - checkValidationSucceeded(validator.validate(payload, schema)); + checkValidationFailed(validator.validate(payload, schema)); }); - it('Test Bedrock update to no guardrail', () => { + it('Kendra index fails when providing bad index id', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: null, - GuardrailVersion: null, - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: 'garbage' } } }; - checkValidationSucceeded(validator.validate(payload, schema)); + checkValidationFailed(validator.validate(payload, schema)); }); - it('Test Bedrock update to no guardrail version', () => { + it('Kendra index fails when providing both new and existing params', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailVersion: null, - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + KendraIndexName: 'test', + ExistingKendraIndexId: testKendraIndexId } } }; - checkValidationSucceeded(validator.validate(payload, schema)); + checkValidationFailed(validator.validate(payload, schema)); }); - it('Test Bedrock update failed with bad guardrail', () => { + it('Kendra index succeeds when RBAC enabled flag is provided', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: 1, - GuardrailVersion: 'DRAFT' + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + KendraIndexName: 'test', + RoleBasedAccessControlEnabled: true } } }; - checkValidationFailed(validator.validate(payload, schema)); + checkValidationSucceeded(validator.validate(payload, schema)); }); - it('Test Bedrock update failed, bad params', () => { + it('fails when NoDocsFoundResponse is empty', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - OtherLlmParams: { - ModelId: 'fakemodel', - ApiKey: 'fakekey' + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + NoDocsFoundResponse: '', + KendraKnowledgeBaseParams: { + KendraIndexName: 'test' } } }; checkValidationFailed(validator.validate(payload, schema)); }); - it('Test Bedrock update failed, bad arn', () => { + it('passes when NoDocsFoundResponse has valid string', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, UseCaseName: 'test', - BedrockLlmParams: { + LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - ModelArn: 'garbage' + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + KendraIndexName: 'test' + }, + NoDocsFoundResponse: 'test message' } }; - checkValidationFailed(validator.validate(payload, schema)); + checkValidationSucceeded(validator.validate(payload, schema)); }); + }); - it('Test Bedrock update failed, bad InferenceProfileId', () => { + describe('Bedrock knowledge base validations', () => { + it('Bedrock succeeds', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, UseCaseName: 'test', - BedrockLlmParams: { + LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - InferenceProfileId: '_garbage' + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' + } } }; - checkValidationFailed(validator.validate(payload, schema)); + checkValidationSucceeded(validator.validate(payload, schema)); }); - it('Test Bedrock update failed, provide both a ModelId and InferenceProfileId', () => { + it('Bedrock with optional params', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, UseCaseName: 'test', - BedrockLlmParams: { + LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - ModelId: 'fakemodel', - InferenceProfileId: 'fakeprofile' + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid', + RetrievalFilter: {}, + OverrideSearchType: 'SEMANTIC' + } } }; - checkValidationFailed(validator.validate(payload, schema)); + checkValidationSucceeded(validator.validate(payload, schema)); }); - it('Test Bedrock update failed, bad guardrail id', () => { + it('Bedrock fails for missing id', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: '_garbage', - GuardrailVersion: 'DRAFT' - } + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: {} } }; checkValidationFailed(validator.validate(payload, schema)); }); - it('Test Bedrock deployment, FeedbackEnabled passes', () => { + it('Bedrock fails for bad id', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, - FeedbackParams: { - FeedbackEnabled: true - }, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - GuardrailIdentifier: null, - GuardrailVersion: null, - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: '?!' } } }; - checkValidationSucceeded(validator.validate(payload, schema)); + checkValidationFailed(validator.validate(payload, schema)); }); - it('Test Bedrock deployment, FeedbackParams additional fields fail', () => { + it('Bedrock fails for a bad retrieval filter type', () => { const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - FeedbackParams: { - FeedbackEnabled: true, - FeedbackParameters: { 'key': 'value' } + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true }, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456' + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid', + RetrievalFilter: 'garbage' } } }; checkValidationFailed(validator.validate(payload, schema)); }); - }); - describe('SageMaker deployments', () => { - it('Test SageMaker update', () => { + it('Bedrock fails for a bad OverrideSearchType', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: 'fake-endpoint', - ModelInputPayloadSchema: {}, - ModelOutputJSONPath: '$[0].generated_text' + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid', + OverrideSearchType: 'garbage' } } }; - checkValidationSucceeded(validator.validate(payload, schema)); + checkValidationFailed(validator.validate(payload, schema)); }); - it('Test SageMaker update only one item', () => { + it('fails when NoDocsFoundResponse is empty', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { - SageMakerLlmParams: { - ModelInputPayloadSchema: {} + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + NoDocsFoundResponse: '', + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' } } }; - checkValidationSucceeded(validator.validate(payload, schema)); + checkValidationFailed(validator.validate(payload, schema)); }); - it('Test SageMaker update failed, invalid InferenceEndpoint', () => { + it('passes when NoDocsFoundResponse has valid string', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: '$%', - ModelInputPayloadSchema: {}, - ModelOutputJSONPath: '$[0].generated_text' - } + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' + }, + NoDocsFoundResponse: 'test message' } }; - checkValidationFailed(validator.validate(payload, schema)); + checkValidationSucceeded(validator.validate(payload, schema)); }); + }); - it('Test SageMaker update failed, invalid ModelInputPayloadSchema', () => { + describe('General knowledge base validations', () => { + it('setting misc parameters succeeds', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: 'fake-endpoint', - ModelInputPayloadSchema: 'garbage', - ModelOutputJSONPath: '$[0].generated_text' - } + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' + }, + NumberOfDocs: 3, + ScoreThreshold: 0.5, + ReturnSourceDocs: true } }; - checkValidationFailed(validator.validate(payload, schema)); + checkValidationSucceeded(validator.validate(payload, schema)); }); - it('Test SageMaker update failed, invalid ModelOutputJSONPath', () => { + it('setting NumberOfDocs below range fails', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - EndpointName: 'fake-endpoint', - ModelInputPayloadSchema: 'garbage', - ModelOutputJSONPath: '{}' - } + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' + }, + NumberOfDocs: MIN_KENDRA_NUMBER_OF_DOCS - 1 } }; checkValidationFailed(validator.validate(payload, schema)); }); - }); - describe('Advanced model params', () => { - it('Succeeds with advanced model params of all compatible types', () => { + it('setting NumberOfDocs above range fails', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { - ModelParams: { - Param1: { Value: 'hello', Type: 'string' }, - Param2: { Value: '1', Type: 'integer' }, - Param3: { Value: '1.0', Type: 'float' }, - Param4: { Value: 'true', Type: 'boolean' }, - Param5: { Value: JSON.stringify(['hello', 'world']), Type: 'list' }, - Param6: { Value: JSON.stringify({ 'hello': 'world' }), Type: 'dictionary' } - } + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' + }, + NumberOfDocs: MAX_KENDRA_NUMBER_OF_DOCS + 1 } }; - checkValidationSucceeded(validator.validate(payload, schema)); + checkValidationFailed(validator.validate(payload, schema)); }); - it('Fails with advanced model params of incompatible types', () => { + it('setting ScoreThreshold below range fails', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, BedrockLlmParams: { ModelId: 'fakemodel' }, - ModelParams: { - Param1: { Value: 'hello', Type: 'othertype' } - } + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' + }, + ScoreThreshold: MIN_SCORE_THRESHOLD - 1 } }; checkValidationFailed(validator.validate(payload, schema)); }); - it('Fails with advanced model params with non-string value', () => { + it('setting ScoreThreshold above range fails', () => { const payload = { UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', LlmParams: { ModelProvider: CHAT_PROVIDERS.BEDROCK, BedrockLlmParams: { ModelId: 'fakemodel' }, - ModelParams: { - Param1: { Value: 1.0, Type: 'float' } - } + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' + }, + ScoreThreshold: MAX_SCORE_THRESHOLD + 1 + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + it('Can not provide KnowledgeBaseParams if not using RAG', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: false + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: testKendraIndexId + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Can not provide BedrockKnowledgeBaseParams if not using Kendra', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.KENDRA, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: testKendraIndexId + }, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Can not provide KendraKnowledgeBaseParams if not using Bedrock', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + RAGEnabled: true + }, + KnowledgeBaseParams: { + KnowledgeBaseType: KNOWLEDGE_BASE_TYPES.BEDROCK, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: testKendraIndexId + }, + BedrockKnowledgeBaseParams: { + BedrockKnowledgeBaseId: 'testid' } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Can not validate a bad KnowledgeBaseType', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + RAGEnabled: false + }, + KnowledgeBaseParams: { + KnowledgeBaseType: 'garbage' + } + }; + checkValidationFailed(validator.validate(payload, schema)); }); }); - describe('KnowledgeBaseParams validations', () => { + + describe('KnowledgeBaseParams Update validations', () => { + + beforeAll(() => { + schema = updateUseCaseBodySchema; + }); + describe('Kendra validations', () => { it('Updating Kendra index ID succeeds', () => { const payload = { @@ -707,421 +946,4 @@ describe('Testing API schema validation', () => { checkValidationFailed(validator.validate(payload, schema)); }); }); - - describe('VpcParams validations', () => { - const testVpcId = 'vpc-11111111'; - const testSubnetId = 'subnet-11111111'; - const testSgId = 'sg-11111111'; - - it('Updating subnets succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - VpcParams: { - ExistingPrivateSubnetIds: [testSubnetId] - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Updating security groups succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - VpcParams: { - ExistingSecurityGroupIds: [testSgId] - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Attempting to pass a VPC ID fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - VpcParams: { - ExistingVpcId: testVpcId - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('Email Validations', () => { - it('Email is valid succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - }, - DefaultUserEmail: 'testuser@example.com' - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Email is invalid fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - DefaultUserEmail: 'garbage' - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('ConversationMemoryParams Validation', () => { - it('ConversationMemoryParams is valid succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - }, - ConversationMemoryParams: { - ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB, - HumanPrefix: 'human', - AiPrefix: 'ai', - ChatHistoryLength: 5 - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('ConversationMemoryParams is invalid fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - ConversationMemoryParams: { - ConversationMemoryType: 'garbage' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('ConversationMemoryParams bad param fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelId: 'fakemodel' } - }, - ConversationMemoryParams: { - ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB, - HumanPrefix: 'human', - AiPrefix: 'ai', - ChatHistoryLength: -1 - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('Multiple Settings Validations', () => { - it('Multiple Settings are valid succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - BedrockLlmParams: { - ModelId: 'fakemodel', - BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START - } - }, - ConversationMemoryParams: { - ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB - }, - VpcParams: { - ExistingPrivateSubnetIds: ['subnet-11111111'] - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Multiple Settings are valid succeeds, no LLM params', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - ConversationMemoryParams: { - ConversationMemoryType: CONVERSATION_MEMORY_TYPES.DYNAMODB - }, - VpcParams: { - ExistingPrivateSubnetIds: ['subnet-11111111'] - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Multiple Settings where 1 is invalid fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK - }, - ConversationMemoryParams: { - ConversationMemoryType: 'garbage' - }, - VpcParams: { - ExistingVpcId: 'garbage' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - describe('Agent use case update validations', () => { - it('Valid AgentParams succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456', - EnableTrace: true - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('AgentParams with missing optional field succeeds', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('AgentParams with missing required field fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('AgentId exceeding maxLength fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent1234567890', // 11 characters, exceeds maxLength of 10 - AgentAliasId: 'alias456' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('AgentAliasId with invalid characters fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias_456' // Contains underscore, which is not allowed - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('EnableTrace with non-boolean value fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456', - EnableTrace: 'true' // Should be a boolean, not a string - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Additional properties in BedrockAgentParams fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456', - ExtraField: 'should not be here' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Additional properties in AgentParams fails', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456' - }, - ExtraField: 'should not be here' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Invalid agent type leads to failure', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - AgentParams: { - AgentType: 'invalid' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Missing UseCaseType fails', () => { - const payload = { - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - - it('Test Agent deployment, FeedbackEnabled passes', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - FeedbackParams: { - FeedbackEnabled: true - }, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Test Agent deployment, FeedbackParams additional fields fail', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.AGENT, - FeedbackParams: { - FeedbackEnabled: true, - FeedbackParameters: { 'key': 'value' } - }, - AgentParams: { - BedrockAgentParams: { - AgentId: 'agent123', - AgentAliasId: 'alias456' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - describe('AuthenticationParams Validation', () => { - describe('User Pool Id provided', () => { - it('Valid User Pool Id provided', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: { - ExistingUserPoolId: 'us-east-1_111111111111' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - - it('Valid Pool Client Id provided', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: { - ExistingUserPoolId: 'us-east-1_111111111111', - ExistingUserPoolClientId: '1111111111111111111111111111' - } - } - }; - checkValidationSucceeded(validator.validate(payload, schema)); - }); - }); - - describe('Invalid Input provided', () => { - it('Empty Authentication Params', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - AuthenticationParams: {} - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Unsupported Authentication Provider', () => { - const payload = { - AuthenticationParams: { - UseCaseType: USE_CASE_TYPES.TEXT, - AuthenticationProvider: 'unsupported' - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('Invalid User Pool Id provided', () => { - const payload = { - AuthenticationParams: { - UseCaseType: USE_CASE_TYPES.TEXT, - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: { - ExistingUserPoolId: 'invalid user pool' - } - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('No CognitoParams provided', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - - it('No User Pool provided', () => { - const payload = { - UseCaseType: USE_CASE_TYPES.TEXT, - AuthenticationParams: { - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, - CognitoParams: {} - } - }; - checkValidationFailed(validator.validate(payload, schema)); - }); - }); - }); -}); +}); \ No newline at end of file diff --git a/source/infrastructure/test/api/model-schema/shared/llm-params.test.ts b/source/infrastructure/test/api/model-schema/shared/llm-params.test.ts new file mode 100644 index 00000000..28762dd0 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/shared/llm-params.test.ts @@ -0,0 +1,948 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { checkValidationSucceeded, checkValidationFailed } from './utils'; +import { USE_CASE_TYPES, CHAT_PROVIDERS, BEDROCK_INFERENCE_TYPES } from '../../../../lib/utils/constants'; +import { deployUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/deploy-usecase-body'; +import { updateUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/update-usecase-body'; + +describe('Testing LlmParams schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + validator = new Validator(); + }); + + describe('LlmCreateParamsValidations', () => { + + beforeAll(() => { + schema = deployUseCaseBodySchema; + }); + + describe('Bedrock deployments', () => { + it('Test Bedrock deployment', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment with a provisioned model', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelArn: 'arn:aws:bedrock:us-east-1:111111111111:custom-model/test.1/111111111111', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.PROVISIONED + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment with a guardrail', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: 'fakeid', + GuardrailVersion: 'DRAFT', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment with an InferenceProfileId', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + InferenceProfileId: 'fakeprofile', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, missing ModelId for QUICK_START', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, missing ModelId for OTHER_FOUNDATION', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, missing InferenceProfileId for INFERENCE_PROFILE', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, missing ModelArn for PROVISIONED', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.PROVISIONED + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, missing BedrockInferenceType', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, missing params', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, bad arn', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + ModelArn: 'garbage' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, no guardrail version', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: 'fakeid' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, no guardrail id', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailVersion: 'DRAFT' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, provided ModelId and InferenceProfileId', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + InferenceProfileId: 'fakeprofile' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, bad InferenceProfileId', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + InferenceProfileId: '_garbage' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, bad guardrail version', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: 'fakeid', + GuardrailVersion: 'garbage' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment failed, bad guardrail id', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: '_garbage', + GuardrailVersion: 'DRAFT' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment, FeedbackEnabled passes', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment, FeedbackParams additional fields fail', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + FeedbackParams: { + FeedbackEnabled: true, + FeedbackParameters: { 'key': 'value' } + }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment, MultimodalParams enabled passes', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + MultimodalParams: { + MultimodalEnabled: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment, MultimodalParams disabled passes', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + MultimodalParams: { + MultimodalEnabled: false + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment, MultimodalParams additional fields fail', () => { + const payload = { + UseCaseName: 'test', + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + MultimodalParams: { + MultimodalEnabled: true, + SupportedFileTypes: ['image/jpeg'] + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment, RestApi Id resources pass', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + ExistingRestApiId: 'test-id', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + }); + + describe('SageMaker deployments', () => { + it('Test SageMaker deployment', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: 'fake-endpoint', + ModelInputPayloadSchema: {}, + ModelOutputJSONPath: '$[0].generated_text' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test SageMaker deployment failed, missing EndpointName', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + ModelInputPayloadSchema: {}, + ModelOutputJSONPath: '$[0].generated_text' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test SageMaker deployment failed, invalid EndpointName', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: '$%', + ModelInputPayloadSchema: {}, + ModelOutputJSONPath: '$[0].generated_text' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test SageMaker deployment failed, missing ModelInputPayloadSchema', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: 'fake-endpoint', + ModelOutputJSONPath: '$[0].generated_text' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test SageMaker deployment failed, invalid ModelInputPayloadSchema', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: 'fake-endpoint', + ModelInputPayloadSchema: 'garbage', + ModelOutputJSONPath: '$[0].generated_text' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test SageMaker deployment failed, invalid ModelOutputJSONPath', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: 'fake-endpoint', + ModelInputPayloadSchema: 'garbage', + ModelOutputJSONPath: '{}' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test SageMaker deployment failed, missing ModelOutputJSONPath', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: 'fake-endpoint', + ModelInputPayloadSchema: {} + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Advanced model params', () => { + it('Succeeds with advanced model params of all compatible types', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + ModelParams: { + Param1: { Value: 'hello', Type: 'string' }, + Param2: { Value: '1', Type: 'integer' }, + Param3: { Value: '1.0', Type: 'float' }, + Param4: { Value: 'true', Type: 'boolean' }, + Param5: { Value: JSON.stringify(['hello', 'world']), Type: 'list' }, + Param6: { Value: JSON.stringify({ 'hello': 'world' }), Type: 'dictionary' } + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Fails with advanced model params of incompatible types', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + ModelParams: { + Param1: { Value: 'hello', Type: 'othertype' } + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Fails with advanced model params with non-string value', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + ModelParams: { + Param1: { Value: 1.0, Type: 'float' } + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + }); + + describe('LlmUpdateParamsValidations', () => { + + beforeAll(() => { + schema = updateUseCaseBodySchema; + }); + + describe('Bedrock deployments', () => { + it('Test Bedrock update', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update with arn', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelArn: 'arn:aws:bedrock:us-east-1:111111111111:custom-model/test.1/111111111111', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.PROVISIONED + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update with an InferenceProfileId', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + InferenceProfileId: 'fakeprofile', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILE + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update with a guardrail', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: 'fakeid', + GuardrailVersion: 'DRAFT', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update to no guardrail id', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: null, + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update to no guardrail', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: null, + GuardrailVersion: null, + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update to no guardrail version', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailVersion: null, + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update failed with bad guardrail', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: 1, + GuardrailVersion: 'DRAFT' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock update failed, bad params', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + OtherLlmParams: { + ModelId: 'fakemodel', + ApiKey: 'fakekey' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock update failed, bad arn', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + BedrockLlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + ModelArn: 'garbage' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock update failed, bad InferenceProfileId', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + BedrockLlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + InferenceProfileId: '_garbage' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock update failed, provide both a ModelId and InferenceProfileId', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + BedrockLlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + ModelId: 'fakemodel', + InferenceProfileId: 'fakeprofile' + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock update failed, bad guardrail id', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: '_garbage', + GuardrailVersion: 'DRAFT' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment, FeedbackEnabled passes', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + GuardrailIdentifier: null, + GuardrailVersion: null, + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock deployment, FeedbackParams additional fields fail', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.AGENT, + FeedbackParams: { + FeedbackEnabled: true, + FeedbackParameters: { 'key': 'value' } + }, + AgentParams: { + BedrockAgentParams: { + AgentId: 'agent123', + AgentAliasId: 'alias456' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test Bedrock update, MultimodalParams enabled passes', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + MultimodalParams: { + MultimodalEnabled: true + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update, MultimodalParams disabled passes', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + MultimodalParams: { + MultimodalEnabled: false + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test Bedrock update, MultimodalParams additional fields fail', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + MultimodalParams: { + MultimodalEnabled: true, + MaxFileSize: 5242880 + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('SageMaker deployments', () => { + it('Test SageMaker update', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: 'fake-endpoint', + ModelInputPayloadSchema: {}, + ModelOutputJSONPath: '$[0].generated_text' + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test SageMaker update only one item', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + SageMakerLlmParams: { + ModelInputPayloadSchema: {} + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Test SageMaker update failed, invalid InferenceEndpoint', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: '$%', + ModelInputPayloadSchema: {}, + ModelOutputJSONPath: '$[0].generated_text' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test SageMaker update failed, invalid ModelInputPayloadSchema', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: 'fake-endpoint', + ModelInputPayloadSchema: 'garbage', + ModelOutputJSONPath: '$[0].generated_text' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Test SageMaker update failed, invalid ModelOutputJSONPath', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.SAGEMAKER, + SageMakerLlmParams: { + EndpointName: 'fake-endpoint', + ModelInputPayloadSchema: 'garbage', + ModelOutputJSONPath: '{}' + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + describe('Advanced model params', () => { + it('Succeeds with advanced model params of all compatible types', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelParams: { + Param1: { Value: 'hello', Type: 'string' }, + Param2: { Value: '1', Type: 'integer' }, + Param3: { Value: '1.0', Type: 'float' }, + Param4: { Value: 'true', Type: 'boolean' }, + Param5: { Value: JSON.stringify(['hello', 'world']), Type: 'list' }, + Param6: { Value: JSON.stringify({ 'hello': 'world' }), Type: 'dictionary' } + } + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Fails with advanced model params of incompatible types', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + ModelParams: { + Param1: { Value: 'hello', Type: 'othertype' } + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Fails with advanced model params with non-string value', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' }, + ModelParams: { + Param1: { Value: 1.0, Type: 'float' } + } + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + }); +}); \ No newline at end of file diff --git a/source/infrastructure/test/api/model-schema/utils.ts b/source/infrastructure/test/api/model-schema/shared/utils.ts similarity index 99% rename from source/infrastructure/test/api/model-schema/utils.ts rename to source/infrastructure/test/api/model-schema/shared/utils.ts index b161b93e..a6f62215 100644 --- a/source/infrastructure/test/api/model-schema/utils.ts +++ b/source/infrastructure/test/api/model-schema/shared/utils.ts @@ -20,4 +20,4 @@ export function checkValidationSucceeded(result: ValidatorResult) { export function checkValidationFailed(result: ValidatorResult) { expect(result.valid).toBeFalsy(); -} +} \ No newline at end of file diff --git a/source/infrastructure/test/api/model-schema/shared/vpc-params.test.ts b/source/infrastructure/test/api/model-schema/shared/vpc-params.test.ts new file mode 100644 index 00000000..69cbf380 --- /dev/null +++ b/source/infrastructure/test/api/model-schema/shared/vpc-params.test.ts @@ -0,0 +1,278 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Validator } from 'jsonschema'; +import { checkValidationSucceeded, checkValidationFailed } from './utils'; +import { BEDROCK_INFERENCE_TYPES, CHAT_PROVIDERS, USE_CASE_TYPES } from '../../../../lib/utils/constants'; +import { deployUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/deploy-usecase-body'; +import { updateUseCaseBodySchema } from '../../../../lib/api/model-schema/deployments/update-usecase-body'; +describe('Testing Vpc schema validation', () => { + let schema: any; + let validator: Validator; + + beforeAll(() => { + validator = new Validator(); + }); + + describe('VpcParams Creation validations', () => { + + beforeAll(() => { + schema = deployUseCaseBodySchema; + }); + + const testVpcId = 'vpc-11111111'; + const testSubnetId = 'subnet-11111111'; + const testSgId = 'sg-11111111'; + + it('No VPC succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + }, + VpcParams: { + VpcEnabled: false + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('No VPC fails due to a mismatch of params', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + VpcParams: { + VpcEnabled: false, + CreateNewVpc: true + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Create a VPC succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: true + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Create a VPC fails due to extra params', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: true, + ExistingVpcId: testVpcId + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Using an existing VPC succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + } + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: false, + ExistingVpcId: testVpcId, + ExistingPrivateSubnetIds: [testSubnetId], + ExistingSecurityGroupIds: [testSgId] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Using an existing VPC fails due to missing VPC ID', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: false, + ExistingPrivateSubnetIds: [testSubnetId], + ExistingSecurityGroupIds: [testSgId] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Using an existing VPC fails due to bad VPC ID', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: false, + ExistingVpcId: 'garbage', + ExistingPrivateSubnetIds: [testSubnetId], + ExistingSecurityGroupIds: [testSgId] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Using an existing VPC fails due to missing subnet IDs', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: false, + ExistingVpcId: testVpcId, + ExistingSecurityGroupIds: [testSgId] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Using an existing VPC fails due to bad subnet IDs', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: false, + ExistingVpcId: testVpcId, + ExistingPrivateSubnetIds: ['garbage'], + ExistingSecurityGroupIds: [testSgId] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Using an existing VPC fails due to missing security group IDs', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fakemodel', + BedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START + }, + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: false, + ExistingVpcId: testVpcId, + ExistingPrivateSubnetIds: [testSubnetId] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + + it('Using an existing VPC fails due to bad security group IDs', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + UseCaseName: 'test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { ModelId: 'fakemodel' } + }, + VpcParams: { + VpcEnabled: true, + CreateNewVpc: false, + ExistingVpcId: testVpcId, + ExistingPrivateSubnetIds: [testSubnetId], + ExistingSecurityGroupIds: ['garbage'] + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); + + + describe('VpcParams validations', () => { + + beforeAll(() => { + schema = updateUseCaseBodySchema; + }); + + const testVpcId = 'vpc-11111111'; + const testSubnetId = 'subnet-11111111'; + const testSgId = 'sg-11111111'; + + it('Updating subnets succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + VpcParams: { + ExistingPrivateSubnetIds: [testSubnetId] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Updating security groups succeeds', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + VpcParams: { + ExistingSecurityGroupIds: [testSgId] + } + }; + checkValidationSucceeded(validator.validate(payload, schema)); + }); + + it('Attempting to pass a VPC ID fails', () => { + const payload = { + UseCaseType: USE_CASE_TYPES.TEXT, + VpcParams: { + ExistingVpcId: testVpcId + } + }; + checkValidationFailed(validator.validate(payload, schema)); + }); + }); +}); \ No newline at end of file diff --git a/source/infrastructure/test/api/rest-request-processor.test.ts b/source/infrastructure/test/api/rest-request-processor.test.ts index eca0191d..58c9bf11 100644 --- a/source/infrastructure/test/api/rest-request-processor.test.ts +++ b/source/infrastructure/test/api/rest-request-processor.test.ts @@ -28,8 +28,8 @@ describe('When deploying', () => { }); }); - it('Should have lambdas for custom resource, management APIs, and Authorization', () => { - template.resourceCountIs('AWS::Lambda::Function', 4); + it('Should have lambdas for custom resource, management APIs, AgentCore auth, and Authorization', () => { + template.resourceCountIs('AWS::Lambda::Function', 7); template.hasResourceProperties('AWS::Lambda::Function', { 'Role': { @@ -219,6 +219,9 @@ function createTemplate(props: Partial): [cdk.assertions.Template new RestRequestProcessor(stack, 'WebSocketEndpoint', { useCaseManagementAPILambda: new lambda.Function(stack, 'chatLambda', mockLambdaFuncProps), modelInfoAPILambda: new lambda.Function(stack, 'modelInfoLambda', mockLambdaFuncProps), + mcpManagementAPILambda: new lambda.Function(stack, 'mcpManagementLambda', mockLambdaFuncProps), + agentManagementAPILambda: new lambda.Function(stack, 'agentManagementLambda', mockLambdaFuncProps), + workflowManagementAPILambda: new lambda.Function(stack, 'workflowManagementLambda', mockLambdaFuncProps), applicationTrademarkName: 'fake-name', defaultUserEmail: 'testuser@example.com', customResourceLambdaArn: crLambda.functionArn, diff --git a/source/infrastructure/test/api/use-case-rest-endpoint.test.ts b/source/infrastructure/test/api/use-case-rest-endpoint.test.ts index d1b6960d..56b157f5 100644 --- a/source/infrastructure/test/api/use-case-rest-endpoint.test.ts +++ b/source/infrastructure/test/api/use-case-rest-endpoint.test.ts @@ -431,10 +431,11 @@ describe('NewUseCaseRestEndpointDeployment', () => { 'OrStatement': { 'Statements': [ { - 'ByteMatchStatement': { - 'FieldToMatch': { 'UriPath': {} }, - 'PositionalConstraint': 'ENDS_WITH', - 'SearchString': '/deployments', + 'RegexMatchStatement': { + 'FieldToMatch': { + 'UriPath': {} + }, + 'RegexString': '/deployments(/mcp|/agents|/workflows)?$', 'TextTransformations': [{ 'Priority': 0, 'Type': 'NONE' }] } }, @@ -444,7 +445,7 @@ describe('NewUseCaseRestEndpointDeployment', () => { 'UriPath': {} }, 'RegexString': - '/deployments/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', + '/deployments(/mcp|/agents|/workflows)?/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', 'TextTransformations': [{ 'Priority': 0, 'Type': 'NONE' }] } } diff --git a/source/infrastructure/test/api/websocket-endpoint.test.ts b/source/infrastructure/test/api/websocket-endpoint.test.ts index cebb02f7..eb568911 100644 --- a/source/infrastructure/test/api/websocket-endpoint.test.ts +++ b/source/infrastructure/test/api/websocket-endpoint.test.ts @@ -29,6 +29,7 @@ describe('When creating a WebSocketEndpoint', () => { authorizerLambda: new lambda.Function(stack, 'AuthorizerLambda', mockLambdaFuncProps), onConnectLambda: new lambda.Function(stack, 'OnConnectLambda', mockLambdaFuncProps), onDisconnectLambda: new lambda.Function(stack, 'OnDisconnectLambda', mockLambdaFuncProps), + chatLlmProviderLambda: new lambda.Function(stack, 'ChatLlmProviderLambda', mockLambdaFuncProps), useCaseUUID: 'fake-id', lambdaRouteMapping: lambdaRouteMapping }); diff --git a/source/infrastructure/test/api/websocket-request-processor.test.ts b/source/infrastructure/test/api/websocket-request-processor.test.ts index 077fc16d..bfc472f1 100644 --- a/source/infrastructure/test/api/websocket-request-processor.test.ts +++ b/source/infrastructure/test/api/websocket-request-processor.test.ts @@ -44,6 +44,7 @@ describe('When deploying', () => { existingCognitoUserPoolId: 'fake-id', existingCognitoGroupPolicyTableName: 'fake-table-arn', customResourceLambda: new lambda.Function(stack, 'customResourceLambda', mockLambdaFuncProps), + chatLlmProviderLambda: new lambda.Function(stack, 'chatLlmProviderLambda', mockLambdaFuncProps), useCaseUUID: 'fake-uuid', cognitoDomainPrefix: 'fake-prefix', existingCognitoUserPoolClientId: 'fake123clientid', @@ -62,7 +63,7 @@ describe('When deploying', () => { }); it('Should have lambdas for custom resource, chatProvider, onConnect, onDisconnect, and Authorization', () => { - template.resourceCountIs('AWS::Lambda::Function', 6); + template.resourceCountIs('AWS::Lambda::Function', 7); template.hasResourceProperties('AWS::Lambda::Function', { 'Role': { @@ -264,6 +265,79 @@ describe('When deploying', () => { }); }); + it('should create SQS to Lambda mappings using SqsToLambda construct', () => { + // SqsToLambda construct should create event source mappings + template.hasResourceProperties('AWS::Lambda::EventSourceMapping', { + EventSourceArn: { + 'Fn::GetAtt': [Match.stringLikeRegexp('.*Queue.*'), 'Arn'] + }, + FunctionName: { + 'Ref': Match.anyValue() + } + }); + + // Should have event source mappings for each route + template.resourceCountIs('AWS::Lambda::EventSourceMapping', 2); + }); + + it('should set WEBSOCKET_CALLBACK_URL environment variable on chat provider lambda', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Environment: { + Variables: { + WEBSOCKET_CALLBACK_URL: { + 'Fn::Join': [ + '', + [ + 'https://', + { + 'Ref': Match.stringLikeRegexp('.*WebSocketApi.*') + }, + '.execute-api.', + { + 'Ref': 'AWS::Region' + }, + '.', + { + 'Ref': 'AWS::URLSuffix' + }, + '/prod' + ] + ] + } + } + } + }); + }); + + it('should grant websocket API manage connections permissions to chat provider lambda', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: 'execute-api:ManageConnections', + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { 'Ref': 'AWS::Partition' }, + ':execute-api:', + { 'Ref': 'AWS::Region' }, + ':', + { 'Ref': 'AWS::AccountId' }, + ':', + { 'Ref': Match.stringLikeRegexp('.*WebSocketApi.*') }, + '/*/*/@connections/*' + ] + ] + } + } + ]) + } + }); + }); + it('Should have cognito resources', () => { template.resourceCountIs('AWS::Cognito::UserPool', 1); template.resourceCountIs('AWS::Cognito::UserPoolClient', 1); diff --git a/source/infrastructure/test/auth/component-cognito-app-client.test.ts b/source/infrastructure/test/auth/component-cognito-app-client.test.ts new file mode 100644 index 00000000..f1388cf2 --- /dev/null +++ b/source/infrastructure/test/auth/component-cognito-app-client.test.ts @@ -0,0 +1,311 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as cognito from 'aws-cdk-lib/aws-cognito'; +import { Template } from 'aws-cdk-lib/assertions'; +import { + ComponentCognitoAppClient, + ComponentTokenValidity, + ComponentType +} from '../../lib/auth/component-cognito-app-client'; + +describe('ComponentCognitoAppClient', () => { + let app: cdk.App; + let stack: cdk.Stack; + let userPool: cognito.IUserPool; + let resourceServer: cognito.UserPoolResourceServer; + + beforeEach(() => { + app = new cdk.App(); + stack = new cdk.Stack(app, 'TestStack'); + userPool = cognito.UserPool.fromUserPoolId(stack, 'TestUserPool', 'us-east-1_TEST123'); + + // Create the AgentCore resource server that provides the scopes + resourceServer = new cognito.UserPoolResourceServer(stack, 'AgentCoreResourceServer', { + identifier: 'agentcore', + userPoolResourceServerName: 'agentcore', + userPool: userPool, + scopes: [ + { + scopeName: 'componentAccess', + scopeDescription: 'Scope for component authentication' + } + ] + }); + }); + + describe('Basic functionality', () => { + test('creates Cognito App Client with correct configuration', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'abc12345', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + UserPoolId: 'us-east-1_TEST123', + ClientName: 'agent-abc12345-client', + GenerateSecret: true, + ExplicitAuthFlows: ['ALLOW_REFRESH_TOKEN_AUTH'], + AllowedOAuthFlowsUserPoolClient: true, + AllowedOAuthFlows: ['client_credentials'], + AllowedOAuthScopes: ['agentcore/componentAccess'], + TokenValidityUnits: { + AccessToken: 'minutes', + RefreshToken: 'hours' + }, + AccessTokenValidity: ComponentTokenValidity.ACCESS_TOKEN_MINUTES, + RefreshTokenValidity: ComponentTokenValidity.REFRESH_TOKEN_HOURS, + PreventUserExistenceErrors: 'ENABLED', + EnableTokenRevocation: true, + AuthSessionValidity: ComponentTokenValidity.AUTH_SESSION_MINUTES, + SupportedIdentityProviders: ['COGNITO'] + }); + }); + + test('creates exactly one App Client resource', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'abc12345', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + template.resourceCountIs('AWS::Cognito::UserPoolClient', 1); + }); + + test('exposes client ID and app client interface', () => { + const appClient = new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'abc12345', + componentType: ComponentType.AGENT + }); + + expect(appClient.appClient).toBeDefined(); + expect(appClient.getClientId()).toBeDefined(); + expect(appClient.getClientSecret()).toBeDefined(); + }); + + test('returns CfnUserPoolClient type from appClient property', () => { + const appClient = new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'abc12345', + componentType: ComponentType.AGENT + }); + + expect(appClient.appClient).toBeInstanceOf(cognito.CfnUserPoolClient); + }); + + test('getClientId returns client ID attribute', () => { + const appClient = new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'abc12345', + componentType: ComponentType.AGENT + }); + + expect(appClient.getClientId()).toBe(appClient.appClient.attrClientId); + }); + + test('getClientSecret returns client secret attribute', () => { + const appClient = new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'abc12345', + componentType: ComponentType.AGENT + }); + + expect(appClient.getClientSecret()).toBe(appClient.appClient.attrClientSecret); + }); + }); + + describe('Component types', () => { + test.each([ + [ComponentType.AGENT, 'agent-def67890-client'], + [ComponentType.WORKFLOW, 'workflow-def67890-client'], + [ComponentType.MCP, 'mcp-def67890-client'] + ])('creates App Client for %s component type', (componentType, expectedClientName) => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'def67890', + componentType + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + ClientName: expectedClientName + }); + }); + }); + + describe('Custom token validity', () => { + test('uses custom access token validity', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'ghi12345', + componentType: ComponentType.AGENT, + accessTokenValidityMinutes: 30 + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + AccessTokenValidity: 30 + }); + }); + + test('uses custom refresh token validity', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'jkl67890', + componentType: ComponentType.AGENT, + refreshTokenValidityHours: 48 + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + RefreshTokenValidity: 48 + }); + }); + + test('uses default token validity when not specified', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'mno12345', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + AccessTokenValidity: ComponentTokenValidity.ACCESS_TOKEN_MINUTES, + RefreshTokenValidity: ComponentTokenValidity.REFRESH_TOKEN_HOURS + }); + }); + }); + + describe('Security configuration', () => { + test('configures M2M authentication flows and OAuth2', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'pqr67890', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + ExplicitAuthFlows: ['ALLOW_REFRESH_TOKEN_AUTH'], + AllowedOAuthFlowsUserPoolClient: true, + AllowedOAuthFlows: ['client_credentials'], + AllowedOAuthScopes: ['agentcore/componentAccess'] + }); + }); + + test('enables security features', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'stu12345', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + GenerateSecret: true, + PreventUserExistenceErrors: 'ENABLED', + EnableTokenRevocation: true, + SupportedIdentityProviders: ['COGNITO'] + }); + }); + }); + + describe('Component naming', () => { + test('handles use case short IDs with various formats', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'vwx67890', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + ClientName: 'agent-vwx67890-client' + }); + }); + + test('generates client name with correct format', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'yz123456', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + ClientName: 'agent-yz123456-client' + }); + }); + + test('client name is simple and predictable', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'abc12345', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + ClientName: 'agent-abc12345-client' + }); + }); + + test('handles different User Pool IDs', () => { + const differentUserPool = cognito.UserPool.fromUserPoolId( + stack, + 'DifferentUserPool', + 'us-west-2_DIFFERENT' + ); + + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool: differentUserPool, + useCaseShortId: 'abc98765', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + UserPoolId: 'us-west-2_DIFFERENT' + }); + }); + }); + + describe('CDK Nag suppressions', () => { + test('includes appropriate CDK Nag suppressions', () => { + new ComponentCognitoAppClient(stack, 'TestAppClient', { + userPool, + useCaseShortId: 'xyz54321', + componentType: ComponentType.AGENT + }); + + const template = Template.fromStack(stack); + const resources = template.findResources('AWS::Cognito::UserPoolClient'); + const appClientResource = Object.values(resources)[0]; + + expect(appClientResource.Metadata?.['cdk_nag']).toBeDefined(); + }); + }); +}); + +describe('ComponentType', () => { + test('has correct enum values', () => { + expect(ComponentType.AGENT).toBe('AGENT'); + expect(ComponentType.WORKFLOW).toBe('WORKFLOW'); + expect(ComponentType.MCP).toBe('MCP'); + }); +}); + +describe('ComponentTokenValidity', () => { + test('has correct default values', () => { + expect(ComponentTokenValidity.ACCESS_TOKEN_MINUTES).toBe(60); + expect(ComponentTokenValidity.REFRESH_TOKEN_HOURS).toBe(24); + expect(ComponentTokenValidity.AUTH_SESSION_MINUTES).toBe(3); + }); +}); diff --git a/source/infrastructure/test/auth/deployment-platform-cognito-setup.test.ts b/source/infrastructure/test/auth/deployment-platform-cognito-setup.test.ts index 58bb7065..db179d1d 100644 --- a/source/infrastructure/test/auth/deployment-platform-cognito-setup.test.ts +++ b/source/infrastructure/test/auth/deployment-platform-cognito-setup.test.ts @@ -244,12 +244,10 @@ describe('When cognito resources are created', () => { 'Fn::Not': [ { 'Fn::Equals': [notificationSubscriptionEmailCapture.asString(), 'placeholder@example.com'] - }, + } ] - }, - {"Condition":"TestCognitoSetupCognitoGroupConditionD133761D"} - + { 'Condition': 'TestCognitoSetupCognitoGroupConditionD133761D' } ] }); @@ -284,12 +282,41 @@ describe('When cognito resources are created', () => { 'Fn::Not': [ { 'Fn::Equals': ['', 'placeholder@example.com'] - }, + } ] - }, - {"Condition":"TestCognitoSetupCognitoGroupConditionD133761D"} + { 'Condition': 'TestCognitoSetupCognitoGroupConditionD133761D' } + ] + }); + }); + + it('should create AgentCore resource server with correct configuration', () => { + const app = new cdk.App(); + const stack = new cdk.Stack(app, 'TestStack'); + const cognitoSetup = new CognitoSetup(stack, 'TestCognitoSetup', { + userPoolProps: { + defaultUserEmail: 'test@example.com', + applicationTrademarkName: 'TestApp', + userGroupName: 'test-group', + existingCognitoUserPoolId: '', + existingCognitoGroupPolicyTableName: '', + customResourceLambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function/test-function' + } as UserPoolProps, + deployWebApp: 'Yes' + }); + + cognitoSetup.createAgentCoreResourceServer(); + + const template = Template.fromStack(stack); + template.hasResourceProperties('AWS::Cognito::UserPoolResourceServer', { + Identifier: 'agentcore', + Name: 'agentcore', + Scopes: [ + { + ScopeName: 'componentAccess', + ScopeDescription: 'Scope for component authentication' + } ] }); }); diff --git a/source/infrastructure/test/bedrock-agent-stack.test.ts b/source/infrastructure/test/bedrock-agent-stack.test.ts index 82a7885d..fe8a0714 100644 --- a/source/infrastructure/test/bedrock-agent-stack.test.ts +++ b/source/infrastructure/test/bedrock-agent-stack.test.ts @@ -229,6 +229,63 @@ describe('BedrockAgent Stack', () => { const customResource = resources[resourceKeys[0]]; expect(customResource.Condition).toBe('FeedbackEnabledCondition'); }); + + it('should have validation rules preventing multimodal parameters for Bedrock Agent use cases', () => { + const templateJson = template.toJSON(); + + expect(templateJson.Rules.NoMultimodalEnabledForBedrockAgentRule).toEqual({ + RuleCondition: { + 'Fn::Equals': [{ 'Ref': 'MultimodalEnabled' }, 'Yes'] + }, + Assertions: [ + { + Assert: { + 'Fn::Equals': ['false', 'true'] + }, + AssertDescription: + 'Multimodal functionality is not supported for Bedrock Agent Use Cases. Please set MultimodalEnabled to No.' + } + ] + }); + + expect(templateJson.Rules.NoMultimodalBucketForBedrockAgentRule).toEqual({ + RuleCondition: { + 'Fn::Not': [ + { + 'Fn::Equals': [{ 'Ref': 'ExistingMultimodalDataBucket' }, ''] + } + ] + }, + Assertions: [ + { + Assert: { + 'Fn::Equals': ['false', 'true'] + }, + AssertDescription: + 'Multimodal data bucket is not supported for Bedrock Agent Use Cases. Please leave ExistingMultimodalDataBucket empty.' + } + ] + }); + + expect(templateJson.Rules.NoMultimodalTableForBedrockAgentRule).toEqual({ + RuleCondition: { + 'Fn::Not': [ + { + 'Fn::Equals': [{ 'Ref': 'ExistingMultimodalDataMetadataTable' }, ''] + } + ] + }, + Assertions: [ + { + Assert: { + 'Fn::Equals': ['false', 'true'] + }, + AssertDescription: + 'Multimodal metadata table is not supported for Bedrock Agent Use Cases. Please leave ExistingMultimodalDataMetadataTable empty.' + } + ] + }); + }); }); function buildStack(): [Template, cdk.Stack] { diff --git a/source/infrastructure/test/bedrock-chat-stack.test.ts b/source/infrastructure/test/bedrock-chat-stack.test.ts index 43f4181d..0456cb14 100644 --- a/source/infrastructure/test/bedrock-chat-stack.test.ts +++ b/source/infrastructure/test/bedrock-chat-stack.test.ts @@ -436,6 +436,97 @@ describe('When Chat use case is created', () => { } }); }); + + it('should have a separate Lambda version policy with wildcard resource targeting', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + 'PolicyDocument': { + 'Statement': [ + { + 'Action': 'lambda:PublishVersion', + 'Effect': 'Allow', + 'Resource': { + 'Fn::Join': [ + '', + [ + 'arn:', + { + 'Ref': 'AWS::Partition' + }, + ':lambda:', + { + 'Ref': 'AWS::Region' + }, + ':', + { + 'Ref': 'AWS::AccountId' + }, + ':function:*' + ] + ] + } + } + ], + 'Version': '2012-10-17' + } + }); + }); + + it('should have validation rules preventing multimodal parameters for Text use cases', () => { + const templateJson = template.toJSON(); + + expect(templateJson.Rules.NoMultimodalEnabledForTextUseCaseRule).toEqual({ + RuleCondition: { + 'Fn::Equals': [{ 'Ref': 'MultimodalEnabled' }, 'Yes'] + }, + Assertions: [ + { + Assert: { + 'Fn::Equals': ['false', 'true'] + }, + AssertDescription: + 'Multimodal functionality is not supported for Text Use Cases. Please set MultimodalEnabled to No.' + } + ] + }); + + expect(templateJson.Rules.NoMultimodalBucketForTextUseCaseRule).toEqual({ + RuleCondition: { + 'Fn::Not': [ + { + 'Fn::Equals': [{ 'Ref': 'ExistingMultimodalDataBucket' }, ''] + } + ] + }, + Assertions: [ + { + Assert: { + 'Fn::Equals': ['false', 'true'] + }, + AssertDescription: + 'Multimodal data bucket is not supported for Text Use Cases. Please leave ExistingMultimodalDataBucket empty.' + } + ] + }); + + expect(templateJson.Rules.NoMultimodalTableForTextUseCaseRule).toEqual({ + RuleCondition: { + 'Fn::Not': [ + { + 'Fn::Equals': [{ 'Ref': 'ExistingMultimodalDataMetadataTable' }, ''] + } + ] + }, + Assertions: [ + { + Assert: { + 'Fn::Equals': ['false', 'true'] + }, + AssertDescription: + 'Multimodal metadata table is not supported for Text Use Cases. Please leave ExistingMultimodalDataMetadataTable empty.' + } + ] + }); + }); }); function buildStack(): [Template, cdk.Stack] { diff --git a/source/infrastructure/test/deployment-platform-stack.test.ts b/source/infrastructure/test/deployment-platform-stack.test.ts index 64ffb089..69b738c5 100644 --- a/source/infrastructure/test/deployment-platform-stack.test.ts +++ b/source/infrastructure/test/deployment-platform-stack.test.ts @@ -7,7 +7,7 @@ import * as rawCdkJson from '../cdk.json'; import { Match, Template } from 'aws-cdk-lib/assertions'; import { DeploymentPlatformStack } from '../lib/deployment-platform-stack'; -import { INTERNAL_EMAIL_DOMAIN } from '../lib/utils/constants'; +import { INTERNAL_EMAIL_DOMAIN, MULTIMODAL_FILE_EXPIRATION_DAYS } from '../lib/utils/constants'; describe('When deployment platform stack is created', () => { let template: Template; @@ -113,8 +113,23 @@ describe('When deployment platform stack is created', () => { } }); - // Ensure you expected to add a new CfnOutput before incrementing this value - expect(Object.keys(template.findOutputs('*')).length).toEqual(11); + template.hasOutput('MultimodalDataBucketName', { + 'Value': { + 'Ref': Match.stringLikeRegexp( + 'UseCaseManagementSetupMultimodalSetupFactoriesMultimodalDataBucketS3Bucket' + ) + }, + 'Description': 'S3 bucket for storing multimodal files' + }); + + template.hasOutput('MultimodalDataMetadataTable', { + 'Value': { + 'Ref': Match.stringLikeRegexp('UseCaseManagementSetupMultimodalSetupMultimodalDataMetadataTable') + }, + 'Description': 'DynamoDB table for storing multimodal files metadata' + }); + + expect(Object.keys(template.findOutputs('*')).length).toEqual(14); }); describe('when nested stacks are created', () => { @@ -373,6 +388,7 @@ describe('When deployment platform stack is created', () => { describe('With all environment variables and context.json available', () => { let template: Template; let jsonTemplate: { [key: string]: any }; + let stack: cdk.Stack; beforeAll(() => { process.env.DIST_OUTPUT_BUCKET = 'fake-artifact-bucket'; @@ -380,7 +396,7 @@ describe('With all environment variables and context.json available', () => { process.env.SOLUTION_NAME = 'fake-solution-name'; process.env.VERSION = 'v9.9.9'; - [template, jsonTemplate] = buildStack(); + [template, jsonTemplate, stack] = buildStack(); }); afterAll(() => { @@ -393,7 +409,6 @@ describe('With all environment variables and context.json available', () => { describe('When synthesizing through standard pipeline, it should generate necessary mapping', () => { it('has mapping for "Data"', () => { - expect(jsonTemplate['Mappings']['Solution']['Data']['SendAnonymousUsageData']).toEqual('Yes'); expect(jsonTemplate['Mappings']['Solution']['Data']['ID']).toEqual(process.env.SOLUTION_ID); expect(jsonTemplate['Mappings']['Solution']['Data']['Version']).toEqual(process.env.VERSION); expect(jsonTemplate['Mappings']['Solution']['Data']['SolutionName']).toEqual(process.env.SOLUTION_NAME); @@ -414,6 +429,104 @@ describe('With all environment variables and context.json available', () => { }); }); + it('should create API Gateway resources with correct configuration for multimodal files', () => { + template.hasResourceProperties('AWS::ApiGateway::Resource', { + PathPart: 'files' + }); + + // Should have POST, DELETE, and GET methods for files + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + AuthorizationType: 'CUSTOM', + OperationName: 'UploadFiles' + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'DELETE', + AuthorizationType: 'CUSTOM', + OperationName: 'DeleteFiles' + }); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + AuthorizationType: 'CUSTOM', + OperationName: 'GetFile' + }); + }); + + it('should create shared ECR Pull-Through Cache for AgentCore images', () => { + // Import the resolver to get environment-aware values + const { + resolveUpstreamRegistryUrl, + resolveUpstreamRepositoryPrefix + } = require('../lib/use-case-stacks/agent-core/utils/image-uri-resolver'); + + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + 'EcrRepositoryPrefix': { + 'Fn::GetAtt': [ + Match.stringLikeRegexp('SharedECRPullThroughCacheEcrRepoPrefixGenerator.*'), + 'EcrRepoPrefix' + ] + }, + 'UpstreamRegistry': 'ecr-public', + 'UpstreamRegistryUrl': resolveUpstreamRegistryUrl(), + 'UpstreamRepositoryPrefix': resolveUpstreamRepositoryPrefix() + }); + }); + + it('should output shared ECR cache prefix for use by agent deployments', () => { + template.hasOutput('SharedECRCachePrefix', { + 'Description': 'Shared ECR Pull-Through Cache repository prefix for AgentCore images' + }); + }); + + it('should configure agent management lambda with shared ECR cache prefix environment variable', () => { + // Verify the shared ECR cache resource exists in the main template + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + 'EcrRepositoryPrefix': { + 'Fn::GetAtt': [ + Match.stringLikeRegexp('SharedECRPullThroughCacheEcrRepoPrefixGenerator.*'), + 'EcrRepoPrefix' + ] + } + }); + + // Access the nested stack template to verify the lambda environment variable + const deploymentPlatformStack = stack as DeploymentPlatformStack; + const useCaseManagementTemplate = Template.fromStack( + deploymentPlatformStack.useCaseManagementSetup.useCaseManagement + ); + + // Verify the agent management lambda has the shared ECR cache prefix environment variable + useCaseManagementTemplate.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'agents-handler.agentsHandler', + Environment: { + Variables: Match.objectLike({ + 'SHARED_ECR_CACHE_PREFIX': { + 'Ref': Match.stringLikeRegexp('referencetoDeploymentPlatformStack.*EcrRepoPrefix') + } + }) + } + }); + }); + + it('should configure agent management lambda with model info table environment variable', () => { + const deploymentPlatformStack = stack as DeploymentPlatformStack; + const useCaseManagementTemplate = Template.fromStack( + deploymentPlatformStack.useCaseManagementSetup.useCaseManagement + ); + + // Verify the agent management lambda has model info table environment variable + useCaseManagementTemplate.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'agents-handler.agentsHandler', + Environment: { + Variables: Match.objectLike({ + MODEL_INFO_TABLE_NAME: Match.anyValue() + }) + } + }); + }); + it('should create API Gateway method with correct properties for feedback submission', () => { template.hasResourceProperties('AWS::ApiGateway::Method', { HttpMethod: 'POST', @@ -424,6 +537,185 @@ describe('With all environment variables and context.json available', () => { } }); }); + + describe('Strands Tools SSM Parameter', () => { + it('should create SSM parameter with correct path', () => { + template.hasResourceProperties('AWS::SSM::Parameter', { + Type: 'String', + Name: { + 'Fn::Join': [ + '', + [ + '/gaab/', + { + 'Ref': 'AWS::StackName' + }, + '/strands-tools' + ] + ] + }, + Description: 'Available Strands SDK tools for Agent Builder and Workflow use cases' + }); + }); + + it('should create SSM parameter with valid JSON array structure', () => { + const ssmParameters = template.findResources('AWS::SSM::Parameter', { + Properties: { + Name: { + 'Fn::Join': [ + '', + [ + '/gaab/', + { + 'Ref': 'AWS::StackName' + }, + '/strands-tools' + ] + ] + } + } + }); + + const parameterKeys = Object.keys(ssmParameters); + expect(parameterKeys.length).toBeGreaterThan(0); + + const parameterValue = ssmParameters[parameterKeys[0]].Properties.Value; + const tools = JSON.parse(parameterValue); + + // Verify it's an array + expect(Array.isArray(tools)).toBe(true); + + // Verify structure of tools + tools.forEach((tool: any) => { + expect(tool).toHaveProperty('name'); + expect(tool).toHaveProperty('description'); + expect(tool).toHaveProperty('value'); + expect(tool).toHaveProperty('category'); + expect(tool).toHaveProperty('isDefault'); + expect(typeof tool.name).toBe('string'); + expect(typeof tool.description).toBe('string'); + expect(typeof tool.value).toBe('string'); + expect(typeof tool.category).toBe('string'); + expect(typeof tool.isDefault).toBe('boolean'); + }); + + // Verify expected tools are present + const toolValues = tools.map((t: any) => t.value); + expect(toolValues).toContain('calculator'); + expect(toolValues).toContain('current_time'); + expect(toolValues).toContain('environment'); + + // Verify default tools + const defaultTools = tools.filter((t: any) => t.isDefault); + const defaultToolValues = defaultTools.map((t: any) => t.value); + expect(defaultToolValues).toContain('calculator'); + expect(defaultToolValues).toContain('current_time'); + }); + + it('should configure MCP management lambda with STRANDS_TOOLS_SSM_PARAM environment variable', () => { + const deploymentPlatformStack = stack as DeploymentPlatformStack; + const useCaseManagementTemplate = Template.fromStack( + deploymentPlatformStack.useCaseManagementSetup.useCaseManagement + ); + + // Verify the MCP management lambda has the STRANDS_TOOLS_SSM_PARAM environment variable + useCaseManagementTemplate.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'mcp-handler.mcpHandler', + Environment: { + Variables: Match.objectLike({ + STRANDS_TOOLS_SSM_PARAM: { + 'Ref': Match.stringLikeRegexp('referencetoDeploymentPlatformStackStrandsToolsParameter*') + } + }) + } + }); + }); + + it('should grant MCP management lambda ssm:GetParameter permission', () => { + const deploymentPlatformStack = stack as DeploymentPlatformStack; + const useCaseManagementTemplate = Template.fromStack( + deploymentPlatformStack.useCaseManagementSetup.useCaseManagement + ); + + // Verify IAM Policy exists with SSM GetParameter permission + useCaseManagementTemplate.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Action: 'ssm:GetParameter', + Effect: 'Allow', + Resource: Match.objectLike({ + 'Fn::Join': Match.arrayWith([ + Match.arrayWith([ + Match.stringLikeRegexp('arn:'), + Match.objectLike({ + Ref: 'AWS::Partition' + }), + Match.stringLikeRegexp(':ssm:') + ]) + ]) + }) + }) + ]) + } + }); + }); + + it('should create multimodal DynamoDB table for file metadata', () => { + template.hasResourceProperties('AWS::DynamoDB::Table', { + BillingMode: 'PAY_PER_REQUEST', + AttributeDefinitions: [ + { + AttributeName: 'fileKey', + AttributeType: 'S' + }, + { + AttributeName: 'fileName', + AttributeType: 'S' + } + ], + KeySchema: [ + { + AttributeName: 'fileKey', + KeyType: 'HASH' + }, + { + AttributeName: 'fileName', + KeyType: 'RANGE' + } + ] + }); + }); + + it('should create multimodal S3 bucket for data storage', () => { + template.hasResourceProperties('AWS::S3::Bucket', { + BucketEncryption: { + ServerSideEncryptionConfiguration: [ + { + ServerSideEncryptionByDefault: { + SSEAlgorithm: 'AES256' + } + } + ] + }, + PublicAccessBlockConfiguration: { + BlockPublicAcls: true, + BlockPublicPolicy: true, + IgnorePublicAcls: true, + RestrictPublicBuckets: true + }, + LifecycleConfiguration: { + Rules: [ + { + Id: 'DeleteFilesAfter48Hours', + Status: 'Enabled', + ExpirationInDays: MULTIMODAL_FILE_EXPIRATION_DAYS + } + ] + } + }); + }); + }); }); function buildStack(): [Template, { [key: string]: any }, cdk.Stack] { diff --git a/source/infrastructure/test/framework/application-setup.test.ts b/source/infrastructure/test/framework/application-setup.test.ts index 5d7ad5e2..f6c6a0ee 100644 --- a/source/infrastructure/test/framework/application-setup.test.ts +++ b/source/infrastructure/test/framework/application-setup.test.ts @@ -400,7 +400,7 @@ describe('When passing additional properties to createWebConfigStorage', () => { }); }); -describe('Before and after addAnonymousMetricsCustomLambda is called', () => { +describe('Before and after addMetricsCustomLambda is called', () => { let template: Template; beforeAll(() => { @@ -409,11 +409,11 @@ describe('Before and after addAnonymousMetricsCustomLambda is called', () => { template = Template.fromStack(stack); }); - it('should have a not Custom Anonymous Data resource', () => { - template.resourceCountIs('Custom::AnonymousData', 0); + it('should have a not Custom Data resource', () => { + template.resourceCountIs('Custom::Data', 0); }); - describe('When addAnonymousMetricsCustomLambda is called', () => { + describe('When addMetricsCustomLambda is called', () => { beforeAll(() => { const app = new cdk.App(); const stack = new cdk.Stack(app, 'TestStack'); @@ -423,33 +423,22 @@ describe('Before and after addAnonymousMetricsCustomLambda is called', () => { solutionVersion: rawCdkJson.context.solution_version }); - applicationSetup.addAnonymousMetricsCustomLambda('SO0999', 'v9.9.9'); + applicationSetup.addMetricsCustomLambda('SO0999', 'v9.9.9'); template = Template.fromStack(stack); }); - it('should have a Custom Anonymous Data properties', () => { + it('should have a Custom Data properties', () => { const customResourceLambda = new Capture(); - template.resourceCountIs('Custom::AnonymousData', 1); - template.hasResourceProperties('Custom::AnonymousData', { + template.resourceCountIs('Custom::Data', 1); + template.hasResourceProperties('Custom::Data', { ServiceToken: { 'Fn::GetAtt': [customResourceLambda, 'Arn'] }, - Resource: 'ANONYMOUS_METRIC', + Resource: 'METRIC', SolutionId: 'SO0999', Version: 'v9.9.9' }); }); - - it('should have a custom resource block with a condition', () => { - const conditionLogicalId = new Capture(); - template.hasResource('Custom::AnonymousData', { - Type: 'Custom::AnonymousData', - Properties: Match.anyValue(), - UpdateReplacePolicy: 'Delete', - DeletionPolicy: 'Delete', - Condition: conditionLogicalId - }); - }); }); }); diff --git a/source/infrastructure/test/framework/use-case-stack.test.ts b/source/infrastructure/test/framework/use-case-stack.test.ts index ffc7f52f..c88051c0 100644 --- a/source/infrastructure/test/framework/use-case-stack.test.ts +++ b/source/infrastructure/test/framework/use-case-stack.test.ts @@ -220,14 +220,53 @@ describe('When Chat use case is created', () => { Description: 'If set to No, the deployed use case stack will not have access to the feedback feature.' }); + template.hasParameter('MultimodalEnabled', { + Type: 'String', + AllowedValues: ['Yes', 'No'], + Default: 'No', + Description: + 'If set to Yes, the deployed use case stack will have access to multimodal functionality. This functionality is only enabled for Agentcore-based AgentBuilder and Workflow usecases.' + }); + + template.hasParameter('ExistingMultimodalDataMetadataTable', { + Type: 'String', + Default: '', + Description: 'Existing multimodal data metadata table name which contains references of the files in S3', + ConstraintDescription: 'Must be a valid DynamoDB table name or empty string' + }); + + template.hasParameter('ExistingMultimodalDataBucket', { + Type: 'String', + Default: '', + Description: 'Existing multimodal data bucket name which stores the multimodal data files', + ConstraintDescription: 'Must be a valid S3 bucket name or empty string' + }); + + template.hasParameter('ProvisionedConcurrencyValue', { + Type: 'Number', + Description: + 'Provisioned concurrency value for Lambda functions. Set to 0 to disable provisioned concurrency.', + Default: 0, + MinValue: 0, + MaxValue: 5 + }); + template.hasOutput('WebsocketEndpoint', { Description: 'Websocket API endpoint', Value: { - 'Fn::GetAtt': [ - Match.stringLikeRegexp( - 'WebsocketRequestProcessorWebSocketEndpointApiGatewayV2WebSocketToSqsWebSocketApiApiGatewayV2WebSocketToSqs' - ), - 'ApiEndpoint' + 'Fn::Join': [ + '', + [ + { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'WebsocketRequestProcessorWebSocketEndpointApiGatewayV2WebSocketToSqsWebSocketApiApiGatewayV2WebSocketToSqs' + ), + 'ApiEndpoint' + ] + }, + '/prod' + ] ] } }); @@ -286,8 +325,168 @@ describe('When Chat use case is created', () => { } }); + template.hasOutput('MultimodalDataBucketName', { + Description: 'S3 bucket for storing multimodal files', + Value: { + 'Fn::If': [ + 'CreateMultimodalResourcesCondition', + Match.anyValue(), + { + 'Ref': 'ExistingMultimodalDataBucket' + } + ] + }, + Condition: 'MultimodalEnabledCondition' + }); + + template.hasOutput('MultimodalDataMetadataTableName', { + Description: 'DynamoDB table for storing multimodal files metadata', + Value: { + 'Fn::If': [ + 'CreateMultimodalResourcesCondition', + Match.anyValue(), + { + 'Ref': 'ExistingMultimodalDataMetadataTable' + } + ] + }, + Condition: 'MultimodalEnabledCondition' + }); + // Ensure you expected to add a new CfnOutput before incrementing this value - expect(Object.keys(template.findOutputs('*')).length).toEqual(12); + expect(Object.keys(template.findOutputs('*')).length).toEqual(14); + }); + + describe('Multimodal functionality tests', () => { + it('should apply ResourceConditionsAspect to multimodal setup', () => { + template.hasCondition('CreateMultimodalResourcesCondition', { + 'Fn::And': [ + { + 'Condition': 'MultimodalEnabledCondition' + }, + { + 'Fn::Not': [ + { + 'Condition': 'MultimodalDataProvidedCondition' + } + ] + }, + { + 'Condition': 'CreateApiResourcesCondition' + } + ] + }); + }); + + it('should create multimodal resources in standalone mode when no existing resources provided', () => { + template.hasCondition('CreateMultimodalResourcesCondition', { + 'Fn::And': [ + { + 'Condition': 'MultimodalEnabledCondition' + }, + { + 'Fn::Not': [ + { + 'Condition': 'MultimodalDataProvidedCondition' + } + ] + }, + { + 'Condition': 'CreateApiResourcesCondition' + } + ] + }); + + // Verify that multimodal outputs are created conditionally + template.hasOutput('MultimodalDataBucketName', { + Condition: 'MultimodalEnabledCondition' + }); + + template.hasOutput('MultimodalDataMetadataTableName', { + Condition: 'MultimodalEnabledCondition' + }); + }); + + it('has conditions that make sure to not create multimodal setup resources when existing resources are provided', () => { + template.hasCondition('CreateMultimodalResourcesCondition', { + 'Fn::And': [ + { + 'Condition': 'MultimodalEnabledCondition' + }, + { + 'Fn::Not': [ + { + 'Condition': 'MultimodalDataProvidedCondition' + } + ] + }, + { + 'Condition': 'CreateApiResourcesCondition' + } + ] + }); + + template.hasCondition('MultimodalDataProvidedCondition', { + 'Fn::And': [ + { + 'Condition': 'MultimodalBucketProvided' + }, + { + 'Condition': 'MultimodalTableProvided' + } + ] + }); + }); + + it('should integrate multimodal setup with use case REST API when conditions are met', () => { + template.hasCondition('CreateMultimodalResourcesCondition', { + 'Fn::And': [ + { + 'Condition': 'MultimodalEnabledCondition' + }, + { + 'Fn::Not': [ + { + 'Condition': 'MultimodalDataProvidedCondition' + } + ] + }, + { + 'Condition': 'CreateApiResourcesCondition' + } + ] + }); + }); + + it('should have conditional outputs for multimodal resources', () => { + template.hasOutput('MultimodalDataBucketName', { + Description: 'S3 bucket for storing multimodal files', + Value: { + 'Fn::If': [ + 'CreateMultimodalResourcesCondition', + Match.anyValue(), + { + 'Ref': 'ExistingMultimodalDataBucket' + } + ] + }, + Condition: 'MultimodalEnabledCondition' + }); + + template.hasOutput('MultimodalDataMetadataTableName', { + Description: 'DynamoDB table for storing multimodal files metadata', + Value: { + 'Fn::If': [ + 'CreateMultimodalResourcesCondition', + Match.anyValue(), + { + 'Ref': 'ExistingMultimodalDataMetadataTable' + } + ] + }, + Condition: 'MultimodalEnabledCondition' + }); + }); }); describe('When nested stacks are created', () => { @@ -304,6 +503,18 @@ describe('When Chat use case is created', () => { template.resourceCountIs('AWS::CloudFormation::Stack', 6); }); + it('should create multimodal resources conditionally', () => { + template.hasResource('AWS::S3::Bucket', { + Type: 'AWS::S3::Bucket', + Condition: 'CreateMultimodalResourcesCondition' + }); + + template.hasResource('AWS::DynamoDB::Table', { + Type: 'AWS::DynamoDB::Table', + Condition: 'CreateMultimodalResourcesCondition' + }); + }); + it('should have a description in the nested stacks', () => { const chatStack = stack as BedrockChat; @@ -366,6 +577,124 @@ describe('When Chat use case is created', () => { ] }); }); + + it('has validation rules for multimodal configuration', () => { + expect(jsonTemplate['Rules']).toBeDefined(); + expect(jsonTemplate['Rules']['ValidateMultimodalResourcesConfiguration']).toEqual({ + RuleCondition: { + 'Fn::Equals': [ + { + 'Ref': 'MultimodalEnabled' + }, + 'Yes' + ] + }, + Assertions: [ + { + Assert: { + 'Fn::Or': [ + { + 'Fn::And': [ + { + 'Fn::Not': [ + { + 'Fn::Equals': [ + { + 'Ref': 'ExistingMultimodalDataBucket' + }, + '' + ] + } + ] + }, + { + 'Fn::Not': [ + { + 'Fn::Equals': [ + { + 'Ref': 'ExistingMultimodalDataMetadataTable' + }, + '' + ] + } + ] + } + ] + }, + { + 'Fn::And': [ + { + 'Fn::Equals': [ + { + 'Ref': 'ExistingMultimodalDataBucket' + }, + '' + ] + }, + { + 'Fn::Equals': [ + { + 'Ref': 'ExistingMultimodalDataMetadataTable' + }, + '' + ] + } + ] + } + ] + }, + AssertDescription: + 'When multimodal functionality is enabled, both multimodal data bucket and metadata table must be provided together, or both must be empty to create new resources' + } + ] + }); + + expect(jsonTemplate['Rules']['ValidateMultimodalEnabledWithResources']).toEqual({ + RuleCondition: { + 'Fn::And': [ + { + 'Fn::Not': [ + { + 'Fn::Equals': [ + { + 'Ref': 'ExistingMultimodalDataBucket' + }, + '' + ] + } + ] + }, + { + 'Fn::Not': [ + { + 'Fn::Equals': [ + { + 'Ref': 'ExistingMultimodalDataMetadataTable' + }, + '' + ] + } + ] + } + ] + }, + Assertions: [ + { + Assert: { + 'Fn::Equals': [ + { + 'Ref': 'MultimodalEnabled' + }, + 'Yes' + ] + }, + AssertDescription: + 'When existing multimodal data bucket and metadata table are provided, multimodal functionality must be enabled (MultimodalEnabled=Yes)' + } + ] + }); + }); + it('has a condition for Feedback Setup nested stack to deploy', () => { template.hasResource('AWS::CloudFormation::Stack', { Type: 'AWS::CloudFormation::Stack', @@ -439,6 +768,75 @@ describe('When Chat use case is created', () => { }); }); + it('validates provisioned concurrency parameter constraints', () => { + template.hasParameter('ProvisionedConcurrencyValue', { + Type: 'Number', + Description: + 'Provisioned concurrency value for Lambda functions. Set to 0 to disable provisioned concurrency.', + Default: 0, + MinValue: 0, + MaxValue: 5 + }); + }); + + it('should create Lambda version using custom resource', () => { + // Verify that a custom resource for Lambda version is created + template.resourceCountIs('Custom::LambdaVersion', 1); + + // Verify the custom resource references the chat Lambda function + template.hasResourceProperties('Custom::LambdaVersion', { + FunctionName: { + Ref: Match.stringLikeRegexp('ChatLlmProviderLambda.*') + }, + Resource: 'LAMBDA_VERSION_GENERATOR' + }); + }); + + it('should create Lambda alias pointing to the version', () => { + // Verify that a Lambda alias is created + template.resourceCountIs('AWS::Lambda::Alias', 1); + + // Verify the alias configuration + template.hasResourceProperties('AWS::Lambda::Alias', { + Name: 'live', + Description: 'Alias for chat Lambda function' + }); + }); + + it('should configure provisioned concurrency conditionally on alias', () => { + // Verify the alias has conditional provisioned concurrency + template.hasResourceProperties('AWS::Lambda::Alias', { + ProvisionedConcurrencyConfig: { + 'Fn::If': [ + 'ProvisionedConcurrencyCondition', + { + ProvisionedConcurrentExecutions: { + Ref: 'ProvisionedConcurrencyValue' + } + }, + { + Ref: 'AWS::NoValue' + } + ] + } + }); + }); + + it('should have provisioned concurrency condition', () => { + template.hasCondition('ProvisionedConcurrencyCondition', { + 'Fn::Not': [ + { + 'Fn::Equals': [ + { + Ref: 'ProvisionedConcurrencyValue' + }, + 0 + ] + } + ] + }); + }); + it('has a condition for the UI nested stack to deploy on a CfnParameter', () => { template.hasResource('AWS::CloudFormation::Stack', { Type: 'AWS::CloudFormation::Stack', @@ -553,8 +951,8 @@ describe('When Chat use case is created', () => { }); describe('Creates the LLM provider setup', () => { - it('should create 8 lambda functions', () => { - template.resourceCountIs('AWS::Lambda::Function', 8); + it('should create 10 lambda functions', () => { + template.resourceCountIs('AWS::Lambda::Function', 10); }); it('should create chat provider lambda function with correct env vars set', () => { @@ -936,7 +1334,6 @@ describe('With all environment variables and context.json available', () => { describe('When synthesizing through standard pipeline, it should generate necessary mapping', () => { it('has mapping for "Data"', () => { - expect(jsonTemplate['Mappings']['Solution']['Data']['SendAnonymousUsageData']).toEqual('Yes'); expect(jsonTemplate['Mappings']['Solution']['Data']['ID']).toEqual(process.env.SOLUTION_ID); expect(jsonTemplate['Mappings']['Solution']['Data']['Version']).toEqual(process.env.VERSION); expect(jsonTemplate['Mappings']['Solution']['Data']['SolutionName']).toEqual(process.env.SOLUTION_NAME); diff --git a/source/infrastructure/test/mcp-server-stack.test.ts b/source/infrastructure/test/mcp-server-stack.test.ts new file mode 100644 index 00000000..c0d5e9db --- /dev/null +++ b/source/infrastructure/test/mcp-server-stack.test.ts @@ -0,0 +1,667 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as rawCdkJson from '../cdk.json'; +import { Template, Match } from 'aws-cdk-lib/assertions'; +import { MCPServerStack } from '../lib/mcp-server-stack'; +import { USE_CASE_TYPES, ECR_URI_PATTERN } from '../lib/utils/constants'; + +describe('When MCP Server Stack is created', () => { + let template: Template; + let stack: MCPServerStack; + + beforeAll(() => { + [template, stack] = buildStack(); + }); + + it('should have suitable CloudFormation parameters', () => { + // Test UseCaseBaseParameters + template.hasParameter('UseCaseUUID', { + Type: 'String', + AllowedPattern: + '^[0-9a-fA-F]{8}$|^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$', + MinLength: 8, + MaxLength: 36, + ConstraintDescription: + 'Using digits and the letters A through F, please provide a 8 character id or a 36 character long UUIDv4.', + Description: + 'UUID to identify this deployed use case within an application. Please provide a 36 character long UUIDv4. If you are editing the stack, do not modify the value (retain the value used during creating the stack). A different UUID when editing the stack will result in new AWS resource created and deleting the old ones' + }); + + template.hasParameter('UseCaseConfigTableName', { + Type: 'String', + AllowedPattern: '^[a-zA-Z0-9_.-]{3,255}$', + MaxLength: 255, + Description: 'DynamoDB table name for the table which contains the configuration for this use case.', + ConstraintDescription: + 'This parameter is required. The stack will read the configuration from this table to configure the resources during deployment' + }); + + template.hasParameter('ExistingCognitoUserPoolId', { + Type: 'String', + AllowedPattern: '^$|^[0-9a-zA-Z_-]{9,24}$', + MaxLength: 24, + Description: + 'Optional - UserPoolId of an existing cognito user pool which this use case will be authenticated with. Typically will be provided when deploying from the deployment platform, but can be omitted when deploying this use-case stack standalone.', + Default: '' + }); + + template.hasParameter('ExistingCognitoUserPoolClient', { + Type: 'String', + AllowedPattern: '^$|^[a-z0-9]{3,128}$', + MaxLength: 128, + Description: + 'Optional - Provide a User Pool Client (App Client) to use an existing one. If not provided a new User Pool Client will be created. This parameter can only be provided if an existing User Pool Id is provided', + Default: '' + }); + + // Test MCP-specific parameters + template.hasParameter('S3BucketName', { + Type: 'String', + Description: 'S3 Bucket Name for the S3 bucket that stores the Lambda/API schema', + AllowedPattern: '^[a-z0-9][a-z0-9\\-]*[a-z0-9]$', + ConstraintDescription: 'Please provide a valid S3 bucket name', + MaxLength: 63 + }); + + // Test ECR URI parameter with your correct pattern + const jsonTemplate = template.toJSON(); + const ecrUriParam = jsonTemplate.Parameters.EcrUri; + + expect(ecrUriParam).toBeDefined(); + expect(ecrUriParam.Type).toBe('String'); + expect(ecrUriParam.Description).toBe('Optional ECR URI for the container image used by the MCP server'); + expect(ecrUriParam.AllowedPattern).toBe(`^$|${ECR_URI_PATTERN}`); + expect(ecrUriParam.ConstraintDescription).toBe( + 'Please provide a valid ECR URI format (e.g., 123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:tag) or leave empty' + ); + expect(ecrUriParam.MaxLength).toBe(200); + expect(ecrUriParam.Default).toBe(''); + }); + + it('should have MCP-specific parameter validation', () => { + // Test S3 bucket name parameter constraints + const jsonTemplate = template.toJSON(); + const s3BucketParam = jsonTemplate.Parameters.S3BucketName; + + expect(s3BucketParam).toBeDefined(); + expect(s3BucketParam.Type).toBe('String'); + expect(s3BucketParam.AllowedPattern).toBe('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$'); + expect(s3BucketParam.MaxLength).toBe(63); + expect(s3BucketParam.ConstraintDescription).toBe('Please provide a valid S3 bucket name'); + }); + + it('should have parameter groups configured', () => { + const jsonTemplate = template.toJSON(); + const parameterGroups = jsonTemplate.Metadata['AWS::CloudFormation::Interface'].ParameterGroups; + + expect(parameterGroups).toBeDefined(); + expect(parameterGroups.length).toBeGreaterThan(0); + + // Check for MCP Server Configuration group + const mcpGroup = parameterGroups.find((group: any) => group.Label.default === 'MCP Server Configuration'); + expect(mcpGroup).toBeDefined(); + expect(mcpGroup.Parameters).toContain('S3BucketName'); + expect(mcpGroup.Parameters).toContain('EcrUri'); + }); + + it('should have the deployment confirmation output', () => { + // The stack doesn't create a deployment confirmation output + // Instead, verify that the stack has the expected conditional outputs + const jsonTemplate = template.toJSON(); + const outputs = jsonTemplate.Outputs; + + // Verify that conditional outputs exist for both deployment types + expect(outputs.MCPRuntimeArn).toBeDefined(); + expect(outputs.MCPGatewayArn).toBeDefined(); + }); + + it('should have stackParameters initialized', () => { + // Test that the parameters exist in the template (which proves stackParameters worked) + template.hasParameter('UseCaseUUID', {}); + template.hasParameter('S3BucketName', {}); + template.hasParameter('UseCaseConfigTableName', {}); + template.hasParameter('UseCaseConfigRecordKey', {}); + template.hasParameter('ExistingCognitoUserPoolId', {}); + template.hasParameter('ExistingCognitoUserPoolClient', {}); + }); + + it('should not have VPC-related parameters since base stack features are disabled', () => { + // Verify that VPC parameters are NOT present in the template + const jsonTemplate = template.toJSON(); + const parameters = jsonTemplate.Parameters || {}; + + expect(parameters['VpcEnabled']).toBeUndefined(); + expect(parameters['CreateNewVpc']).toBeUndefined(); + expect(parameters['IPAMPoolId']).toBeUndefined(); + expect(parameters['DeployUI']).toBeUndefined(); + expect(parameters['ExistingVpcId']).toBeUndefined(); + expect(parameters['ExistingPrivateSubnetIds']).toBeUndefined(); + expect(parameters['ExistingSecurityGroupIds']).toBeUndefined(); + expect(parameters['VpcAzs']).toBeUndefined(); + }); + + it('should have applicationSetup initialized', () => { + expect(stack.applicationSetup).toBeDefined(); + expect(stack.applicationSetup.customResourceLambda).toBeDefined(); + expect(stack.applicationSetup.customResourceRole).toBeDefined(); + }); + + it('should create custom resource for MCP server', () => { + // Check if custom resource exists - it might not be created if applicationSetup is not properly initialized + const jsonTemplate = template.toJSON(); + const customResources = Object.keys(jsonTemplate.Resources || {}).filter( + (key) => jsonTemplate.Resources[key].Type === 'AWS::CloudFormation::CustomResource' + ); + + // If no custom resources exist, check that the stack at least has the necessary infrastructure + if (customResources.length === 0) { + // Verify that the custom resource lambda exists (which would be needed for the custom resource) + template.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'lambda_func.handler' + }); + } else { + // If custom resources exist, verify the MCP server one + template.hasResourceProperties('AWS::CloudFormation::CustomResource', { + Properties: { + Resource: 'DEPLOY_MCP_GATEWAY' + } + }); + } + }); + + it('should create MCP Gateway IAM role with correct permissions', () => { + // Check that MCP Gateway role is created + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: [ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ] + }, + Description: 'IAM role for MCP Gateway to invoke Lambda functions' + }); + + // Check that the MCP Gateway role has the correct permissions in its policies + const jsonTemplate = template.toJSON(); + + // Find all policies attached to MCPGatewayRole + const mcpGatewayPolicies = Object.keys(jsonTemplate.Resources || {}) + .filter((key) => { + const resource = jsonTemplate.Resources[key]; + return ( + resource.Type === 'AWS::IAM::Policy' && + resource.Properties?.Roles?.some( + (role: any) => typeof role === 'object' && role.Ref && role.Ref.includes('MCPGatewayRole') + ) + ); + }) + .map((key) => jsonTemplate.Resources[key]); + + expect(mcpGatewayPolicies.length).toBeGreaterThan(0); + + // Collect all statements from all policies + const allStatements = mcpGatewayPolicies.flatMap((policy) => policy.Properties.PolicyDocument.Statement || []); + + // Check for bedrock-agentcore GetWorkloadAccessToken permission + const agentcoreStatement = allStatements.find((stmt: any) => { + const actions = Array.isArray(stmt.Action) ? stmt.Action : [stmt.Action]; + return actions.includes('bedrock-agentcore:GetWorkloadAccessToken'); + }); + expect(agentcoreStatement).toBeDefined(); + const agentcoreActions = Array.isArray(agentcoreStatement.Action) + ? agentcoreStatement.Action + : [agentcoreStatement.Action]; + expect(agentcoreActions).toContain('bedrock-agentcore:GetResourceApiKey'); + expect(agentcoreActions).toContain('bedrock-agentcore:GetResourceOauth2Token'); + + // Check for GetGateway permission + const getGatewayStatement = allStatements.find((stmt: any) => { + const actions = Array.isArray(stmt.Action) ? stmt.Action : [stmt.Action]; + return actions.includes('bedrock-agentcore:GetGateway'); + }); + expect(getGatewayStatement).toBeDefined(); + + // Check for S3 schema access permissions + const s3Statement = allStatements.find((stmt: any) => { + const actions = Array.isArray(stmt.Action) ? stmt.Action : [stmt.Action]; + return actions.includes('s3:GetObject'); + }); + expect(s3Statement).toBeDefined(); + + // Note: Lambda invoke permissions are added dynamically at runtime by GatewayPolicyManager + // based on actual target configurations, not in the CDK stack + }); + + it('should have conditional outputs based on deployment type', () => { + // Check that conditional outputs exist in the template + const jsonTemplate = template.toJSON(); + const outputs = jsonTemplate.Outputs; + + // Runtime outputs (conditional on hasEcrImage) + expect(outputs.MCPRuntimeArn).toBeDefined(); + expect(outputs.MCPRuntimeArn.Description).toBe('ARN of the created MCP Runtime resource'); + expect(outputs.MCPRuntimeArn.Condition).toBe('HasEcrImage'); + + expect(outputs.MCPRuntimeExecutionRoleArn).toBeDefined(); + expect(outputs.MCPRuntimeExecutionRoleArn.Description).toBe('IAM Role ARN used for MCP Runtime execution'); + expect(outputs.MCPRuntimeExecutionRoleArn.Condition).toBe('HasEcrImage'); + + // Gateway outputs (conditional on noEcrImage) + expect(outputs.MCPGatewayArn).toBeDefined(); + expect(outputs.MCPGatewayArn.Description).toBe('ARN of the created MCP Gateway resource'); + expect(outputs.MCPGatewayArn.Condition).toBe('NoEcrImage'); + + expect(outputs.MCPGatewayRoleArn).toBeDefined(); + expect(outputs.MCPGatewayRoleArn.Description).toBe('IAM Role ARN used for MCP Gateway operations'); + expect(outputs.MCPGatewayRoleArn.Condition).toBe('NoEcrImage'); + }); + + it('should have CloudFormation conditions for ECR image deployment', () => { + const jsonTemplate = template.toJSON(); + const conditions = jsonTemplate.Conditions; + + expect(conditions.HasEcrImage).toBeDefined(); + expect(conditions.NoEcrImage).toBeDefined(); + + // Verify the condition logic + expect(conditions.HasEcrImage['Fn::Not']).toBeDefined(); + expect(conditions.NoEcrImage['Fn::Equals']).toBeDefined(); + }); + + it('should create MCP Runtime custom resource with correct properties', () => { + template.hasResourceProperties('Custom::CreateMCPRuntime', { + Resource: 'DEPLOY_MCP_RUNTIME', + USE_CASE_CONFIG_TABLE_NAME: { Ref: 'UseCaseConfigTableName' }, + USE_CASE_CONFIG_RECORD_KEY: { Ref: 'UseCaseConfigRecordKey' }, + EXECUTION_ROLE_ARN: { + 'Fn::GetAtt': [Match.stringLikeRegexp('MCPAgentExecutionRole.*'), 'Arn'] + }, + ECR_URI: { Ref: 'EcrUri' } + }); + + // Verify the runtime resource has the expected properties + const runtimeJsonTemplate = template.toJSON(); + const runtimeResourcesList = Object.keys(runtimeJsonTemplate.Resources || {}) + .filter((key) => runtimeJsonTemplate.Resources[key].Type === 'Custom::CreateMCPRuntime') + .map((key) => runtimeJsonTemplate.Resources[key]); + + expect(runtimeResourcesList.length).toBe(1); + const runtimeResourceData = runtimeResourcesList[0]; + + // Check that runtime-specific properties exist + expect(runtimeResourceData.Properties).toHaveProperty('ECR_URI'); + expect(runtimeResourceData.Properties).toHaveProperty('EXECUTION_ROLE_ARN'); + expect(runtimeResourceData.Properties).toHaveProperty('MCPAgentCoreName'); + expect(runtimeResourceData.Properties).toHaveProperty('Resource', 'DEPLOY_MCP_RUNTIME'); + + // Verify the custom resource has the correct condition + expect(runtimeResourceData).toBeDefined(); + expect(runtimeResourceData.Condition).toBe('HasEcrImage'); + }); + + it('should create MCP Gateway custom resource with correct properties and condition', () => { + template.hasResourceProperties('Custom::CreateMCPServer', { + Resource: 'DEPLOY_MCP_GATEWAY', + USE_CASE_CONFIG_TABLE_NAME: { Ref: 'UseCaseConfigTableName' }, + USE_CASE_CONFIG_RECORD_KEY: { Ref: 'UseCaseConfigRecordKey' }, + S3_BUCKET_NAME: { Ref: 'S3BucketName' }, + COGNITO_USER_POOL_ID: { Ref: 'ExistingCognitoUserPoolId' }, + COGNITO_USER_POOL_CLIENT_ID: { Ref: 'ExistingCognitoUserPoolClient' } + }); + + // Verify the custom resource has the correct condition + const jsonTemplate = template.toJSON(); + const gatewayResource = Object.keys(jsonTemplate.Resources || {}) + .filter((key) => jsonTemplate.Resources[key].Type === 'Custom::CreateMCPServer') + .map((key) => jsonTemplate.Resources[key])[0]; + + expect(gatewayResource).toBeDefined(); + expect(gatewayResource.Condition).toBe('NoEcrImage'); + }); + + it('should create AgentExecutionRole component for runtime deployments', () => { + // Verify that the AgentExecutionRole component creates the necessary IAM role + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + } + }) + ]) + }, + Description: 'Execution role for AgentCore Runtime' + }); + }); + + it('should validate ECR URI parameter accepts various repository formats', () => { + // Test that the ECR URI parameter pattern accepts various valid formats + const jsonTemplate = template.toJSON(); + const ecrUriParam = jsonTemplate.Parameters.EcrUri; + + expect(ecrUriParam.AllowedPattern).toBeDefined(); + + // Verify the pattern is the expected one that supports underscores and namespaces + expect(ecrUriParam.AllowedPattern).toContain('(?:[a-z\\d]+(?:[._-][a-z\\d]+)*\\/)*[a-z\\d]+(?:[._-][a-z\\d]+)*'); + }); + + it('should have proper conditional logic for deployment types', () => { + const jsonTemplate = template.toJSON(); + + // Verify HasEcrImage condition logic + const hasEcrImageCondition = jsonTemplate.Conditions.HasEcrImage; + expect(hasEcrImageCondition['Fn::Not']).toEqual([{ 'Fn::Equals': [{ Ref: 'EcrUri' }, ''] }]); + + // Verify NoEcrImage condition logic + const noEcrImageCondition = jsonTemplate.Conditions.NoEcrImage; + expect(noEcrImageCondition['Fn::Equals']).toEqual([{ Ref: 'EcrUri' }, '']); + }); + + it('should have MCP agent core name generation for both deployment types', () => { + // Both runtime and gateway resources should use the same naming pattern + const jsonTemplate = template.toJSON(); + + const runtimeResource = Object.keys(jsonTemplate.Resources || {}) + .filter((key) => jsonTemplate.Resources[key].Type === 'Custom::CreateMCPRuntime') + .map((key) => jsonTemplate.Resources[key])[0]; + + const gatewayResource = Object.keys(jsonTemplate.Resources || {}) + .filter((key) => jsonTemplate.Resources[key].Type === 'Custom::CreateMCPServer') + .map((key) => jsonTemplate.Resources[key])[0]; + + if (runtimeResource) { + expect(runtimeResource.Properties.MCPAgentCoreName).toEqual({ + 'Fn::Join': ['', ['gaab_mcp_', { 'Fn::Select': [0, { 'Fn::Split': ['-', { Ref: 'UseCaseUUID' }] }] }]] + }); + } + + if (gatewayResource) { + expect(gatewayResource.Properties.MCPAgentCoreName).toEqual({ + 'Fn::Join': ['', ['gaab-mcp-', { 'Fn::Select': [0, { 'Fn::Split': ['-', { Ref: 'UseCaseUUID' }] }] }]] + }); + } + }); + + it('should have custom resource with correct MCP Gateway properties', () => { + template.hasResourceProperties('Custom::CreateMCPServer', { + Resource: 'DEPLOY_MCP_GATEWAY', + USE_CASE_CONFIG_TABLE_NAME: { Ref: 'UseCaseConfigTableName' }, + USE_CASE_CONFIG_RECORD_KEY: { Ref: 'UseCaseConfigRecordKey' }, + USE_CASE_UUID: { 'Fn::Select': [0, { 'Fn::Split': ['-', { Ref: 'UseCaseUUID' }] }] }, + S3_BUCKET_NAME: { Ref: 'S3BucketName' }, + GATEWAY_ROLE_ARN: { 'Fn::GetAtt': ['MCPGatewayRole712EB1E9', 'Arn'] }, + COGNITO_USER_POOL_ID: { Ref: 'ExistingCognitoUserPoolId' }, + COGNITO_USER_POOL_CLIENT_ID: { Ref: 'ExistingCognitoUserPoolClient' } + }); + }); + + it('should have solution mapping with MCP Server use case type', () => { + const jsonTemplate = template.toJSON(); + const mappings = jsonTemplate.Mappings; + + expect(mappings).toBeDefined(); + expect(mappings.Solution).toBeDefined(); + expect(mappings.Solution.Data.UseCaseName).toBe('MCPServer'); + }); + + it('should have IAM policies for S3, DynamoDB, and Bedrock AgentCore', () => { + // Based on the actual template, CDK combines all permissions into a single policy + // Check for the combined policy that contains all the required permissions + + const jsonTemplate = template.toJSON(); + const policies = Object.keys(jsonTemplate.Resources || {}) + .filter((key) => jsonTemplate.Resources[key].Type === 'AWS::IAM::Policy') + .map((key) => jsonTemplate.Resources[key]); + + // Find the main custom resource policy that contains all our permissions + const mainPolicy = policies.find((policy) => { + const statements = policy.Properties?.PolicyDocument?.Statement || []; + return statements.some((stmt: any) => Array.isArray(stmt.Action) && stmt.Action.includes('s3:GetObject')); + }); + + expect(mainPolicy).toBeDefined(); + + if (mainPolicy) { + const statements = mainPolicy.Properties.PolicyDocument.Statement; + + // Check for S3 permissions + const s3Statement = statements.find( + (stmt: any) => Array.isArray(stmt.Action) && stmt.Action.includes('s3:GetObject') + ); + expect(s3Statement).toBeDefined(); + expect(s3Statement.Action).toContain('s3:ListBucket'); + + // Check for DynamoDB permissions + const dynamoStatement = statements.find( + (stmt: any) => Array.isArray(stmt.Action) && stmt.Action.includes('dynamodb:PutItem') + ); + expect(dynamoStatement).toBeDefined(); + + // Check for Bedrock AgentCore runtime permissions + const bedrockRuntimeStatement = statements.find( + (stmt: any) => + Array.isArray(stmt.Action) && stmt.Action.includes('bedrock-agentcore:CreateAgentRuntime') + ); + expect(bedrockRuntimeStatement).toBeDefined(); + expect(bedrockRuntimeStatement.Action).toContain('bedrock-agentcore:UpdateAgentRuntime'); + expect(bedrockRuntimeStatement.Action).toContain('bedrock-agentcore:DeleteAgentRuntime'); + + // Check for Bedrock AgentCore gateway permissions + const bedrockGatewayStatement = statements.find( + (stmt: any) => Array.isArray(stmt.Action) && stmt.Action.includes('bedrock-agentcore:CreateGateway') + ); + expect(bedrockGatewayStatement).toBeDefined(); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:UpdateGateway'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:DeleteGateway'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:GetGateway'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:ListGateways'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:CreateGatewayTarget'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:UpdateGatewayTarget'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:DeleteGatewayTarget'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:GetGatewayTarget'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:ListGatewayTargets'); + expect(bedrockGatewayStatement.Action).toContain('bedrock-agentcore:SynchronizeGatewayTargets'); + + // Check for Bedrock AgentCore workload identity permissions + const bedrockWorkloadStatement = statements.find( + (stmt: any) => + Array.isArray(stmt.Action) && stmt.Action.includes('bedrock-agentcore:CreateWorkloadIdentity') + ); + expect(bedrockWorkloadStatement).toBeDefined(); + expect(bedrockWorkloadStatement.Action).toContain('bedrock-agentcore:GetWorkloadIdentity'); + expect(bedrockWorkloadStatement.Action).toContain('bedrock-agentcore:UpdateWorkloadIdentity'); + expect(bedrockWorkloadStatement.Action).toContain('bedrock-agentcore:DeleteWorkloadIdentity'); + + // Check for IAM PassRole permissions + const passRoleStatement = statements.find( + (stmt: any) => + (Array.isArray(stmt.Action) && stmt.Action.includes('iam:PassRole')) || + (typeof stmt.Action === 'string' && stmt.Action === 'iam:PassRole') + ); + expect(passRoleStatement).toBeDefined(); + if (passRoleStatement) { + expect(passRoleStatement.Condition).toBeDefined(); + expect(passRoleStatement.Condition.StringEquals).toBeDefined(); + expect(passRoleStatement.Condition.StringEquals['iam:PassedToService']).toBe( + 'bedrock-agentcore.amazonaws.com' + ); + } + } + }); + + it('should have proper resource tagging and metadata', () => { + const jsonTemplate = template.toJSON(); + + // Check that Solution mapping exists with correct metadata + expect(jsonTemplate.Mappings.Solution.Data.UseCaseName).toBe('MCPServer'); + }); + + it('should have custom resource lambda with correct handler', () => { + // Verify the custom resource lambda exists with the right handler + template.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'lambda_func.handler' + }); + }); + + it('should have DynamoDB permissions with proper conditions', () => { + // Check that DynamoDB permissions include proper conditions for security + const jsonTemplate = template.toJSON(); + const policies = Object.keys(jsonTemplate.Resources || {}) + .filter((key) => jsonTemplate.Resources[key].Type === 'AWS::IAM::Policy') + .map((key) => jsonTemplate.Resources[key]); + + const dynamoPolicy = policies.find((policy) => { + const statements = policy.Properties?.PolicyDocument?.Statement || []; + return statements.some( + (stmt: any) => Array.isArray(stmt.Action) && stmt.Action.includes('dynamodb:GetItem') + ); + }); + + expect(dynamoPolicy).toBeDefined(); + + if (dynamoPolicy) { + const dynamoStatement = dynamoPolicy.Properties.PolicyDocument.Statement.find( + (stmt: any) => Array.isArray(stmt.Action) && stmt.Action.includes('dynamodb:GetItem') + ); + + expect(dynamoStatement.Condition).toBeDefined(); + expect(dynamoStatement.Condition['ForAllValues:StringEquals']).toBeDefined(); + expect(dynamoStatement.Condition['ForAllValues:StringEquals']['dynamodb:LeadingKeys']).toBeDefined(); + } + }); + + it('should not create base stack features when disabled', () => { + // Verify that VPC, UI deployment, and other base features are not created + const jsonTemplate = template.toJSON(); + const resources = Object.keys(jsonTemplate.Resources || {}); + + // Should not have VPC resources + const vpcResources = resources.filter( + (key) => + jsonTemplate.Resources[key].Type === 'AWS::EC2::VPC' || + jsonTemplate.Resources[key].Type === 'AWS::EC2::Subnet' || + jsonTemplate.Resources[key].Type === 'AWS::EC2::InternetGateway' + ); + expect(vpcResources.length).toBe(0); + + // Should not have CloudFront distribution for UI + const cloudFrontResources = resources.filter( + (key) => jsonTemplate.Resources[key].Type === 'AWS::CloudFront::Distribution' + ); + expect(cloudFrontResources.length).toBe(0); + }); + + it('should have MCP Gateway role with correct trust policy', () => { + // Verify the MCP Gateway role has the correct assume role policy + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Version: '2012-10-17', + Statement: [ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ] + } + }); + }); + + it('should support runtime deployment with ECR image workflow', () => { + const jsonTemplate = template.toJSON(); + + const ecrUriParam = jsonTemplate.Parameters.EcrUri; + expect(ecrUriParam).toBeDefined(); + expect(ecrUriParam.Default).toBe(''); + + // ExecutionRoleArn is provided by AgentExecutionRole component + const executionRoleParam = jsonTemplate.Parameters.ExecutionRoleArn; + expect(executionRoleParam).toBeUndefined(); + + expect(jsonTemplate.Conditions.HasEcrImage).toBeDefined(); + expect(jsonTemplate.Conditions.NoEcrImage).toBeDefined(); + + const runtimeResources = Object.keys(jsonTemplate.Resources || {}).filter( + (key) => jsonTemplate.Resources[key].Type === 'Custom::CreateMCPRuntime' + ); + expect(runtimeResources.length).toBe(1); + + const runtimeResource = jsonTemplate.Resources[runtimeResources[0]]; + expect(runtimeResource.Condition).toBe('HasEcrImage'); + expect(runtimeResource.Properties.Resource).toBe('DEPLOY_MCP_RUNTIME'); + + const gatewayResources = Object.keys(jsonTemplate.Resources || {}).filter( + (key) => jsonTemplate.Resources[key].Type === 'Custom::CreateMCPServer' + ); + expect(gatewayResources.length).toBe(1); + + const gatewayResource = jsonTemplate.Resources[gatewayResources[0]]; + expect(gatewayResource.Condition).toBe('NoEcrImage'); + + // Verify conditional outputs for runtime deployment + expect(jsonTemplate.Outputs.MCPRuntimeArn.Condition).toBe('HasEcrImage'); + expect(jsonTemplate.Outputs.MCPRuntimeExecutionRoleArn.Condition).toBe('HasEcrImage'); + expect(jsonTemplate.Outputs.MCPGatewayArn.Condition).toBe('NoEcrImage'); + expect(jsonTemplate.Outputs.MCPGatewayRoleArn.Condition).toBe('NoEcrImage'); + + const policies = Object.keys(jsonTemplate.Resources || {}) + .filter((key) => jsonTemplate.Resources[key].Type === 'AWS::IAM::Policy') + .map((key) => jsonTemplate.Resources[key]); + + const runtimePolicy = policies.find((policy) => { + const statements = policy.Properties?.PolicyDocument?.Statement || []; + return statements.some( + (stmt: any) => + Array.isArray(stmt.Action) && stmt.Action.includes('bedrock-agentcore:CreateAgentRuntime') + ); + }); + + expect(runtimePolicy).toBeDefined(); + }); + + it('should have proper parameter group organization for both deployment types', () => { + const jsonTemplate = template.toJSON(); + const parameterGroups = jsonTemplate.Metadata['AWS::CloudFormation::Interface'].ParameterGroups; + + // Find MCP Server Configuration group + const mcpGroup = parameterGroups.find((group: any) => group.Label.default === 'MCP Server Configuration'); + + expect(mcpGroup).toBeDefined(); + expect(mcpGroup.Parameters).toContain('S3BucketName'); // For gateway deployments + expect(mcpGroup.Parameters).toContain('EcrUri'); // For runtime deployments + + // Verify the group contains both gateway and runtime parameters + expect(mcpGroup.Parameters.length).toBeGreaterThanOrEqual(2); + }); +}); + +function buildStack(): [Template, MCPServerStack] { + const app = new cdk.App({ + context: rawCdkJson.context + }); + + const solutionID = process.env.SOLUTION_ID ?? app.node.tryGetContext('solution_id'); + const version = process.env.VERSION ?? app.node.tryGetContext('solution_version'); + const solutionName = process.env.SOLUTION_NAME ?? app.node.tryGetContext('solution_name'); + + const stack = new MCPServerStack(app, 'MCPServerStack', { + solutionID: solutionID, + solutionVersion: version, + solutionName: solutionName, + applicationTrademarkName: rawCdkJson.context.application_trademark_name + }); + + const template = Template.fromStack(stack); + return [template, stack]; +} diff --git a/source/infrastructure/test/mock-lambda-func/.gitignore b/source/infrastructure/test/mock-lambda-func/.gitignore index 1160f77c..983d7e2d 100644 --- a/source/infrastructure/test/mock-lambda-func/.gitignore +++ b/source/infrastructure/test/mock-lambda-func/.gitignore @@ -1 +1,5 @@ -infrastructure/* \ No newline at end of file +infrastructure/* +.venv/ +__pycache__/ +*.pyc +node_modules/ \ No newline at end of file diff --git a/source/infrastructure/test/mock-lambda-func/node-lambda/package-lock.json b/source/infrastructure/test/mock-lambda-func/node-lambda/package-lock.json index 58220b6c..f1c3278c 100644 --- a/source/infrastructure/test/mock-lambda-func/node-lambda/package-lock.json +++ b/source/infrastructure/test/mock-lambda-func/node-lambda/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/node-lambda", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@amzn/node-lambda", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0" } } diff --git a/source/infrastructure/test/mock-lambda-func/node-lambda/package.json b/source/infrastructure/test/mock-lambda-func/node-lambda/package.json index 8bd4577d..38c9549f 100644 --- a/source/infrastructure/test/mock-lambda-func/node-lambda/package.json +++ b/source/infrastructure/test/mock-lambda-func/node-lambda/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/node-lambda", - "version": "3.0.7", + "version": "4.0.0", "description": "A mock lambda implementation for CDK infrastructure unit", "main": "index.js", "scripts": { diff --git a/source/infrastructure/test/mock-lambda-func/python-lambda/pyproject.toml b/source/infrastructure/test/mock-lambda-func/python-lambda/pyproject.toml index dc0a503c..b865ddd2 100644 --- a/source/infrastructure/test/mock-lambda-func/python-lambda/pyproject.toml +++ b/source/infrastructure/test/mock-lambda-func/python-lambda/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mock-lambda-function" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Mock lambda implementation to unit test infrastructure code" packages = [ diff --git a/source/infrastructure/test/mock-lambda-func/typescript-lambda/package-lock.json b/source/infrastructure/test/mock-lambda-func/typescript-lambda/package-lock.json index 0f96ddcf..0eac0da3 100644 --- a/source/infrastructure/test/mock-lambda-func/typescript-lambda/package-lock.json +++ b/source/infrastructure/test/mock-lambda-func/typescript-lambda/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/mock-typescript-lambda", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@amzn/mock-typescript-lambda", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "dependencies": { "@types/aws-lambda": "^8.10.138", @@ -2463,9 +2463,10 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -5258,9 +5259,9 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" diff --git a/source/infrastructure/test/mock-lambda-func/typescript-lambda/package.json b/source/infrastructure/test/mock-lambda-func/typescript-lambda/package.json index fce9254b..ee7d8070 100644 --- a/source/infrastructure/test/mock-lambda-func/typescript-lambda/package.json +++ b/source/infrastructure/test/mock-lambda-func/typescript-lambda/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/mock-typescript-lambda", - "version": "3.0.7", + "version": "4.0.0", "description": "A mock lambda implementation for CDK infrastructure unit", "main": "index.ts", "scripts": { diff --git a/source/infrastructure/test/mock-ui/package-lock.json b/source/infrastructure/test/mock-ui/package-lock.json index b4f99472..8d477fa0 100644 --- a/source/infrastructure/test/mock-ui/package-lock.json +++ b/source/infrastructure/test/mock-ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/mock-react-app", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/mock-react-app", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "devDependencies": { "@babel/plugin-proposal-private-property-in-object": "^7.21.11", diff --git a/source/infrastructure/test/mock-ui/package.json b/source/infrastructure/test/mock-ui/package.json index 8e595ee3..1fb5aaed 100644 --- a/source/infrastructure/test/mock-ui/package.json +++ b/source/infrastructure/test/mock-ui/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/mock-react-app", - "version": "3.0.7", + "version": "4.0.0", "description": "Mock Reactjs app used for unit testing constructs", "devDependencies": { "@babel/plugin-proposal-private-property-in-object": "^7.21.11", diff --git a/source/infrastructure/test/multimodal/multimodal-setup.test.ts b/source/infrastructure/test/multimodal/multimodal-setup.test.ts new file mode 100644 index 00000000..0ca563a8 --- /dev/null +++ b/source/infrastructure/test/multimodal/multimodal-setup.test.ts @@ -0,0 +1,779 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as api from 'aws-cdk-lib/aws-apigateway'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import * as sqs from 'aws-cdk-lib/aws-sqs'; +import * as s3 from 'aws-cdk-lib/aws-s3'; +import { Capture, Match, Template } from 'aws-cdk-lib/assertions'; +import * as rawCdkJson from '../../cdk.json'; + +import { MultimodalSetup } from '../../lib/multimodal/multimodal-setup'; +import { + COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + LAMBDA_TIMEOUT_MINS, + MULTIMODAL_FILE_EXPIRATION_DAYS, + POWERTOOLS_SERVICE_NAME_ENV_VAR, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + StackDeploymentSource +} from '../../lib/utils/constants'; + +describe('When creating MultimodalSetup construct', () => { + let template: Template; + let jsonTemplate: any; + let stack: cdk.Stack; + let multimodalSetup: MultimodalSetup; + + beforeAll(() => { + [template, stack, multimodalSetup] = buildStack(); + jsonTemplate = template.toJSON(); + }); + + describe('Files management and update metadata lambdas', () => { + it('should create files handler lambda function with complete configuration', () => { + const dlqCapture = new Capture(); + const tableNameCapture = new Capture(); + const bucketNameCapture = new Capture(); + + template.hasResourceProperties('AWS::Lambda::Function', { + Description: 'Lambda function backing the REST API for file management operations', + Runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME.name, + Handler: 'index.handler', + Timeout: LAMBDA_TIMEOUT_MINS * 60, + TracingConfig: { + Mode: 'Active' + }, + DeadLetterConfig: { + TargetArn: { + 'Fn::GetAtt': [dlqCapture, 'Arn'] + } + }, + Environment: { + Variables: { + [POWERTOOLS_SERVICE_NAME_ENV_VAR]: 'FILES_MANAGEMENT', + [MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR]: { + Ref: tableNameCapture + }, + [MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]: { + Ref: bucketNameCapture + } + } + } + }); + + expect(jsonTemplate['Resources'][dlqCapture.asString()]['Type']).toEqual('AWS::SQS::Queue'); // Verify DLQ is an SQS Queue + expect(jsonTemplate['Resources'][tableNameCapture.asString()]['Type']).toEqual('AWS::DynamoDB::Table'); + expect(jsonTemplate['Resources'][bucketNameCapture.asString()]['Type']).toEqual('AWS::S3::Bucket'); + }); + + it('should create update metadata lambda function with complete configuration', () => { + const dlqCapture = new Capture(); + const tableNameCapture = new Capture(); + const bucketNameCapture = new Capture(); + + template.hasResourceProperties('AWS::Lambda::Function', { + Description: 'Lambda function that updates multimodal files metadata when files are uploaded to S3', + Runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME.name, + Handler: 'index.handler', + Timeout: LAMBDA_TIMEOUT_MINS * 60, + TracingConfig: { + Mode: 'Active' + }, + DeadLetterConfig: { + TargetArn: { + 'Fn::GetAtt': [dlqCapture, 'Arn'] + } + }, + Environment: { + Variables: { + POWERTOOLS_SERVICE_NAME: 'FILES_METADATA_MANAGEMENT', + MULTIMODAL_METADATA_TABLE_NAME: { + Ref: tableNameCapture + }, + MULTIMODAL_DATA_BUCKET: { + Ref: bucketNameCapture + } + } + } + }); + + expect(jsonTemplate['Resources'][dlqCapture.asString()]['Type']).toEqual('AWS::SQS::Queue'); // Verify DLQ is an SQS Queue + expect(jsonTemplate['Resources'][tableNameCapture.asString()]['Type']).toEqual('AWS::DynamoDB::Table'); + expect(jsonTemplate['Resources'][bucketNameCapture.asString()]['Type']).toEqual('AWS::S3::Bucket'); + }); + + it('should create lambda function roles with correct permissions', () => { + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: [ + { + Action: 'sts:AssumeRole', + Effect: 'Allow', + Principal: { + Service: 'lambda.amazonaws.com' + } + } + ] + } + }); + }); + + it('should add API Gateway invoke permission to files handler lambda', () => { + const restApiCapture = new Capture(); + const lambdaCapture = new Capture(); + + template.hasResourceProperties('AWS::Lambda::Permission', { + Action: 'lambda:InvokeFunction', + FunctionName: { + 'Fn::GetAtt': [lambdaCapture, 'Arn'] + }, + Principal: 'apigateway.amazonaws.com', + SourceArn: { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition' + }, + ':execute-api:', + { + Ref: 'AWS::Region' + }, + ':', + { + Ref: 'AWS::AccountId' + }, + ':', + { + Ref: restApiCapture + }, + '/*' + ] + ] + } + }); + + expect(jsonTemplate['Resources'][restApiCapture.asString()]['Type']).toEqual('AWS::ApiGateway::RestApi'); + expect(jsonTemplate['Resources'][lambdaCapture.asString()]['Type']).toEqual('AWS::Lambda::Function'); + }); + + it('should add EventBridge invoke permission to update metadata lambda', () => { + const lambdaCapture = new Capture(); + const ruleCapture = new Capture(); + + template.hasResourceProperties('AWS::Lambda::Permission', { + Action: 'lambda:InvokeFunction', + FunctionName: { + 'Fn::GetAtt': [lambdaCapture, 'Arn'] + }, + Principal: 'events.amazonaws.com', + SourceArn: { + 'Fn::GetAtt': [ruleCapture, 'Arn'] + } + }); + + expect(jsonTemplate['Resources'][lambdaCapture.asString()]['Type']).toEqual('AWS::Lambda::Function'); + expect(jsonTemplate['Resources'][ruleCapture.asString()]['Type']).toEqual('AWS::Events::Rule'); + }); + }); + + describe('API Gateway Resources', () => { + it('should create /files resource', () => { + const restApiCapture = new Capture(); + + template.hasResourceProperties('AWS::ApiGateway::Resource', { + ParentId: { + 'Fn::GetAtt': [restApiCapture, 'RootResourceId'] + }, + PathPart: 'files', + RestApiId: { + Ref: restApiCapture + } + }); + + expect(jsonTemplate['Resources'][restApiCapture.asString()]['Type']).toEqual('AWS::ApiGateway::RestApi'); + }); + + it('should create /files/{useCaseId} resource', () => { + const restApiCapture = new Capture(); + const filesResourceCapture = new Capture(); + + template.hasResourceProperties('AWS::ApiGateway::Resource', { + ParentId: { + Ref: filesResourceCapture + }, + PathPart: '{useCaseId}', + RestApiId: { + Ref: restApiCapture + } + }); + + expect(jsonTemplate['Resources'][restApiCapture.asString()]['Type']).toEqual('AWS::ApiGateway::RestApi'); + }); + + it('should create POST method for file uploads', () => { + const filesResourceCapture = new Capture(); + const authorizerCapture = new Capture(); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'POST', + OperationName: 'UploadFiles', + ResourceId: { + Ref: filesResourceCapture + }, + AuthorizerId: { + Ref: authorizerCapture + }, + Integration: { + IntegrationHttpMethod: 'POST', + PassthroughBehavior: 'NEVER', + Type: 'AWS_PROXY' + } + }); + + expect(jsonTemplate['Resources'][authorizerCapture.asString()]['Type']).toEqual( + 'AWS::ApiGateway::Authorizer' + ); + }); + + it('should create DELETE method for file deletion', () => { + const filesResourceCapture = new Capture(); + const authorizerCapture = new Capture(); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'DELETE', + OperationName: 'DeleteFiles', + ResourceId: { + Ref: filesResourceCapture + }, + AuthorizerId: { + Ref: authorizerCapture + }, + Integration: { + IntegrationHttpMethod: 'POST', + PassthroughBehavior: 'NEVER', + Type: 'AWS_PROXY' + } + }); + }); + + it('should create GET method for file retrieval with query parameters', () => { + const filesResourceCapture = new Capture(); + const authorizerCapture = new Capture(); + + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'GET', + OperationName: 'GetFile', + ResourceId: { + Ref: filesResourceCapture + }, + AuthorizerId: { + Ref: authorizerCapture + }, + RequestParameters: { + 'method.request.querystring.fileName': true, + 'method.request.querystring.conversationId': true, + 'method.request.querystring.messageId': true + }, + Integration: { + IntegrationHttpMethod: 'POST', + PassthroughBehavior: 'NEVER', + Type: 'AWS_PROXY' + } + }); + }); + + it('should configure CORS for all methods', () => { + // Verify OPTIONS method exists for CORS + template.hasResourceProperties('AWS::ApiGateway::Method', { + HttpMethod: 'OPTIONS' + }); + }); + }); + + describe('API Models', () => { + it('should create upload request model', () => { + template.hasResourceProperties('AWS::ApiGateway::Model', { + Description: 'Defines the required JSON structure for file upload requests', + ContentType: 'application/json' + }); + }); + + it('should create upload response model', () => { + template.hasResourceProperties('AWS::ApiGateway::Model', { + Description: 'Response model for file upload operations', + ContentType: 'application/json' + }); + }); + + it('should create delete request model', () => { + template.hasResourceProperties('AWS::ApiGateway::Model', { + Description: 'Defines the required JSON structure for file deletion requests', + ContentType: 'application/json' + }); + }); + + it('should create delete response model', () => { + template.hasResourceProperties('AWS::ApiGateway::Model', { + Description: 'Response model for file deletion operations', + ContentType: 'application/json' + }); + }); + + it('should create get response model', () => { + template.hasResourceProperties('AWS::ApiGateway::Model', { + Description: 'Response model for file retrieval operations', + ContentType: 'application/json' + }); + }); + }); + + describe('DynamoDB Table Creation', () => { + it('should create files metadata table with correct properties', () => { + template.hasResourceProperties('AWS::DynamoDB::Table', { + BillingMode: 'PAY_PER_REQUEST', + AttributeDefinitions: [ + { + AttributeName: 'fileKey', + AttributeType: 'S' + }, + { + AttributeName: 'fileName', + AttributeType: 'S' + } + ], + KeySchema: [ + { + AttributeName: 'fileKey', + KeyType: 'HASH' + }, + { + AttributeName: 'fileName', + KeyType: 'RANGE' + } + ], + TimeToLiveSpecification: { + AttributeName: 'ttl', + Enabled: true + }, + SSESpecification: { + SSEEnabled: true + } + }); + }); + }); + + describe('Multimodal S3 Bucket Creation', () => { + it('should create multimodal data bucket with complete configuration', () => { + template.hasResourceProperties('AWS::S3::Bucket', { + BucketEncryption: { + ServerSideEncryptionConfiguration: [ + { + ServerSideEncryptionByDefault: { + SSEAlgorithm: 'AES256' + } + } + ] + }, + PublicAccessBlockConfiguration: { + BlockPublicAcls: true, + BlockPublicPolicy: true, + IgnorePublicAcls: true, + RestrictPublicBuckets: true + }, + LifecycleConfiguration: { + Rules: [ + { + Id: 'DeleteFilesAfter48Hours', + Status: 'Enabled', + ExpirationInDays: MULTIMODAL_FILE_EXPIRATION_DAYS + } + ] + }, + CorsConfiguration: { + CorsRules: [ + { + AllowedMethods: ['POST'], + AllowedOrigins: ['*'], + AllowedHeaders: ['*'], + MaxAge: 3600 + } + ] + } + }); + }); + + it('should configure EventBridge rule for S3 object created events', () => { + const bucketNameCapture = new Capture(); + + template.hasResourceProperties('AWS::Events::Rule', { + EventPattern: { + source: ['aws.s3'], + 'detail-type': ['Object Created'], + detail: { + bucket: { + name: [ + { + Ref: bucketNameCapture + } + ] + } + } + }, + Description: 'Trigger metadata update when files are uploaded to multimodal bucket' + }); + + expect(jsonTemplate['Resources'][bucketNameCapture.asString()]['Type']).toEqual('AWS::S3::Bucket'); + }); + + it('should create custom resource for S3 bucket notifications', () => { + const bucketNameCapture = new Capture(); + + template.hasResourceProperties('Custom::MultimodalBucketNotifications', { + Resource: 'MULTIMODAL_BUCKET_NOTIFICATIONS', + MULTIMODAL_DATA_BUCKET: { + Ref: bucketNameCapture + } + }); + + expect(jsonTemplate['Resources'][bucketNameCapture.asString()]['Type']).toEqual('AWS::S3::Bucket'); + }); + + it('should grant custom resource lambda permissions for S3 notifications', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Effect: 'Allow', + Action: ['s3:PutBucketNotification', 's3:PutBucketNotificationConfiguration'], + Resource: { + 'Fn::GetAtt': Match.arrayWith([Match.stringLikeRegexp('.*'), 'Arn']) + } + } + ] + }, + PolicyName: Match.stringLikeRegexp('.*CustomResourceS3EventsNotificationsPolicy.*') + }); + }); + }); + + describe('IAM Permissions', () => { + it('should grant DynamoDB permissions to files handler lambda', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Action: [ + 'dynamodb:BatchGetItem', + 'dynamodb:BatchWriteItem', + 'dynamodb:ConditionCheckItem', + 'dynamodb:DeleteItem', + 'dynamodb:DescribeTable', + 'dynamodb:GetItem', + 'dynamodb:GetRecords', + 'dynamodb:GetShardIterator', + 'dynamodb:PutItem', + 'dynamodb:Query', + 'dynamodb:Scan', + 'dynamodb:UpdateItem' + ], + Effect: 'Allow', + Resource: Match.arrayWith([ + { + 'Fn::GetAtt': Match.arrayWith([ + Match.stringLikeRegexp('.*MultimodalDataMetadataTable.*'), + 'Arn' + ]) + } + ]) + } + ]) + }, + PolicyName: Match.stringLikeRegexp('.*FilesManagementLambdaRoleDefaultPolicy.*') + }); + }); + + it('should grant DynamoDB read/write permissions to update metadata lambda', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Action: Match.arrayWith([ + 'dynamodb:BatchGetItem', + 'dynamodb:BatchWriteItem', + 'dynamodb:ConditionCheckItem', + 'dynamodb:DeleteItem', + 'dynamodb:DescribeTable', + 'dynamodb:GetItem', + 'dynamodb:GetRecords', + 'dynamodb:GetShardIterator', + 'dynamodb:PutItem', + 'dynamodb:Query', + 'dynamodb:Scan', + 'dynamodb:UpdateItem' + ]), + Effect: 'Allow', + Resource: Match.arrayWith([ + { + 'Fn::GetAtt': Match.arrayWith([ + Match.stringLikeRegexp('.*MultimodalDataMetadataTable.*'), + 'Arn' + ]) + } + ]) + } + ]) + }, + PolicyName: Match.stringLikeRegexp('.*UpdateFilesMetadataLambdaRoleDefaultPolicy.*') + }); + }); + + it('should grant S3 read/write permissions to files handler lambda', () => { + // Verify S3 read/write actions and that resources include both bucket ARN and bucket ARN/* + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Action: Match.arrayWith([ + 's3:Abort*', + 's3:DeleteObject*', + 's3:GetBucket*', + 's3:GetObject*', + 's3:List*', + 's3:PutObject' + ]), + Effect: 'Allow', + Resource: Match.arrayWith([ + // Bucket ARN + Match.objectLike({ + 'Fn::GetAtt': Match.arrayWith([Match.stringLikeRegexp('.*'), 'Arn']) + }), + // Bucket ARN with /* + Match.objectLike({ + 'Fn::Join': Match.arrayWith([ + Match.stringLikeRegexp('.*'), + Match.arrayWith([Match.objectLike({}), '/*']) + ]) + }) + ]) + }) + ]) + }, + PolicyName: Match.stringLikeRegexp('.*FilesManagementLambda.*') + }); + }); + + it('should grant S3 read permissions to update metadata lambda', () => { + // Verify S3 read actions and that resources include both bucket ARN and bucket ARN/* + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Action: Match.arrayWith(['s3:GetBucket*', 's3:GetObject*', 's3:List*']), + Effect: 'Allow', + Resource: Match.arrayWith([ + // Bucket ARN + Match.objectLike({ + 'Fn::GetAtt': Match.arrayWith([Match.stringLikeRegexp('.*'), 'Arn']) + }), + // Bucket ARN with /* + Match.objectLike({ + 'Fn::Join': Match.arrayWith([ + Match.stringLikeRegexp('.*'), + Match.arrayWith([Match.objectLike({}), '/*']) + ]) + }) + ]) + }) + ]) + }, + PolicyName: Match.stringLikeRegexp('.*UpdateFilesMetadataLambda.*') + }); + }); + + it('should grant X-Ray tracing permissions to lambda functions', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Action: ['xray:PutTelemetryRecords', 'xray:PutTraceSegments'], + Effect: 'Allow', + Resource: '*' + } + ]) + } + }); + }); + + it('should grant SQS permissions for dead letter queue to files handler lambda', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Action: 'sqs:SendMessage', + Effect: 'Allow', + Resource: { + 'Fn::GetAtt': Match.arrayWith([Match.stringLikeRegexp('.*DLQ.*'), 'Arn']) + } + } + ]) + }, + PolicyName: Match.stringLikeRegexp('.*FilesManagementLambdaRoleDefaultPolicy.*') + }); + }); + + it('should grant SQS permissions for dead letter queue to update metadata lambda', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Action: 'sqs:SendMessage', + Effect: 'Allow', + Resource: { + 'Fn::GetAtt': Match.arrayWith([Match.stringLikeRegexp('.*DLQ.*'), 'Arn']) + } + } + ]) + }, + PolicyName: Match.stringLikeRegexp('.*UpdateFilesMetadataLambdaRoleDefaultPolicy.*') + }); + }); + }); + + describe('Resource Counts', () => { + it('should create expected number of resources', () => { + // Lambda functions: FilesHandler + UpdateMetadata + 1 mock authorizer + 1 custom resource + template.resourceCountIs('AWS::Lambda::Function', 4); + + // API Gateway resources + template.resourceCountIs('AWS::ApiGateway::Resource', 2); // /files and /files/{useCaseId} resources + template.resourceCountIs('AWS::ApiGateway::Method', 4); // POST, DELETE, GET, OPTIONS + template.resourceCountIs('AWS::ApiGateway::Model', 5); // Upload req/res, Delete req/res, Get res + + // IAM resources (FilesHandler role + UpdateMetadata role + 1 mock authorizer + 1 custom resource) + template.resourceCountIs('AWS::IAM::Role', 4); + + // DynamoDB table + template.resourceCountIs('AWS::DynamoDB::Table', 1); + + // S3 buckets (multimodal data bucket + mock access logging buckets) + template.resourceCountIs('AWS::S3::Bucket', 3); + + // EventBridge rule for S3 events + template.resourceCountIs('AWS::Events::Rule', 1); + + // Custom resource for bucket notifications + template.resourceCountIs('Custom::MultimodalBucketNotifications', 1); + + // Lambda permissions: API Gateway + EventBridge + mock authorizer + const permissionCount = template.findResources('AWS::Lambda::Permission'); + expect(Object.keys(permissionCount).length).toBeGreaterThanOrEqual(2); + }); + }); + + describe('Resource condition application', () => { + it('should have applyConditionToAllResources method available', () => { + const condition = new cdk.CfnCondition(stack, 'TestCondition', { + expression: cdk.Fn.conditionEquals('true', 'true') + }); + + expect(() => { + multimodalSetup.applyConditionToAllResources(condition); + }).not.toThrow(); + }); + }); +}); + +interface BuildStackOptions { + useEmptyParams?: boolean; + addAssetBucketContext?: boolean; +} + +function buildStack(options: BuildStackOptions = {}): [Template, cdk.Stack, MultimodalSetup] { + const { useEmptyParams = false, addAssetBucketContext = false } = options; + + const context = { ...rawCdkJson.context }; + if (addAssetBucketContext) { + context['cdk-asset-bucket'] = 'asset-bucket'; + } + + const app = new cdk.App({ + context: context + }); + const stack = new cdk.Stack(app, 'TestStack'); + + const mockLambdaFuncProps = { + code: lambda.Code.fromAsset('../infrastructure/test/mock-lambda-func/node-lambda'), + runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + handler: 'index.handler' + }; + + // Create required dependencies for MultimodalSetup + const restApiProps = useEmptyParams + ? {} + : { + description: 'Test REST API for multimodal setup', + endpointConfiguration: { + types: [api.EndpointType.EDGE] + } + }; + + const restApi = new api.RestApi(stack, 'TestRestApi', restApiProps); + + const deploymentPlatformAuthorizer = new api.RequestAuthorizer(stack, 'TestAuthorizer', { + handler: new lambda.Function(stack, 'MockAuthorizerFunction', mockLambdaFuncProps), + identitySources: [api.IdentitySource.header('Authorization')], + resultsCacheTtl: cdk.Duration.seconds(0) + }); + + const requestValidator = new api.RequestValidator(stack, 'TestRequestValidator', { + restApi: restApi, + validateRequestBody: true, + validateRequestParameters: true + }); + + const dlqProps = useEmptyParams ? {} : { queueName: 'test-dlq' }; + const dlq = new sqs.Queue(stack, 'TestDLQ', dlqProps); + + const deployVPCCondition = new cdk.CfnCondition(stack, 'DeployVPCCondition', { + expression: cdk.Fn.conditionEquals('true', 'false') + }); + + const customResourceLambda = new lambda.Function(stack, 'MockCustomResourceLambda', mockLambdaFuncProps); + + const bucketName = useEmptyParams ? 'mock-access-logging-bucket-error' : 'mock-access-logging-bucket'; + const accessLoggingS3Bucket = new s3.Bucket(stack, 'MockAccessLoggingBucket', { + bucketName: bucketName, + removalPolicy: cdk.RemovalPolicy.DESTROY + }); + + const privateSubnetIds = useEmptyParams ? '' : 'subnet-12345,subnet-67890'; + const securityGroupIds = useEmptyParams ? '' : 'sg-12345,sg-67890'; + + const multimodalSetup = new MultimodalSetup(stack, 'TestMultimodalSetup', { + restApi: restApi, + deploymentPlatformAuthorizer: deploymentPlatformAuthorizer, + requestValidator: requestValidator, + dlq: dlq, + deployVPCCondition: deployVPCCondition, + privateSubnetIds: privateSubnetIds, + securityGroupIds: securityGroupIds, + customResourceLambdaArn: customResourceLambda.functionArn, + customResourceLambdaRoleArn: customResourceLambda.role!.roleArn, + accessLoggingS3Bucket: accessLoggingS3Bucket, + stackSource: StackDeploymentSource.DEPLOYMENT_PLATFORM + }); + + const template = Template.fromStack(stack); + return [template, stack, multimodalSetup]; +} + +describe('Error handling and edge cases', () => { + it('should handle missing optional parameters gracefully', () => { + const [template] = buildStack({ useEmptyParams: true, addAssetBucketContext: true }); + expect(template).toBeDefined(); + }); +}); diff --git a/source/infrastructure/test/storage/chat-storage-stack.test.ts b/source/infrastructure/test/storage/chat-storage-stack.test.ts index ec779b0b..cae77932 100644 --- a/source/infrastructure/test/storage/chat-storage-stack.test.ts +++ b/source/infrastructure/test/storage/chat-storage-stack.test.ts @@ -29,7 +29,7 @@ describe('When creating the nested stack for chat storage with useCaseType as Te template = Template.fromStack(nestedStack); }); - it('should template to have following parameters', () => { + it('template should have the following parameters', () => { template.hasParameter('ConversationTableName', { Type: 'String', MaxLength: 255, diff --git a/source/infrastructure/test/storage/deployment-platform-storage-setup.test.ts b/source/infrastructure/test/storage/deployment-platform-storage-setup.test.ts index 842c13d3..0aa4dfad 100644 --- a/source/infrastructure/test/storage/deployment-platform-storage-setup.test.ts +++ b/source/infrastructure/test/storage/deployment-platform-storage-setup.test.ts @@ -42,18 +42,25 @@ describe('When creating the use case storage construct', () => { accessLoggingBucket: new s3.Bucket(stack, 'fakelogggingbucket') }); - deploymentPlatform.addLambdaDependencies({ - feedbackApiLambda: feedbackLambda, - modelInfoApiLambda: modelInfoLambda, - deploymentApiLambda: deploymentLambda - }); + const mcpManagementLambda = new lambda.Function(stack, 'mcpManagementLambda', mockLambdaFuncProps); + const agentManagementLambda = new lambda.Function(stack, 'agentManagementLambda', mockLambdaFuncProps); + const workflowManagementLambda = new lambda.Function(stack, 'workflowManagementLambda', mockLambdaFuncProps); + const filesManagementLambda = new lambda.Function(stack, 'filesManagementLambda', mockLambdaFuncProps); + + deploymentPlatform.configureDeploymentApiLambda(deploymentLambda); + deploymentPlatform.configureModelInfoApiLambda(modelInfoLambda); + deploymentPlatform.configureFeedbackApiLambda(feedbackLambda); + deploymentPlatform.configureFilesHandlerLambda(filesManagementLambda); + deploymentPlatform.configureUseCaseManagementApiLambda(mcpManagementLambda, 'MCP'); + deploymentPlatform.configureUseCaseManagementApiLambda(agentManagementLambda, 'Agent'); + deploymentPlatform.configureUseCaseManagementApiLambda(workflowManagementLambda, 'Workflow'); template = Template.fromStack(stack); }); it('has the correct resources', () => { - // use case mgmt, model info, feedback, custom resource - template.resourceCountIs('AWS::Lambda::Function', 4); + // deployment, model info, feedback, mcp management, agent management, workflow management, files metadata, custom resource + template.resourceCountIs('AWS::Lambda::Function', 8); }); it('deployment platform api lambda is properly configured to access dynamodb with environment variables', () => { @@ -193,4 +200,145 @@ describe('When creating the use case storage construct', () => { 'PolicyName': Match.stringLikeRegexp('feedbackApiLambdaServiceRoleDefaultPolicy*') }); }); + + it('mcp management api lambda is properly configured to access dynamodb with environment variables', () => { + // Check Lambda function properties + template.hasResourceProperties('AWS::Lambda::Function', { + 'Handler': 'index.handler', + 'Role': { + 'Fn::GetAtt': [Match.stringLikeRegexp('mcpManagementLambdaServiceRole*'), 'Arn'] + }, + 'Runtime': 'nodejs22.x', + 'Environment': { + 'Variables': { + [USE_CASES_TABLE_NAME_ENV_VAR]: { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'TestSetupDeploymentPlatformStorageNestedStackDeploymentPlatformStorageNestedStackResource*' + ), + Match.stringLikeRegexp('Outputs.TestStackTestSetupDeploymentPlatformStorageUseCasesTable*') + ] + }, + [USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]: { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'TestSetupDeploymentPlatformStorageNestedStackDeploymentPlatformStorageNestedStackResource*' + ), + Match.stringLikeRegexp('Outputs.TestStackTestSetupDeploymentPlatformStorageLLMConfigTable*') + ] + } + } + } + }); + + // Check DynamoDB policy for MCP management Lambda + template.hasResourceProperties('AWS::IAM::Policy', { + 'PolicyDocument': { + 'Statement': [ + { + 'Action': [ + 'dynamodb:Batch*', + 'dynamodb:ConditionCheckItem', + 'dynamodb:DeleteItem', + 'dynamodb:Get*', + 'dynamodb:PutItem', + 'dynamodb:Query', + 'dynamodb:Scan', + 'dynamodb:UpdateItem' + ], + 'Effect': 'Allow', + 'Resource': [ + { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'TestSetupDeploymentPlatformStorageNestedStackDeploymentPlatformStorageNestedStackResource*' + ), + Match.stringLikeRegexp( + 'Outputs.TestStackTestSetupDeploymentPlatformStorageUseCasesTable*' + ) + ] + }, + { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'TestSetupDeploymentPlatformStorageNestedStackDeploymentPlatformStorageNestedStackResource*' + ), + Match.stringLikeRegexp( + 'Outputs.TestStackTestSetupDeploymentPlatformStorageLLMConfigTable*' + ) + ] + } + ] + } + ], + 'Version': '2012-10-17' + }, + 'PolicyName': Match.stringLikeRegexp('MCPManagementDDBPolicy*') + }); + }); + + it('files handler lambda is properly configured to access LLM config with environment variables', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + 'Handler': 'index.handler', + 'Role': { + 'Fn::GetAtt': [Match.stringLikeRegexp('filesManagementLambdaServiceRole*'), 'Arn'] + }, + 'Runtime': 'nodejs22.x', + 'Environment': { + 'Variables': { + [USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]: { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'TestSetupDeploymentPlatformStorageNestedStackDeploymentPlatformStorageNestedStackResource*' + ), + Match.stringLikeRegexp('Outputs.TestStackTestSetupDeploymentPlatformStorageLLMConfigTable*') + ] + }, + [USE_CASES_TABLE_NAME_ENV_VAR]: { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'TestSetupDeploymentPlatformStorageNestedStackDeploymentPlatformStorageNestedStackResource*' + ), + Match.stringLikeRegexp('Outputs.TestStackTestSetupDeploymentPlatformStorageUseCasesTable*') + ] + } + } + } + }); + + template.hasResourceProperties('AWS::IAM::Policy', { + 'PolicyDocument': { + 'Statement': [ + { + 'Action': ['dynamodb:GetItem', 'dynamodb:Query'], + 'Effect': 'Allow', + 'Resource': [ + { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'TestSetupDeploymentPlatformStorageNestedStackDeploymentPlatformStorageNestedStackResource*' + ), + Match.stringLikeRegexp( + 'Outputs.TestStackTestSetupDeploymentPlatformStorageLLMConfigTable*' + ) + ] + }, + { + 'Fn::GetAtt': [ + Match.stringLikeRegexp( + 'TestSetupDeploymentPlatformStorageNestedStackDeploymentPlatformStorageNestedStackResource*' + ), + Match.stringLikeRegexp( + 'Outputs.TestStackTestSetupDeploymentPlatformStorageUseCasesTable*' + ) + ] + } + ] + } + ], + 'Version': '2012-10-17' + }, + 'PolicyName': Match.stringLikeRegexp('filesManagementLambdaServiceRoleDefaultPolicy*') + }); + }); }); diff --git a/source/infrastructure/test/storage/deployment-platform-storage-stack.test.ts b/source/infrastructure/test/storage/deployment-platform-storage-stack.test.ts index f0895ee7..66ba2253 100644 --- a/source/infrastructure/test/storage/deployment-platform-storage-stack.test.ts +++ b/source/infrastructure/test/storage/deployment-platform-storage-stack.test.ts @@ -28,7 +28,7 @@ describe('When creating the nested stack for chat storage', () => { expect(template).not.toBe(undefined); }); - it('should create 2 dynamoDB tables', () => { + it('should create 3 dynamoDB tables', () => { template.resourceCountIs('AWS::DynamoDB::Table', 3); template.hasResource('AWS::DynamoDB::Table', { diff --git a/source/infrastructure/test/use-case-management/cfn-deploy-role-factory.test.ts b/source/infrastructure/test/use-case-management/cfn-deploy-role-factory.test.ts new file mode 100644 index 00000000..e1247ec7 --- /dev/null +++ b/source/infrastructure/test/use-case-management/cfn-deploy-role-factory.test.ts @@ -0,0 +1,391 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import { Template, Match } from 'aws-cdk-lib/assertions'; +import { + createCfnDeployRole, + + CfnDeployRoleConfig +} from '../../lib/use-case-management/cfn-deploy-role-factory'; + +describe('CFN Deploy Role Factory', () => { + let app: cdk.App; + let stack: cdk.Stack; + let lambdaRole: iam.Role; + let template: Template; + + beforeEach(() => { + app = new cdk.App(); + stack = new cdk.Stack(app, 'TestStack'); + + // Create a mock lambda role for testing + lambdaRole = new iam.Role(stack, 'TestLambdaRole', { + assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com') + }); + }); + + describe('createCfnDeployRole', () => { + it('should create a CFN deploy role with default configuration', () => { + const role = createCfnDeployRole(stack, 'TestCfnRole', lambdaRole); + template = Template.fromStack(stack); + + // Verify role creation + expect(role).toBeInstanceOf(iam.Role); + + // Verify role has CloudFormation service principal + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Principal: { + Service: Match.anyValue() + }, + Action: 'sts:AssumeRole' + }) + ]) + } + }); + + // Verify inline policy exists + template.hasResourceProperties('AWS::IAM::Role', { + Policies: Match.arrayWith([ + Match.objectLike({ + PolicyName: 'CfnDeployPolicy' + }) + ]) + }); + }); + + it('should create role with VPC permissions when includeVpcPermissions is true', () => { + const config: CfnDeployRoleConfig = { + includeVpcPermissions: true, + includeKendraPermissions: false, + includeEcrPermissions: false + }; + + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, config); + template = Template.fromStack(stack); + + // Verify VPC policy is created - check for specific VPC actions that exist + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith(['ec2:createVPC*']) + }) + ]) + } + }); + }); + + it('should create role with Kendra permissions when includeKendraPermissions is true', () => { + const config: CfnDeployRoleConfig = { + includeVpcPermissions: false, + includeKendraPermissions: true, + includeEcrPermissions: false + }; + + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, config); + template = Template.fromStack(stack); + + // Verify Kendra policy is created - CreateIndex is a single action string + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: 'kendra:CreateIndex' + }) + ]) + } + }); + }); + + it('should create role with ECR permissions when includeEcrPermissions is true', () => { + const config: CfnDeployRoleConfig = { + includeVpcPermissions: false, + includeKendraPermissions: false, + includeEcrPermissions: true + }; + + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, config); + template = Template.fromStack(stack); + + // Verify ECR policy is created - check for pull-through cache rule action + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith(['ecr:CreatePullThroughCacheRule']) + }) + ]) + } + }); + }); + + it('should include additional pass role services in core policy', () => { + const config: CfnDeployRoleConfig = { + additionalPassRoleServices: ['bedrock-agentcore.amazonaws.com', 'custom-service.amazonaws.com'] + }; + + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, config); + template = Template.fromStack(stack); + + // Verify additional services are included in pass role policy + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: 'iam:PassRole', + Condition: { + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId', 'Name'] + }, + 'StringEquals': { + 'iam:PassedToService': Match.arrayWith([ + 'lambda.amazonaws.com', + 'apigateway.amazonaws.com', + 'cloudformation.amazonaws.com', + 'bedrock-agentcore.amazonaws.com', + 'custom-service.amazonaws.com' + ]) + } + } + }) + ]) + } + }); + }); + + it('should attach policies to both CFN role and lambda role', () => { + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole); + template = Template.fromStack(stack); + + // Verify multiple policies are created + const policies = template.findResources('AWS::IAM::Policy'); + expect(Object.keys(policies).length).toBeGreaterThan(0); + + // Verify multiple roles exist + const roles = template.findResources('AWS::IAM::Role'); + expect(Object.keys(roles).length).toBeGreaterThanOrEqual(2); // Lambda role + CFN role + }); + }); + + describe('Core Policy Permissions', () => { + beforeEach(() => { + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole); + template = Template.fromStack(stack); + }); + + it('should include CloudFormation stack operations', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith(['cloudformation:CreateStack', 'cloudformation:UpdateStack']) + }) + ]) + } + }); + }); + + it('should include IAM role management permissions', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith(['iam:CreateRole', 'iam:GetRole']) + }) + ]) + } + }); + }); + + it('should include Lambda function management permissions', () => { + // Verify that Lambda permissions exist in the core policy + const policies = template.findResources('AWS::IAM::Policy'); + const corePolicyStatements = Object.values(policies) + .filter((policy: any) => policy.Properties?.PolicyName?.includes('CorePolicy')) + .flatMap((policy: any) => policy.Properties?.PolicyDocument?.Statement || []); + + // Check that Lambda permissions are present + const hasLambdaCreateFunction = corePolicyStatements.some((statement: any) => { + const actions = Array.isArray(statement.Action) ? statement.Action : [statement.Action]; + return actions.some((action: string) => action === 'lambda:CreateFunction'); + }); + + const hasLambdaAliasPermissions = corePolicyStatements.some((statement: any) => { + const actions = Array.isArray(statement.Action) ? statement.Action : [statement.Action]; + return actions.some((action: string) => action === 'lambda:*Alias*'); + }); + + const hasLambdaProvisionedConcurrency = corePolicyStatements.some((statement: any) => { + const actions = Array.isArray(statement.Action) ? statement.Action : [statement.Action]; + return actions.some((action: string) => action === 'lambda:*ProvisionedConcurrency*'); + }); + + expect(hasLambdaCreateFunction).toBe(true); + expect(hasLambdaAliasPermissions).toBe(true); + expect(hasLambdaProvisionedConcurrency).toBe(true); + }); + + it('should include S3 bucket management permissions', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith(['s3:CreateBucket']) + }) + ]) + } + }); + }); + + it('should include API Gateway permissions', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith(['apigateway:CreateRestApi']) + }) + ]) + } + }); + }); + + it('should include Cognito permissions', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith(['cognito-idp:CreateUserPool*']) + }) + ]) + } + }); + }); + + it('should include proper resource ARN patterns', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Resource: Match.anyValue() // Just verify resources exist + }) + ]) + } + }); + }); + + it('should include proper conditions for security', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Condition: { + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId'] + } + } + }) + ]) + } + }); + }); + }); + + describe('Error Handling', () => { + it('should handle empty additional services array', () => { + const config: CfnDeployRoleConfig = { + additionalPassRoleServices: [] + }; + + expect(() => { + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, config); + }).not.toThrow(); + }); + + it('should handle undefined config', () => { + expect(() => { + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, undefined); + }).not.toThrow(); + }); + + it('should handle custom role name', () => { + const config: CfnDeployRoleConfig = { + roleName: 'CustomRoleName' + }; + + const role = createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, config); + expect(role).toBeInstanceOf(iam.Role); + }); + }); + + describe('Policy Attachment Verification', () => { + it('should attach core policy to both lambda and CFN roles', () => { + const config: CfnDeployRoleConfig = { + includeVpcPermissions: false, + includeKendraPermissions: false, + includeEcrPermissions: false + }; + + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, config); + template = Template.fromStack(stack); + + // Verify multiple policies are created + const policies = template.findResources('AWS::IAM::Policy'); + expect(Object.keys(policies).length).toBeGreaterThan(0); + }); + + it('should create VPC policy when VPC permissions are enabled', () => { + const config: CfnDeployRoleConfig = { + includeVpcPermissions: true, + includeKendraPermissions: false, + includeEcrPermissions: false + }; + + createCfnDeployRole(stack, 'TestCfnRole', lambdaRole, config); + template = Template.fromStack(stack); + + // Should have VPC-related policy + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Action: Match.arrayWith(['ec2:createVPC*']) + }) + ]) + } + }); + }); + + it('should verify role has correct assume role policy structure', () => { + const role = createCfnDeployRole(stack, 'TestCfnRole', lambdaRole); + template = Template.fromStack(stack); + + // Verify the role exists and has the right structure + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Version: '2012-10-17', + Statement: Match.arrayWith([ + Match.objectLike({ + Effect: 'Allow', + Action: 'sts:AssumeRole' + }) + ]) + } + }); + }); + }); +}); diff --git a/source/infrastructure/test/use-case-management/management-stack.test.ts b/source/infrastructure/test/use-case-management/management-stack.test.ts index 5354214f..21a5af29 100644 --- a/source/infrastructure/test/use-case-management/management-stack.test.ts +++ b/source/infrastructure/test/use-case-management/management-stack.test.ts @@ -11,11 +11,16 @@ import { CLIENT_ID_ENV_VAR, COGNITO_POLICY_TABLE_ENV_VAR, COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME, + DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR, + GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR, IS_INTERNAL_USER_ENV_VAR, + MCP_INACTIVE_SCHEMA_EXPIRATION_DAYS, OPTIONAL_EMAIL_REGEX_PATTERN, POWERTOOLS_METRICS_NAMESPACE_ENV_VAR, USER_POOL_ID_ENV_VAR, - WEBCONFIG_SSM_KEY_ENV_VAR + WEBCONFIG_SSM_KEY_ENV_VAR, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR } from '../../lib/utils/constants'; describe('When creating a use case management Stack', () => { @@ -209,7 +214,7 @@ describe('When creating a use case management Stack', () => { it('should have a lambda function with environment variables', () => { template.hasResourceProperties('AWS::Lambda::Function', { Code: Match.anyValue(), - Handler: 'index.handler', + Handler: 'use-case-handler.handler', Runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME.name, Role: { 'Fn::GetAtt': [lambdaRoleCapture, 'Arn'] @@ -362,757 +367,183 @@ describe('When creating a use case management Stack', () => { Version: '2012-10-17' }, PolicyName: Match.stringLikeRegexp('UseCaseConfigAccess*'), - Roles: [ + Roles: Match.arrayWith([ { Ref: Match.stringLikeRegexp('UCMLRole*') } - ] + ]) }); }); - it('should have policies for cloudformation deployment that so that it can create, update, and delete stacks', () => { - template.hasResourceProperties('AWS::IAM::Policy', { - PolicyDocument: { - Statement: [ - { - Action: ['cloudformation:CreateStack', 'cloudformation:UpdateStack'], - Condition: { - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId'] - }, - StringLike: { - 'cloudformation:TemplateUrl': [Match.anyValue(), Match.anyValue()] - } - }, - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cloudformation:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':stack/*' - ] - ] - } - }, - { - Action: [ - 'cloudformation:DeleteStack', - 'cloudformation:DescribeStack*', - 'cloudformation:ListStacks' - ], - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cloudformation:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':stack/*' - ] - ] - } - }, - { - Action: [ - 'iam:*tRolePolicy', - 'iam:CreateRole', - 'iam:DeleteRole*', - 'iam:DetachRolePolicy', - 'iam:GetRole', - 'iam:ListRoleTags', - 'iam:TagRole', - 'iam:UpdateAssumeRolePolicy' - ], - Condition: { - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId', 'Name'] - } - }, - Effect: 'Allow', - Resource: [ - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':iam::', - { - Ref: 'AWS::AccountId' - }, - ':policy/*' - ] - ] - }, - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':iam::', - { - Ref: 'AWS::AccountId' - }, - ':role/*' - ] - ] - } - ] - }, - { - Action: 'iam:PassRole', - Condition: { - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId', 'Name'] - }, - StringEquals: { - 'iam:PassedToService': [ - 'lambda.amazonaws.com', - 'apigateway.amazonaws.com', - 'kendra.amazonaws.com', - 'vpc-flow-logs.amazonaws.com', - 'cloudformation.amazonaws.com' - ] - } - }, - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':iam::', - { - Ref: 'AWS::AccountId' - }, - ':role/*' - ] - ] - } - }, - { - Action: 'iam:AttachRolePolicy', - Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] - }, - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId'] - }, - StringEquals: { - 'iam:PolicyARN': [ - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':iam::aws:policy/service-role/AWSLambdaBasicExecutionRole' - ] - ] - } - ] - } - }, - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':iam::', - { - Ref: 'AWS::AccountId' - }, - ':role/*' - ] - ] - } - }, - { - Action: [ - 'lambda:*LayerVersion', - 'lambda:AddPermission', - 'lambda:CreateFunction', - 'lambda:Delete*', - 'lambda:GetFunction', - 'lambda:InvokeFunction', - 'lambda:ListTags', - 'lambda:RemovePermission', - 'lambda:TagResource', - 'lambda:UpdateEventSourceMapping', - 'lambda:UpdateFunction*' - ], - Condition: { - 'ForAllValues:StringEquals': { - 'aws:TagKeys': ['createdVia', 'userId'] - } - }, - Effect: 'Allow', - Resource: [ - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':lambda:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':event-source-mapping:*' - ] - ] - }, - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':lambda:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':function:*' - ] - ] - }, - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':lambda:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':layer:*' - ] - ] - } - ] - }, - { - Action: [ - 's3:*EncryptionConfiguration', - 's3:CreateBucket', - 's3:DeleteBucketPolicy', - 's3:GetBucketAcl', - 's3:GetBucketPolicy*', - 's3:GetBucketVersioning', - 's3:GetObject', - 's3:PutBucket*' - ], - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':s3:::*' - ] - ] - } - }, - { - Action: ['events:*Targets', 'events:DeleteRule', 'events:DescribeRule', 'events:PutRule'], - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':events:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':rule/*' - ] - ] - } - }, - { - Action: 'servicecatalog:*', - Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] - } - }, - Effect: 'Allow', - Resource: [ - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':servicecatalog:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':/applications/*' - ] - ] - }, - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':servicecatalog:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':/attribute-groups/*' - ] - ] - } - ] - }, - { - Action: [ - 'apigateway:CreateRestApi', - 'apigateway:CreateStage', - 'apigateway:DELETE', - 'apigateway:Delete*', - 'apigateway:GET', - 'apigateway:PATCH', - 'apigateway:POST', - 'apigateway:PUT', - 'apigateway:SetWebACL', - 'apigateway:TagResource', - 'wafv2:*ForResource', - 'wafv2:*WebACL', - 'wafv2:TagResource' - ], - Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] - } - }, - Effect: 'Allow', - Resource: [ - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':apigateway:', - { - Ref: 'AWS::Region' - }, - '::/*' - ] - ] - }, - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - 'Ref': 'AWS::Partition' - }, - ':wafv2:', - { - 'Ref': 'AWS::Region' - }, - ':', - { - 'Ref': 'AWS::AccountId' - }, - ':regional/*/*/*' - ] - ] - } - ] - }, - { - Action: [ - 'cognito-idp:*UserPoolClient', - 'cognito-idp:AdminAddUserToGroup', - 'cognito-idp:AdminCreateUser', - 'cognito-idp:AdminDeleteUser', - 'cognito-idp:AdminGetUser', - 'cognito-idp:AdminListGroupsForUser', - 'cognito-idp:AdminRemoveUserFromGroup', - 'cognito-idp:CreateGroup', - 'cognito-idp:CreateUserPool*', - 'cognito-idp:Delete*', - 'cognito-idp:GetGroup', - 'cognito-idp:SetUserPoolMfaConfig' - ], - Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] - } - }, - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cognito-idp:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':userpool/*' - ] - ] - } - }, - { - Action: 'cognito-idp:DescribeUserPool', - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cognito-idp:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':userpool/*' - ] - ] - } - }, - { - Action: [ - 'cloudfront:Create*', - 'cloudfront:Delete*', - 'cloudfront:DescribeFunction', - 'cloudfront:Get*', - 'cloudfront:ListTagsForResource', - 'cloudfront:PublishFunction', - 'cloudfront:TagResource', - 'cloudfront:Update*' - ], - Effect: 'Allow', - Resource: [ - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cloudfront::', - { - Ref: 'AWS::AccountId' - }, - ':distribution/*' - ] - ] - }, - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cloudfront::', - { - Ref: 'AWS::AccountId' - }, - ':function/*' - ] - ] - }, - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cloudfront::', - { - Ref: 'AWS::AccountId' - }, - ':origin-access-control/*' - ] - ] - }, - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cloudfront::', - { - Ref: 'AWS::AccountId' - }, - ':response-headers-policy/*' - ] - ] - } - ] - }, - { - Action: [ - 'kms:CreateGrant', - 'kms:Decrypt', - 'kms:DescribeKey', - 'kms:EnableKeyRotation', - 'kms:Encrypt', - 'kms:GenerateDataKey', - 'kms:PutKeyPolicy', - 'kms:TagResource' - ], - Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] - } - }, - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':kms:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':key/*' - ] - ] - } + it('should have policies for cloudformation deployment that so that it can create, update, and delete stacks', () => { + // Test that CloudFormation deployment policies exist with required permissions + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Action: Match.arrayWith(['cloudformation:CreateStack', 'cloudformation:UpdateStack']), + Effect: 'Allow' + }) + ]) + }, + PolicyName: Match.stringLikeRegexp('.*CorePolicy.*') + }); + }); + + it('should have IAM role management policies for CFN deployment', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Action: Match.arrayWith(['iam:CreateRole', 'iam:DeleteRole*']), + Effect: 'Allow' + }) + ]) + }, + PolicyName: Match.stringLikeRegexp('.*CorePolicy.*') + }); + }); + + it('should have Lambda management policies for CFN deployment', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Action: Match.arrayWith(['lambda:CreateFunction', 'lambda:Delete*']), + Effect: 'Allow' + }) + ]) + }, + PolicyName: Match.stringLikeRegexp('.*CorePolicy.*') + }); + }); + + it('should have agent management lambda function with correct properties', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'agents-handler.agentsHandler', + Runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME.name, + Environment: { + Variables: Match.objectLike({ + [POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]: 'UseCaseManagement', + [GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]: { + Ref: Match.stringLikeRegexp('FactoriesDeploymentPlatformBucket*') }, - { - Action: [ - 'kendra:CreateIndex', - 'kms:CreateKey', - 'lambda:CreateEventSourceMapping', - 'lambda:DeleteEventSourceMapping', - 'lambda:GetEventSourceMapping' - ], - Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] - } - }, + [DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR]: { + Ref: 'AWS::StackName' + } + }) + } + }); + }); + + it('should have agent builder CFN deploy role with ECR permissions', () => { + // Test that agent builder has ECR policy with pull-through cache permissions + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Action: Match.arrayWith([ + 'ecr:CreatePullThroughCacheRule', + 'ecr:DeletePullThroughCacheRule', + 'ecr:DescribePullThroughCacheRules' + ]), Effect: 'Allow', Resource: '*' - }, - { - Action: [ - 'kendra:DescribeIndex', - 'kendra:ListTagsForResource', - 'kendra:TagResource', - 'kendra:UpdateIndex' - ], - Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] - } - }, + }), + Match.objectLike({ + Action: 'ecr:GetAuthorizationToken', Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':kendra:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':index/*' - ] - ] + Resource: '*' + }) + ]) + }, + PolicyName: Match.stringLikeRegexp('AgentBuilderCfnDeployRoleEcrPolicy.*') + }); + }); + + it('should have separate CFN deploy roles for text and agent use cases', () => { + // Test for the regular CFN deploy role (for text use cases) + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + Match.objectLike({ + Principal: { + Service: 'cloudformation.amazonaws.com' } - }, - { - Action: ['cloudwatch:*Dashboard*', 'cloudwatch:GetMetricData', 'cloudwatch:TagResource'], + }) + ]) + } + }); + + // Find all IAM roles to verify we have both types + const allRoles = template.findResources('AWS::IAM::Role'); + const roleNames = Object.keys(allRoles); + + // Look for the specific CFN deploy roles by name pattern + const hasCfnDeployRole = roleNames.some( + (name) => name.startsWith('CfnDeployRole') && !name.includes('AgentBuilder') + ); + const hasAgentBuilderRole = roleNames.some((name) => name.startsWith('AgentBuilderCfnDeployRole')); + + expect(hasCfnDeployRole).toBe(true); + expect(hasAgentBuilderRole).toBe(true); + }); + + it('should have agent builder CFN deploy role without VPC and Kendra permissions', () => { + // Agent builder should NOT have VPC policy + const vpcPolicies = template.findResources('AWS::IAM::Policy', { + PolicyName: Match.stringLikeRegexp('.*AgentBuilder.*VpcPolicy.*') + }); + expect(Object.keys(vpcPolicies).length).toBe(0); + + // Agent builder should NOT have Kendra policy + const kendraPolicies = template.findResources('AWS::IAM::Policy', { + PolicyName: Match.stringLikeRegexp('.*AgentBuilder.*KendraPolicy.*') + }); + expect(Object.keys(kendraPolicies).length).toBe(0); + }); + + it('should have comprehensive CFN deployment policies for text use cases', () => { + // Test that the text use case CFN deploy role has comprehensive permissions + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + // CloudFormation stack operations + Match.objectLike({ + Action: Match.arrayWith(['cloudformation:CreateStack', 'cloudformation:UpdateStack']), + Effect: 'Allow', Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId'] + }, + StringLike: { + 'cloudformation:TemplateUrl': Match.anyValue() } - }, - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':cloudwatch::', - { - Ref: 'AWS::AccountId' - }, - ':dashboard/*' - ] - ] } - }, - { - Action: [ - 'sqs:CreateQueue', - 'sqs:DeleteQueue', - 'sqs:GetQueueAttributes', - 'sqs:SetQueueAttributes', - 'sqs:TagQueue' - ], + }), + // IAM role management + Match.objectLike({ + Action: Match.arrayWith(['iam:CreateRole', 'iam:DeleteRole*']), + Effect: 'Allow', + Resource: Match.arrayWith([ + Match.objectLike({ + 'Fn::Join': Match.anyValue() + }) + ]) + }), + // Lambda function management + Match.objectLike({ + Action: Match.arrayWith(['lambda:CreateFunction', 'lambda:Delete*']), + Effect: 'Allow', Condition: { - 'ForAnyValue:StringEquals': { - 'aws:CalledVia': ['cloudformation.amazonaws.com'] + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId'] } - }, - Effect: 'Allow', - Resource: { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition' - }, - ':sqs:', - { - Ref: 'AWS::Region' - }, - ':', - { - Ref: 'AWS::AccountId' - }, - ':*' - ] - ] } - } - ], - Version: '2012-10-17' + }) + ]) }, - PolicyName: Match.stringLikeRegexp('CfnDeployPolicy*'), - Roles: [ - { - Ref: Match.stringLikeRegexp('UCMLRole*') - }, - { - Ref: Match.stringLikeRegexp('CfnDeployRole*') - } - ] + PolicyName: Match.stringLikeRegexp('CfnDeployRoleCorePolicy.*') }); }); @@ -1391,7 +822,7 @@ describe('When creating a use case management Stack', () => { ], Version: '2012-10-17' }, - PolicyName: Match.stringLikeRegexp('VpcCreationPolicy*'), + PolicyName: Match.stringLikeRegexp('.*VpcPolicy.*'), Roles: [ { Ref: Match.stringLikeRegexp('UCMLRole*') @@ -1443,6 +874,187 @@ describe('When creating a use case management Stack', () => { }); }); + it('should create MCP management lambda function with correct properties', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Description: 'Lambda function backing the REST API for MCP server management', + Handler: 'mcp-handler.mcpHandler', + Runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME.name, + Role: { + 'Fn::GetAtt': [Match.stringLikeRegexp('MCPManagementLambdaRole*'), 'Arn'] + }, + Environment: { + Variables: { + [POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]: 'UseCaseManagement', + [GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]: { + Ref: Match.stringLikeRegexp('FactoriesDeploymentPlatformBucket*') + } + } + }, + DeadLetterConfig: { + TargetArn: { + 'Fn::GetAtt': [Match.stringLikeRegexp('UseCaseManagementDLQ*'), 'Arn'] + } + } + }); + }); + + it('should have S3 permissions for MCP management lambda', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Effect: 'Allow', + Action: ['s3:DeleteObject', 's3:GetObject', 's3:PutObject', 's3:PutObjectTagging'], + Resource: Match.arrayWith([ + { + 'Fn::GetAtt': [Match.stringLikeRegexp('FactoriesDeploymentPlatformBucket*'), 'Arn'] + }, + { + 'Fn::Join': [ + '', + [ + { + 'Fn::GetAtt': [ + Match.stringLikeRegexp('FactoriesDeploymentPlatformBucket*'), + 'Arn' + ] + }, + '/mcp/*' + ] + ] + } + ]) + } + ] + }, + PolicyName: Match.stringLikeRegexp('MCPLambdaS3Policy*') + }); + }); + + it('should have S3 permissions for MCP management lambda with mcp/ prefix scope', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Effect: 'Allow', + Action: ['s3:DeleteObject', 's3:GetObject', 's3:PutObject', 's3:PutObjectTagging'], + Resource: Match.arrayWith([ + { + 'Fn::Join': [ + '', + [ + { + 'Fn::GetAtt': [ + Match.stringLikeRegexp('FactoriesDeploymentPlatformBucket*'), + 'Arn' + ] + }, + '/mcp/*' + ] + ] + } + ]) + } + ] + }, + PolicyName: Match.stringLikeRegexp('MCPLambdaS3Policy*') + }); + }); + + + it('should create agent management lambda function with correct properties', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Description: 'Lambda function backing the REST API for agent management', + Handler: 'agents-handler.agentsHandler', + Runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME.name, + Role: { + 'Fn::GetAtt': [Match.stringLikeRegexp('AgentManagementLambdaRole*'), 'Arn'] + }, + Environment: { + Variables: { + GAAB_DEPLOYMENTS_BUCKET: { + Ref: Match.stringLikeRegexp('FactoriesDeploymentPlatformBucket*') + }, + DEPLOYMENT_PLATFORM_STACK_NAME: { + Ref: 'AWS::StackName' + } + } + }, + DeadLetterConfig: { + TargetArn: { + 'Fn::GetAtt': [Match.stringLikeRegexp('UseCaseManagementDLQ*'), 'Arn'] + } + } + }); + }); + + it('should create workflow management lambda function with correct properties', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Description: 'Lambda function backing the REST API for workflow management', + Handler: 'workflows-handler.workflowsHandler', + Runtime: COMMERCIAL_REGION_LAMBDA_NODE_RUNTIME.name, + Role: { + 'Fn::GetAtt': [Match.stringLikeRegexp('WorkflowManagementLambdaRole*'), 'Arn'] + }, + Environment: { + Variables: { + [POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]: 'UseCaseManagement', + [GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]: { + Ref: Match.stringLikeRegexp('FactoriesDeploymentPlatformBucket*') + }, + [DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR]: { + Ref: 'AWS::StackName' + } + } + }, + DeadLetterConfig: { + TargetArn: { + 'Fn::GetAtt': [Match.stringLikeRegexp('UseCaseManagementDLQ*'), 'Arn'] + } + } + }); + }); + + + + it('should set multimodal environment variables on lambda functions when called', () => { + const app = new cdk.App({ context: rawCdkJson.context }); + const tempStack = new cdk.Stack(app, 'ParentStackForMultimodal'); + const managementStack = new UseCaseManagement(tempStack, 'ManagementStackForMultimodal', { + parameters: { + DefaultUserEmail: 'test@example.com', + ApplicationTrademarkName: 'Test Application', + WebConfigSSMKey: '/test-webconfig/key' + } + }); + + const testBucketName = 'test-multimodal-bucket'; + const testTableName = 'test-multimodal-table'; + managementStack.setMultimodalEnvironmentVariables(testBucketName, testTableName); + + const multimodalTemplate = Template.fromStack(managementStack); + + multimodalTemplate.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'agents-handler.agentsHandler', + Environment: { + Variables: Match.objectLike({ + [MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]: testBucketName, + [MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR]: testTableName + }) + } + }); + + multimodalTemplate.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'workflows-handler.workflowsHandler', + Environment: { + Variables: Match.objectLike({ + [MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]: testBucketName, + [MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR]: testTableName + }) + } + }); + }); + afterAll(() => { if (oldDistBucket && oldDistBucket != '') { process.env.TEMPLATE_OUTPUT_BUCKET = oldDistBucket; diff --git a/source/infrastructure/test/use-case-stacks/agent-core/agent-builder-stack.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/agent-builder-stack.test.ts new file mode 100644 index 00000000..74eecea6 --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/agent-builder-stack.test.ts @@ -0,0 +1,1531 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as rawCdkJson from '../../../cdk.json'; +import { Match, Template } from 'aws-cdk-lib/assertions'; +import { AgentBuilderStack } from '../../../lib/use-case-stacks/agent-core/agent-builder-stack'; +import { + LANGCHAIN_LAMBDA_PYTHON_RUNTIME, + AGENTCORE_INSTANCE_TYPES, + CHAT_PROVIDERS, + USE_CASE_TYPES, + GAAB_STRANDS_AGENT_IMAGE_NAME +} from '../../../lib/utils/constants'; +import { + resolveUpstreamRegistryUrl, + resolveUpstreamRepositoryPrefix +} from '../../../lib/use-case-stacks/agent-core/utils/image-uri-resolver'; + +// Global test setup - build stack once for all test suites +let globalTemplate: Template; +let globalStack: AgentBuilderStack; + +// Suppress console logs from AWS Solutions Constructs +const originalConsoleLog = console.log; +const originalConsoleWarn = console.warn; + +// Save original environment variable value at module level +const originalDistOutputBucket = process.env.DIST_OUTPUT_BUCKET; + +beforeAll(() => { + // Ensure test runs in local deployment mode + delete process.env.DIST_OUTPUT_BUCKET; + + // Suppress console output during stack creation + console.log = jest.fn(); + console.warn = jest.fn(); + + // Build stack once for all tests + [globalTemplate, , globalStack] = buildStack(); + + // Restore console output + console.log = originalConsoleLog; + console.warn = originalConsoleWarn; +}); + +afterAll(() => { + // Restore original environment variable value + if (originalDistOutputBucket !== undefined) { + process.env.DIST_OUTPUT_BUCKET = originalDistOutputBucket; + } else { + delete process.env.DIST_OUTPUT_BUCKET; + } +}); + +describe('AgentBuilderStack', () => { + let stack: AgentBuilderStack; + let template: Template; + + beforeAll(() => { + template = globalTemplate; + stack = globalStack; + }); + + describe('stack initialization', () => { + it('should create stack with correct properties', () => { + expect(stack).toBeInstanceOf(AgentBuilderStack); + expect(stack.stackName).toBe('TestAgentBuilderStack'); + }); + + it('should have correct LLM provider name', () => { + expect(stack.getLlmProviderName()).toBe('AgentCore'); + }); + }); + + describe('abstract method implementations', () => { + it('should return correct image name', () => { + expect(stack.getImageName()).toBe(GAAB_STRANDS_AGENT_IMAGE_NAME); + }); + + it('should return correct use case type', () => { + expect(stack.getUseCaseType()).toBe(USE_CASE_TYPES.AGENT_BUILDER); + }); + + it('should return correct WebSocket route name', () => { + expect(stack.getWebSocketRouteName()).toBe('invokeAgentCore'); + }); + + it('should return correct LLM provider name', () => { + expect(stack.getLlmProviderName()).toBe(CHAT_PROVIDERS.AGENT_CORE); + }); + + it('should return correct agent runtime name pattern', () => { + const runtimeName = stack.getAgentRuntimeName(); + expect(runtimeName).toMatch(/^gaab_agent_/); + expect(runtimeName).toContain('gaab_agent_'); + }); + + it('should support inference profiles', () => { + expect(stack.shouldIncludeInferenceProfileSupport()).toBe(true); + }); + }); + + describe('CloudFormation parameters', () => { + it('should create memory configuration parameters', () => { + template.hasParameter('EnableLongTermMemory', { + Type: 'String', + AllowedValues: ['Yes', 'No'], + AllowedPattern: '^Yes|No$', + Default: 'Yes', + Description: 'Enable long-term memory for the agent' + }); + }); + + it('should create shared cache parameter', () => { + template.hasParameter('SharedEcrCachePrefix', { + Type: 'String', + Description: + 'Internal parameter - Shared ECR cache prefix automatically provided by deployment platform', + Default: '' + }); + }); + + it('should create custom agent image URI parameter', () => { + template.hasParameter('CustomAgentImageUri', { + Type: 'String', + Description: + 'Optional custom ECR image URI for the agent. If provided, overrides default image resolution.', + Default: '', + ConstraintDescription: + 'Must be a valid ECR image URI in the format: 123456789012.dkr.ecr.region.amazonaws.com/repository:tag or empty to use default AgentBuilder image resolution. The ECR repository must be accessible from the deployment region.' + }); + }); + + it('should create Cognito User Pool ID parameter', () => { + template.hasParameter('ComponentCognitoUserPoolId', { + Type: 'String', + Description: + 'Cognito User Pool ID for creating component App Client - automatically provided by deployment platform', + Default: '', + ConstraintDescription: 'Must be a valid Cognito User Pool ID' + }); + }); + + it('should create UseInferenceProfile parameter', () => { + template.hasParameter('UseInferenceProfile', { + Type: 'String', + Description: + 'If the model configured is Bedrock, you can indicate if you are using Bedrock Inference Profile. This will ensure that the required IAM policies will be configured during stack deployment. For more details, refer to https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html', + AllowedValues: ['Yes', 'No'], + Default: 'No' + }); + }); + + it('should create multimodal parameters inherited from UseCaseStack', () => { + template.hasParameter('MultimodalEnabled', { + Type: 'String', + Description: + 'If set to Yes, the deployed use case stack will have access to multimodal functionality. This functionality is only enabled for Agentcore-based AgentBuilder and Workflow usecases.', + AllowedValues: ['Yes', 'No'], + AllowedPattern: '^Yes|No$', + Default: 'No' + }); + + template.hasParameter('ExistingMultimodalDataMetadataTable', { + Type: 'String', + Description: + 'Existing multimodal data metadata table name which contains references of the files in S3', + Default: '', + ConstraintDescription: 'Must be a valid DynamoDB table name or empty string' + }); + + template.hasParameter('ExistingMultimodalDataBucket', { + Type: 'String', + Description: 'Existing multimodal data bucket name which stores the multimodal data files', + Default: '', + ConstraintDescription: 'Must be a valid S3 bucket name or empty string' + }); + }); + }); + + describe('automatic multimodal permissions integration', () => { + it('should have infrastructure ready for automatic multimodal permissions when multimodal is enabled', () => { + // This test verifies that the stack has the necessary infrastructure for multimodal permissions + // The actual automatic addition happens at runtime when multimodal is enabled via CloudFormation parameters + + // Verify that the AgentExecutionRole exists with the correct properties + template.hasResourceProperties('AWS::IAM::Role', { + Description: 'Execution role for AgentCore Runtime', + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + + // Verify that multimodal parameters exist for conditional behavior + template.hasParameter('MultimodalEnabled', { + Type: 'String', + AllowedValues: ['Yes', 'No'], + Default: 'No' + }); + + template.hasParameter('ExistingMultimodalDataMetadataTable', { + Type: 'String', + Default: '' + }); + + template.hasParameter('ExistingMultimodalDataBucket', { + Type: 'String', + Default: '' + }); + + // Verify that multimodal conditions exist for conditional resource creation + const templateJson = template.toJSON(); + expect(templateJson.Conditions).toBeDefined(); + expect(templateJson.Conditions.MultimodalEnabledCondition).toBeDefined(); + }); + + it('should create multimodal permissions policy when multimodal is enabled', () => { + // Create a new stack with multimodal enabled to test the conditional policy creation + const app = new cdk.App({ + context: { + ...rawCdkJson.context, + '@aws-cdk/aws-lambda:recognizeLayerVersion': true, + '@aws-cdk/aws-lambda:recognizeVersionProps': true + } + }); + + // Set multimodal parameters to enabled values on the app before creating the stack + app.node.setContext('multimodalEnabled', 'Yes'); + app.node.setContext('existingMultimodalDataMetadataTable', 'test-metadata-table'); + app.node.setContext('existingMultimodalDataBucket', 'test-multimodal-bucket'); + + const multimodalStack = new AgentBuilderStack(app, 'TestMultimodalAgentBuilderStack', { + solutionID: 'SO0276', + solutionVersion: 'v2.0.0', + solutionName: 'generative-ai-application-builder-on-aws', + applicationTrademarkName: 'Generative AI Application Builder on AWS' + }); + + const multimodalTemplate = Template.fromStack(multimodalStack); + + // Verify that the conditional multimodal permissions policy exists with the correct condition + multimodalTemplate.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'multimodalMetadataAccess', + Effect: 'Allow', + Action: 'dynamodb:GetItem', + Resource: Match.anyValue() + }, + { + Sid: 'MultimodalDataBucketAccess', + Effect: 'Allow', + Action: 's3:GetObject', + Resource: Match.anyValue() + } + ]) + } + }); + }); + }); + + describe('parameter organization and grouping', () => { + it('should have parameter groups with proper structure', () => { + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata; + + expect(metadata).toBeDefined(); + expect(metadata['AWS::CloudFormation::Interface']).toBeDefined(); + expect(metadata['AWS::CloudFormation::Interface'].ParameterGroups).toBeDefined(); + + const parameterGroups = metadata['AWS::CloudFormation::Interface'].ParameterGroups; + expect(parameterGroups.length).toBeGreaterThanOrEqual(1); + }); + + it('should maintain backward compatibility for existing parameters', () => { + // Verify all existing parameter names are preserved + const expectedParameters = [ + 'EnableLongTermMemory', + 'SharedEcrCachePrefix', + 'ComponentCognitoUserPoolId', + 'UseInferenceProfile', + 'CustomAgentImageUri' + ]; + + expectedParameters.forEach((paramName) => { + expect(() => template.hasParameter(paramName, Match.anyValue())).not.toThrow(); + }); + }); + + it('should have proper parameter validation constraints', () => { + // Test memory parameter validation + template.hasParameter('EnableLongTermMemory', { + AllowedValues: ['Yes', 'No'], + AllowedPattern: '^Yes|No$' + }); + + // Test custom image URI validation + template.hasParameter('CustomAgentImageUri', { + AllowedPattern: Match.stringLikeRegexp('.*\\|\\^\\$$') // ECR_URI_PATTERN + '|^$' + }); + + // Test shared cache parameter validation + template.hasParameter('SharedEcrCachePrefix', { + AllowedPattern: '^.*[^/]$|^$' + }); + }); + + it('should have enhanced constraint descriptions for custom image parameters', () => { + // Test that the custom image parameter has the correct constraint description + template.hasParameter('CustomAgentImageUri', { + ConstraintDescription: Match.stringLikeRegexp( + 'Must be a valid ECR image URI.*default AgentBuilder image resolution' + ) + }); + }); + }); + + describe('inheritance from AgentCoreBaseStack', () => { + it('should inherit common AgentCore functionality', () => { + // Verify that the stack has all the common AgentCore components + // This is tested through the presence of the components created by the base class + + // Agent execution role (from base class) + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + + // Agent invocation lambda (from base class) + template.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'handler.lambda_handler', + Runtime: LANGCHAIN_LAMBDA_PYTHON_RUNTIME.name, + Environment: { + Variables: { + POWERTOOLS_SERVICE_NAME: 'AGENT_CORE_INVOCATION' + } + } + }); + + // Agent runtime deployment (from base class) + template.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE' + }); + }); + + it('should maintain backward compatibility with existing parameters', () => { + // Verify all existing parameters are still present + const templateJson = template.toJSON(); + const parameters = templateJson.Parameters; + + // Core parameters that should be maintained + expect(parameters.EnableLongTermMemory).toBeDefined(); + expect(parameters.SharedEcrCachePrefix).toBeDefined(); + expect(parameters.CustomAgentImageUri).toBeDefined(); + expect(parameters.ComponentCognitoUserPoolId).toBeDefined(); + expect(parameters.UseInferenceProfile).toBeDefined(); + + // Inherited parameters from base stack + expect(parameters.UseCaseUUID).toBeDefined(); + expect(parameters.UseCaseConfigTableName).toBeDefined(); + expect(parameters.UseCaseConfigRecordKey).toBeDefined(); + }); + + it('should maintain all existing CloudFormation outputs', () => { + // Verify all expected outputs are present + template.hasOutput('AgentRuntimeArn', { + Description: 'ARN of the deployed Agentcore Runtime' + }); + + template.hasOutput('AgentExecutionRoleArn', { + Description: 'ARN of the Agentcore execution role' + }); + + template.hasOutput('AgentInvocationLambdaArn', { + Description: 'ARN of the AgentBuilder invocation Lambda function' + }); + }); + }); + + describe('AgentCore components setup', () => { + it('should create agent execution role', () => { + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + }); + + it('should create agent invocation lambda', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'handler.lambda_handler', + Runtime: LANGCHAIN_LAMBDA_PYTHON_RUNTIME.name, + MemorySize: 1024, + Timeout: 900, + Environment: { + Variables: { + POWERTOOLS_SERVICE_NAME: 'AGENT_CORE_INVOCATION', + AGENT_RUNTIME_ARN: Match.anyValue(), + USE_CASE_UUID: { + Ref: 'UseCaseUUID' + } + } + } + }); + }); + + it('should create agent runtime deployment custom resource', () => { + template.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE', + AgentRuntimeName: Match.anyValue(), + ExecutionRoleArn: Match.anyValue(), + AgentImageUri: Match.anyValue(), // Should include the resolved image URI + UseCaseUUID: { + Ref: 'UseCaseUUID' + }, + UseCaseConfigTableName: { + Ref: 'UseCaseConfigTableName' + }, + UseCaseConfigRecordKey: { + Ref: 'UseCaseConfigRecordKey' + }, + MemoryId: { 'Fn::GetAtt': ['AgentMemoryDeploymentAgentCoreMemory9759028C', 'MemoryId'] }, + UseCaseType: 'AgentBuilder' + }); + }); + + it('should use custom image URI parameter in image resolution logic', () => { + // The agent runtime deployment should reference the CustomAgentImageUri parameter + // This is tested indirectly through the AgentImageUri property in the custom resource + template.hasResourceProperties('Custom::AgentCoreRuntime', { + AgentImageUri: Match.anyValue() + }); + + // Verify the parameter exists and can be referenced + template.hasParameter( + 'CustomAgentImageUri', + Match.objectLike({ + Type: 'String', + Default: '' + }) + ); + }); + + it('should create AgentCore outbound permissions custom resource', () => { + template.hasResourceProperties('Custom::AgentCoreOutboundPermissions', { + Resource: 'AGENTCORE_OUTBOUND_PERMISSIONS', + USE_CASE_ID: { + 'Fn::Select': [0, { 'Fn::Split': ['-', { Ref: 'UseCaseUUID' }] }] + }, + USE_CASE_CLIENT_ID: Match.anyValue(), + USE_CASE_CONFIG_TABLE_NAME: { + Ref: 'UseCaseConfigTableName' + }, + USE_CASE_CONFIG_RECORD_KEY: { + Ref: 'UseCaseConfigRecordKey' + } + }); + }); + + it('should create ECR Pull-Through Cache rule', () => { + // Import the resolver to get environment-aware values + const { + resolveUpstreamRegistryUrl, + resolveUpstreamRepositoryPrefix + } = require('../../../lib/use-case-stacks/agent-core/utils/image-uri-resolver'); + + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + EcrRepositoryPrefix: Match.anyValue(), + UpstreamRegistry: 'ecr-public', + UpstreamRegistryUrl: resolveUpstreamRegistryUrl(), + UpstreamRepositoryPrefix: resolveUpstreamRepositoryPrefix() + }); + }); + }); + + describe('WebSocket routes', () => { + it('should configure WebSocket routes for agent invocation', () => { + // WebSocket routes are configured internally and tested through API Gateway resources + template.hasResourceProperties('AWS::ApiGatewayV2::Route', { + RouteKey: 'invokeAgentCore' + }); + }); + }); + + describe('stack outputs', () => { + it('should create agent runtime ARN output', () => { + template.hasOutput('AgentRuntimeArn', { + Description: 'ARN of the deployed Agentcore Runtime', + Value: Match.anyValue() + }); + }); + + it('should create agent execution role ARN output', () => { + template.hasOutput('AgentExecutionRoleArn', { + Description: 'ARN of the Agentcore execution role', + Value: Match.anyValue() + }); + }); + + it('should create agent invocation lambda ARN output', () => { + template.hasOutput('AgentInvocationLambdaArn', { + Description: 'ARN of the AgentBuilder invocation Lambda function', + Value: Match.anyValue() + }); + }); + + it('should create agent component app client ID output', () => { + template.hasOutput('AgentComponentAppClientId', { + Description: 'Cognito App Client ID for the component authentication', + Value: Match.anyValue() + }); + }); + }); + + describe('conditions', () => { + it('should create deployment type conditions', () => { + template.hasCondition('IsStandaloneDeploymentCondition', { + 'Fn::Equals': [{ Ref: 'StackDeploymentSource' }, 'StandaloneUseCase'] + }); + }); + + it('should create App Client condition', () => { + template.hasCondition('CreateAppClientCondition', { + 'Fn::Not': [ + { + 'Fn::Equals': [{ Ref: 'ComponentCognitoUserPoolId' }, ''] + } + ] + }); + }); + }); + + describe('Component App Client', () => { + it('should create App Client with correct M2M configuration', () => { + // Find the component App Client specifically (not the main web app client) + const appClientResources = template.findResources('AWS::Cognito::UserPoolClient'); + const componentAppClient = Object.values(appClientResources).find( + (resource) => resource.Properties?.UserPoolId?.Ref === 'ComponentCognitoUserPoolId' + ); + + expect(componentAppClient).toBeDefined(); + expect(componentAppClient?.Properties).toMatchObject({ + UserPoolId: { Ref: 'ComponentCognitoUserPoolId' }, + GenerateSecret: true, + ExplicitAuthFlows: ['ALLOW_REFRESH_TOKEN_AUTH'], + TokenValidityUnits: { + AccessToken: 'minutes', + RefreshToken: 'hours' + }, + PreventUserExistenceErrors: 'ENABLED', + EnableTokenRevocation: true, + SupportedIdentityProviders: ['COGNITO'] + }); + + // Verify client name is defined (it will be dynamic due to parameter reference) + expect(componentAppClient?.Properties?.ClientName).toBeDefined(); + }); + + it('should apply condition to App Client resource', () => { + const appClientResources = template.findResources('AWS::Cognito::UserPoolClient'); + const componentAppClient = Object.values(appClientResources).find( + (resource) => resource.Properties?.UserPoolId?.Ref === 'ComponentCognitoUserPoolId' + ); + + expect(componentAppClient?.Condition).toBe('CreateAppClientCondition'); + }); + }); + + describe('Authentication Parameter Groups', () => { + it('should include authentication parameter in parameter groups', () => { + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata?.['AWS::CloudFormation::Interface']; + + expect(metadata).toBeDefined(); + expect(metadata.ParameterGroups).toBeDefined(); + + // Find the authentication configuration parameter group + const authGroup = metadata.ParameterGroups.find( + (group: any) => group.Label?.default === 'Authentication Configuration (Internal)' + ); + + expect(authGroup).toBeDefined(); + expect(authGroup.Parameters).toContain('ComponentCognitoUserPoolId'); + }); + }); + + describe('IAM permissions', () => { + it('should create agent execution role with Bedrock permissions', () => { + // Agent execution role should have Bedrock model invocation permissions + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + }); + + it('should create agent invocation lambda permissions', () => { + // Check for separate policy resources created by addToPolicy + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'AgentCoreRuntimeInvocation', + Effect: 'Allow', + Action: [ + 'bedrock-agentcore:InvokeAgentRuntime', + 'bedrock-agentcore:InvokeAgentRuntimeForUser' + ], + Resource: Match.anyValue() + } + ]) + } + }); + }); + + it('should use specific region for foundation model permissions', () => { + // Verify that foundation model permissions use specific region, not wildcard + // Find the AgentCoreRuntimeExecutionRole specifically + const roles = template.findResources('AWS::IAM::Role'); + const agentCoreRole = Object.entries(roles).find(([logicalId]) => + logicalId.includes('AgentCoreRuntimeExecutionRole') + ); + + expect(agentCoreRole).toBeDefined(); + const [, roleResource] = agentCoreRole! as [string, any]; + + const policy = roleResource.Properties.Policies[0]; + const bedrockStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'BedrockModelInvocation' + ); + + expect(bedrockStatement).toBeDefined(); + expect(bedrockStatement.Action).toEqual(['bedrock:InvokeModel', 'bedrock:InvokeModelWithResponseStream']); + + // The resource should be foundation-model with specific region + const foundationModelResource = bedrockStatement.Resource; + + expect(foundationModelResource).toBeDefined(); + + // Verify it uses Fn::Join with AWS::Region (not wildcard '*') + expect(foundationModelResource['Fn::Join']).toBeDefined(); + const arnParts = foundationModelResource['Fn::Join'][1]; + + // Check that it contains AWS::Region reference (not a wildcard string) + const hasRegionRef = arnParts.some((part: any) => typeof part === 'object' && part.Ref === 'AWS::Region'); + expect(hasRegionRef).toBe(true); + + // Verify it does NOT contain a wildcard '*' for the region + const hasWildcardRegion = arnParts.some( + (part: any) => + typeof part === 'string' && + part === '*' && + arnParts.indexOf(part) === arnParts.indexOf(':bedrock:') + 1 + ); + expect(hasWildcardRegion).toBe(false); + const arnString = arnParts.join(''); + expect(arnString).toContain('foundation-model'); + }); + }); + + describe('inference profile support', () => { + it('should create custom resource for inference profile ARN resolution', () => { + template.hasResourceProperties('Custom::GetModelResourceArns', { + ServiceToken: { + 'Fn::GetAtt': [Match.anyValue(), 'Arn'] + }, + Resource: 'GET_MODEL_RESOURCE_ARNS', + USE_CASE_CONFIG_TABLE_NAME: { + Ref: 'UseCaseConfigTableName' + }, + USE_CASE_CONFIG_RECORD_KEY: { + Ref: 'UseCaseConfigRecordKey' + } + }); + }); + + it('should create inference profile model policy with resolved ARNs', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: Match.arrayWith(['bedrock:InvokeModel', 'bedrock:InvokeModelWithResponseStream']), + Resource: { + 'Fn::Split': [ + ',', + { + 'Fn::GetAtt': [Match.stringLikeRegexp('GetModelResourceArns'), 'Arns'] + } + ] + } + } + ]) + } + }); + }); + + it('should grant custom resource permissions for GetInferenceProfile', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: 'bedrock:GetInferenceProfile', + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':bedrock:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':inference-profile/*' + ] + ] + } + } + ]) + } + }); + }); + + it('should grant custom resource permissions for DynamoDB access', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: 'dynamodb:GetItem', + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':dynamodb:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':table/', + { Ref: 'UseCaseConfigTableName' } + ] + ] + }, + Condition: { + 'ForAllValues:StringEquals': { + 'dynamodb:LeadingKeys': [{ Ref: 'UseCaseConfigRecordKey' }] + } + } + } + ]) + } + }); + }); + + it('should grant custom resource permissions for IAM PassRole to AgentCore', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: 'iam:PassRole', + Resource: { + 'Fn::Join': [ + '', + ['arn:', { Ref: 'AWS::Partition' }, ':iam::', { Ref: 'AWS::AccountId' }, ':role/*'] + ] + }, + Condition: { + 'ForAllValues:StringEquals': { + 'aws:TagKeys': ['createdVia', 'userId'] + }, + 'StringEquals': { + 'iam:PassedToService': 'bedrock-agentcore.amazonaws.com' + } + } + } + ]) + } + }); + }); + + it('should add dependency between custom resource and runtime deployment', () => { + // Verify that the GetModelResourceArns custom resource exists + // The dependency is conditional based on UseInferenceProfile parameter + const templateJson = template.toJSON(); + + // Verify the custom resource exists + const customResourceEntry = Object.entries(templateJson.Resources).find(([logicalId]: [string, any]) => + logicalId.includes('GetModelResourceArns') + ); + + expect(customResourceEntry).toBeDefined(); + + // Verify it has the correct condition + const [, customResource] = customResourceEntry! as [string, any]; + expect(customResource.Condition).toBe('InferenceProfileProvidedCondition'); + }); + + it('should have UseInferenceProfile CFN parameter', () => { + // Verify that UseInferenceProfile parameter exists + // This is set automatically by the adapter based on BedrockInferenceType + const templateJson = template.toJSON(); + expect(templateJson.Parameters?.UseInferenceProfile).toBeDefined(); + expect(templateJson.Parameters.UseInferenceProfile.Type).toBe('String'); + expect(templateJson.Parameters.UseInferenceProfile.AllowedValues).toEqual(['Yes', 'No']); + expect(templateJson.Parameters.UseInferenceProfile.Default).toBe('No'); + }); + + it('should create inference profile support with conditions', () => { + // Verify that the custom resource and policy are created with InferenceProfileProvidedCondition + const templateJson = template.toJSON(); + + // Find the GetModelResourceArns custom resource + const customResourceEntry = Object.entries(templateJson.Resources).find(([logicalId]: [string, any]) => + logicalId.includes('GetModelResourceArns') + ); + + expect(customResourceEntry).toBeDefined(); + const [, customResource] = customResourceEntry! as [string, any]; + + // Verify it has the InferenceProfileProvidedCondition + expect(customResource.Condition).toBe('InferenceProfileProvidedCondition'); + + // Find the InferenceProfileModelPolicy + const policyEntry = Object.entries(templateJson.Resources).find(([logicalId]: [string, any]) => + logicalId.includes('InferenceProfileModelPolicy') + ); + + expect(policyEntry).toBeDefined(); + const [, policy] = policyEntry! as [string, any]; + + // Verify it has the InferenceProfileProvidedCondition + expect(policy.Condition).toBe('InferenceProfileProvidedCondition'); + }); + + it('should apply condition to custom resource auth policy', () => { + const authPolicyResources = template.findResources('AWS::IAM::Policy'); + const authPolicy = Object.values(authPolicyResources).find((resource: any) => { + return resource.Properties?.PolicyName?.includes?.('CustomResourceAuthPolicy'); + }); + + expect(authPolicy?.Condition).toBe('CreateAppClientCondition'); + }); + }); + + describe('authentication components from base class', () => { + it('should create comprehensive auth IAM policies', () => { + // The auth policy is created as a single policy with multiple statements + // Test for the CustomResourceAuthPolicy with all required statements + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyName: Match.stringLikeRegexp('CustomResourceAuthPolicy.*'), + PolicyDocument: { + Statement: Match.arrayWith([ + // Bedrock AgentCore OAuth2 credential provider permissions + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith([ + 'bedrock-agentcore:CreateOauth2CredentialProvider', + 'bedrock-agentcore:CreateTokenVault', + 'bedrock-agentcore:DeleteOauth2CredentialProvider' + ]) + }), + // Secrets Manager permissions + Match.objectLike({ + Effect: 'Allow', + Action: Match.arrayWith(['secretsmanager:CreateSecret', 'secretsmanager:DeleteSecret']) + }), + // IAM PassRole permissions + Match.objectLike({ + Effect: 'Allow', + Action: 'iam:PassRole' + }) + ]) + } + }); + }); + + it('should include CreateServiceLinkedRole permission for Bedrock AgentCore Runtime Identity', () => { + // Verify the CustomResourceAuthPolicy includes permission to create the service-linked role + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyName: Match.stringLikeRegexp('CustomResourceAuthPolicy.*'), + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: 'iam:CreateServiceLinkedRole', + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':iam::', + { Ref: 'AWS::AccountId' }, + ':role/aws-service-role/runtime-identity.bedrock-agentcore.amazonaws.com/AWSServiceRoleForBedrockAgentCoreRuntimeIdentity' + ] + ] + }, + Condition: { + StringEquals: { + 'iam:AWSServiceName': 'runtime-identity.bedrock-agentcore.amazonaws.com' + } + } + } + ]) + } + }); + }); + + it('should create OAuth client custom resource', () => { + template.hasResourceProperties('AWS::CloudFormation::CustomResource', { + Resource: 'AGENTCORE_OAUTH_CLIENT', + CLIENT_ID: Match.anyValue(), + CLIENT_SECRET: Match.anyValue(), + DISCOVERY_URL: Match.anyValue(), + PROVIDER_NAME: Match.anyValue() + }); + }); + + it('should apply conditions to OAuth client resource', () => { + const oauthClientResources = template.findResources('AWS::CloudFormation::CustomResource', { + Properties: { + Resource: 'AGENTCORE_OAUTH_CLIENT' + } + }); + const oauthClientResource = Object.values(oauthClientResources)[0]; + + expect(oauthClientResource?.Condition).toBe('CreateAppClientCondition'); + }); + + it('should create proper dependency chain for auth resources', () => { + const templateJson = template.toJSON(); + + // OAuth client should depend on app client + const oauthClientResource = Object.entries(templateJson.Resources).find( + ([, resource]: [string, any]) => + resource.Type === 'AWS::CloudFormation::CustomResource' && + resource.Properties?.Resource === 'AGENTCORE_OAUTH_CLIENT' + ); + + expect(oauthClientResource).toBeDefined(); + const [, oauthResource] = oauthClientResource! as [string, any]; + + // Should reference the component app client through CLIENT_ID and CLIENT_SECRET properties + expect(oauthResource.Properties.CLIENT_ID).toBeDefined(); + expect(oauthResource.Properties.CLIENT_SECRET).toBeDefined(); + expect(oauthResource.Properties.DISCOVERY_URL).toBeDefined(); + expect(oauthResource.Properties.PROVIDER_NAME).toBeDefined(); + + // The dependency is implicit through property references, not explicit DependsOn + // This ensures the app client is created before the OAuth client + }); + }); + + describe('agent-specific features', () => { + it('should maintain agent-specific parameter groups', () => { + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata?.['AWS::CloudFormation::Interface']; + + expect(metadata).toBeDefined(); + expect(metadata.ParameterGroups).toBeDefined(); + + // Should have "Agent Configuration" group (not "AgentCore Configuration") + const agentConfigGroup = metadata.ParameterGroups.find( + (group: any) => group.Label?.default === 'Agent Configuration' + ); + + expect(agentConfigGroup).toBeDefined(); + expect(agentConfigGroup.Parameters).toContain('EnableLongTermMemory'); + }); + + it('should include all agent-specific outputs', () => { + // Verify agent-specific output descriptions + template.hasOutput('AgentRuntimeArn', { + Description: 'ARN of the deployed Agentcore Runtime' + }); + + template.hasOutput('AgentExecutionRoleArn', { + Description: 'ARN of the Agentcore execution role' + }); + + template.hasOutput('AgentInvocationLambdaArn', { + Description: 'ARN of the AgentBuilder invocation Lambda function' + }); + }); + + it('should configure agent runtime with correct use case type', () => { + template.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE', + UseCaseType: 'AgentBuilder' + }); + }); + }); +}); + +describe('AgentBuilderStack memory configurations', () => { + let template: Template; + + beforeAll(() => { + template = globalTemplate; + }); + + it('should handle long-term memory configuration', () => { + // Should allow enabling/disabling long-term memory + template.hasParameter('EnableLongTermMemory', { + AllowedValues: ['Yes', 'No'], + AllowedPattern: '^Yes|No$', + Default: 'Yes' + }); + }); +}); + +describe('AgentBuilderStack image URI resolution from base class', () => { + let template: Template; + + beforeAll(() => { + template = globalTemplate; + }); + + it('should create ECR Pull-Through Cache with agent image configuration', () => { + // Import the resolver to get environment-aware values + const { + resolveUpstreamRegistryUrl, + resolveUpstreamRepositoryPrefix + } = require('../../../lib/use-case-stacks/agent-core/utils/image-uri-resolver'); + + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + EcrRepositoryPrefix: Match.anyValue(), + UpstreamRegistry: 'ecr-public', + UpstreamRegistryUrl: resolveUpstreamRegistryUrl(), + UpstreamRepositoryPrefix: resolveUpstreamRepositoryPrefix() + }); + }); + + it('should apply standalone deployment condition to ECR cache', () => { + const ecrCacheResources = template.findResources('AWS::ECR::PullThroughCacheRule'); + const ecrCacheResource = Object.values(ecrCacheResources)[0]; + + expect(ecrCacheResource?.Condition).toBe('IsStandaloneDeploymentCondition'); + }); + + it('should handle custom image URI override', () => { + // The custom image parameter should be integrated into the image resolution logic + // This is tested through the parameter existence and its integration in the runtime deployment + template.hasParameter('CustomAgentImageUri', { + Type: 'String', + Default: '' + }); + + // The runtime deployment should reference the resolved image URI + template.hasResourceProperties('Custom::AgentCoreRuntime', { + AgentImageUri: Match.anyValue() + }); + }); + + it('should support both standalone and dashboard deployment modes', () => { + // Verify conditions for different deployment modes + template.hasCondition('IsStandaloneDeploymentCondition', { + 'Fn::Equals': [{ Ref: 'StackDeploymentSource' }, 'StandaloneUseCase'] + }); + + // Shared ECR cache parameter should be available for dashboard deployments + template.hasParameter('SharedEcrCachePrefix', { + Type: 'String', + Default: '' + }); + }); + + it('should use agent-specific image name in resolution', () => { + // The image resolution should use the getImageName() method result + // This is tested through the stack's getImageName() method returning the correct value + expect(globalStack.getImageName()).toBe(GAAB_STRANDS_AGENT_IMAGE_NAME); + }); + + it('should integrate with centralized image resolution logic', () => { + // Verify that the agent runtime deployment uses the resolved image URI + template.hasResourceProperties('Custom::AgentCoreRuntime', { + AgentImageUri: Match.anyValue(), + AgentRuntimeName: Match.anyValue() // Runtime name is generated dynamically + }); + + // The image URI should be resolved using the base class logic + // This is tested indirectly through the custom resource properties + }); +}); + +describe('AgentBuilderStack custom image configurations', () => { + let template: Template; + + beforeAll(() => { + template = globalTemplate; + }); + + it('should include custom image parameter in parameter groups', () => { + // Check that the parameter is included in the CloudFormation Interface metadata + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata?.['AWS::CloudFormation::Interface']; + + expect(metadata).toBeDefined(); + expect(metadata.ParameterGroups).toBeDefined(); + + // Find the custom image configuration parameter group + const customImageGroup = metadata.ParameterGroups.find( + (group: any) => group.Label?.default === 'Custom Image Configuration (Advanced)' + ); + + expect(customImageGroup).toBeDefined(); + expect(customImageGroup.Parameters).toContain('CustomAgentImageUri'); + }); + + it('should include agent configuration parameter in parameter groups', () => { + // Check that the agent configuration parameter is included + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata?.['AWS::CloudFormation::Interface']; + + expect(metadata).toBeDefined(); + expect(metadata.ParameterGroups).toBeDefined(); + + // Find the agent configuration parameter group + const agentConfigGroup = metadata.ParameterGroups.find( + (group: any) => group.Label?.default === 'Agent Configuration' + ); + + expect(agentConfigGroup).toBeDefined(); + expect(agentConfigGroup.Parameters).toContain('EnableLongTermMemory'); + }); + + it('should include both agent configuration and custom image parameter groups', () => { + // Check that both parameter groups exist and are properly configured + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata?.['AWS::CloudFormation::Interface']; + + expect(metadata).toBeDefined(); + expect(metadata.ParameterGroups).toBeDefined(); + + const parameterGroups = metadata.ParameterGroups; + + // Agent Configuration should be first (unshifted) + const firstGroup = parameterGroups[0]; + expect(firstGroup.Label?.default).toBe('Agent Configuration'); + + // Custom Image Configuration should exist somewhere in the groups + const customImageGroup = parameterGroups.find( + (group: any) => group.Label?.default === 'Custom Image Configuration (Advanced)' + ); + expect(customImageGroup).toBeDefined(); + expect(customImageGroup.Parameters).toContain('CustomAgentImageUri'); + }); + + it('should validate ECR URI pattern in custom image parameter', () => { + // The parameter should have an AllowedPattern that validates ECR URIs + const templateJson = template.toJSON(); + const customImageParam = templateJson.Parameters?.CustomAgentImageUri; + + expect(customImageParam).toBeDefined(); + expect(customImageParam.AllowedPattern).toBeDefined(); + + // Should allow empty string or valid ECR URI pattern + expect(customImageParam.AllowedPattern).toMatch(/\|\^\$$/); // Should end with |^$ to allow empty + expect(customImageParam.AllowedPattern).toContain('ecr'); // Should contain ECR pattern + }); + + it('should integrate custom image parameter with image resolution logic', () => { + // Verify that the custom image parameter is properly integrated into the stack + const templateJson = template.toJSON(); + + // Check that CustomAgentImageUri parameter exists + expect(templateJson.Parameters?.CustomAgentImageUri).toBeDefined(); + + // Check that SharedEcrCachePrefix parameter exists (for comparison) + expect(templateJson.Parameters?.SharedEcrCachePrefix).toBeDefined(); + + // The agent runtime deployment should use resolved image URI that considers custom parameter + template.hasResourceProperties('Custom::AgentCoreRuntime', { + AgentImageUri: Match.anyValue() + }); + }); + + it('should create conditions for image URI resolution', () => { + // The stack should create conditions for handling different deployment scenarios + template.hasCondition('IsStandaloneDeploymentCondition', { + 'Fn::Equals': [{ Ref: 'StackDeploymentSource' }, 'StandaloneUseCase'] + }); + + // Additional conditions may be created by the image resolution logic + // This test ensures the basic deployment condition exists + }); + + it('should support both custom and default image resolution paths', () => { + // Verify that the stack supports both custom image URI and default resolution + const templateJson = template.toJSON(); + + // Custom image parameter should have empty default (allowing default resolution) + expect(templateJson.Parameters?.CustomAgentImageUri?.Default).toBe(''); + + // Should have ECR pull-through cache for default resolution + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + EcrRepositoryPrefix: Match.anyValue(), + UpstreamRegistry: 'ecr-public' + }); + }); +}); + +describe('AgentBuilderStack backward compatibility and regression tests', () => { + let template: Template; + let stack: AgentBuilderStack; + + beforeAll(() => { + template = globalTemplate; + stack = globalStack; + }); + + it('should maintain all existing parameter names and types', () => { + const templateJson = template.toJSON(); + const parameters = templateJson.Parameters; + + // Core AgentBuilder parameters that must be maintained + expect(parameters.EnableLongTermMemory).toEqual({ + Type: 'String', + Description: 'Enable long-term memory for the agent', + AllowedValues: ['Yes', 'No'], + AllowedPattern: '^Yes|No$', + Default: 'Yes' + }); + + expect(parameters.CustomAgentImageUri).toEqual({ + Type: 'String', + Description: + 'Optional custom ECR image URI for the agent. If provided, overrides default image resolution.', + Default: '', + AllowedPattern: expect.stringContaining('ecr'), + ConstraintDescription: + 'Must be a valid ECR image URI in the format: 123456789012.dkr.ecr.region.amazonaws.com/repository:tag or empty to use default AgentBuilder image resolution. The ECR repository must be accessible from the deployment region.' + }); + + expect(parameters.ComponentCognitoUserPoolId).toEqual({ + Type: 'String', + Description: + 'Cognito User Pool ID for creating component App Client - automatically provided by deployment platform', + Default: '', + ConstraintDescription: 'Must be a valid Cognito User Pool ID' + }); + }); + + it('should maintain all existing CloudFormation outputs', () => { + const templateJson = template.toJSON(); + const outputs = templateJson.Outputs; + + // Verify all expected outputs exist with correct descriptions + expect(outputs.AgentRuntimeArn).toEqual({ + Description: 'ARN of the deployed Agentcore Runtime', + Value: expect.any(Object) + }); + + expect(outputs.AgentExecutionRoleArn).toEqual({ + Description: 'ARN of the Agentcore execution role', + Value: expect.any(Object) + }); + + expect(outputs.AgentInvocationLambdaArn).toEqual({ + Description: 'ARN of the AgentBuilder invocation Lambda function', + Value: expect.any(Object) + }); + }); + + it('should maintain existing resource types and properties', () => { + // Verify key resource types are still present + const templateJson = template.toJSON(); + const resources = templateJson.Resources; + + // Agent execution role + const agentRoles = Object.values(resources).filter( + (resource: any) => + resource.Type === 'AWS::IAM::Role' && + resource.Properties?.AssumeRolePolicyDocument?.Statement?.some( + (stmt: any) => stmt.Principal?.Service === 'bedrock-agentcore.amazonaws.com' + ) + ); + expect(agentRoles.length).toBeGreaterThan(0); + + // Agent invocation lambda + const agentLambdas = Object.values(resources).filter( + (resource: any) => + resource.Type === 'AWS::Lambda::Function' && + resource.Properties?.Environment?.Variables?.POWERTOOLS_SERVICE_NAME === 'AGENT_CORE_INVOCATION' + ); + expect(agentLambdas.length).toBeGreaterThan(0); + + // Agent runtime deployment custom resource + const runtimeDeployments = Object.values(resources).filter( + (resource: any) => resource.Type === 'Custom::AgentCoreRuntime' + ); + expect(runtimeDeployments.length).toBeGreaterThan(0); + }); + + it('should maintain existing parameter group structure', () => { + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata?.['AWS::CloudFormation::Interface']; + + expect(metadata).toBeDefined(); + expect(metadata.ParameterGroups).toBeDefined(); + + // Should have Agent Configuration as first group + const firstGroup = metadata.ParameterGroups[0]; + expect(firstGroup.Label?.default).toBe('Agent Configuration'); + + // Should have Authentication Configuration group + const authGroup = metadata.ParameterGroups.find( + (group: any) => group.Label?.default === 'Authentication Configuration (Internal)' + ); + expect(authGroup).toBeDefined(); + + // Should have Custom Image Configuration group + const customImageGroup = metadata.ParameterGroups.find( + (group: any) => group.Label?.default === 'Custom Image Configuration (Advanced)' + ); + expect(customImageGroup).toBeDefined(); + }); + + it('should maintain existing WebSocket route configuration', () => { + // Verify WebSocket route for agent invocation + template.hasResourceProperties('AWS::ApiGatewayV2::Route', { + RouteKey: 'invokeAgentCore' + }); + + // Verify the route name matches the abstract method implementation + expect(stack.getWebSocketRouteName()).toBe('invokeAgentCore'); + }); + + it('should maintain existing IAM permissions structure', () => { + // Verify agent execution role permissions are maintained + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + + // Verify agent invocation lambda permissions + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'AgentCoreRuntimeInvocation', + Effect: 'Allow', + Action: ['bedrock-agentcore:InvokeAgentRuntime', 'bedrock-agentcore:InvokeAgentRuntimeForUser'], + Resource: Match.anyValue() + } + ]) + } + }); + }); + + it('should maintain existing conditions and their logic', () => { + // Verify all expected conditions exist + template.hasCondition('IsStandaloneDeploymentCondition', { + 'Fn::Equals': [{ Ref: 'StackDeploymentSource' }, 'StandaloneUseCase'] + }); + + template.hasCondition('CreateAppClientCondition', { + 'Fn::Not': [ + { + 'Fn::Equals': [{ Ref: 'ComponentCognitoUserPoolId' }, ''] + } + ] + }); + + template.hasCondition('InferenceProfileProvidedCondition', { + 'Fn::Equals': [{ Ref: 'UseInferenceProfile' }, 'Yes'] + }); + }); + + it('should ensure no breaking changes in template structure', () => { + const templateJson = template.toJSON(); + + // Verify template has all expected top-level sections + expect(templateJson.Parameters).toBeDefined(); + expect(templateJson.Resources).toBeDefined(); + expect(templateJson.Outputs).toBeDefined(); + expect(templateJson.Conditions).toBeDefined(); + expect(templateJson.Metadata).toBeDefined(); + + // Verify parameter count is reasonable (not missing parameters) + const parameterCount = Object.keys(templateJson.Parameters).length; + expect(parameterCount).toBeGreaterThan(10); // Should have many parameters + + // Verify resource count is reasonable (not missing resources) + const resourceCount = Object.keys(templateJson.Resources).length; + expect(resourceCount).toBeGreaterThan(20); // Should have many resources + }); +}); + +function buildStack(): [Template, cdk.App, AgentBuilderStack] { + const app = new cdk.App({ + context: rawCdkJson.context + }); + + const solutionID = process.env.SOLUTION_ID ?? app.node.tryGetContext('solution_id') ?? 'SO0276'; + const version = process.env.VERSION ?? app.node.tryGetContext('solution_version') ?? 'v4.0.0'; + const solutionName = + process.env.SOLUTION_NAME ?? + app.node.tryGetContext('solution_name') ?? + 'Generative AI Application Builder on AWS'; + + const stack = new AgentBuilderStack(app, 'TestAgentBuilderStack', { + solutionID: solutionID, + solutionVersion: version, + solutionName: solutionName, + applicationTrademarkName: + rawCdkJson.context.application_trademark_name ?? 'Generative AI Application Builder on AWS' + }); + + const template = Template.fromStack(stack); + return [template, app, stack]; +} + +// Pipeline deployment mode tests +describe('AgentBuilderStack Pipeline Deployment Mode', () => { + let pipelineTemplate: Template; + let pipelineStack: AgentBuilderStack; + let savedDistOutputBucket: string | undefined; + + beforeAll(() => { + // Save current environment variable value + savedDistOutputBucket = process.env.DIST_OUTPUT_BUCKET; + + // Set environment variable to simulate pipeline deployment + process.env.DIST_OUTPUT_BUCKET = 'test-bucket'; + + // Suppress console output during stack creation + console.log = jest.fn(); + console.warn = jest.fn(); + + // Build stack for pipeline mode + [pipelineTemplate, , pipelineStack] = buildStack(); + + // Restore console output + console.log = originalConsoleLog; + console.warn = originalConsoleWarn; + }); + + afterAll(() => { + // Restore original environment variable value + if (savedDistOutputBucket !== undefined) { + process.env.DIST_OUTPUT_BUCKET = savedDistOutputBucket; + } else { + delete process.env.DIST_OUTPUT_BUCKET; + } + }); + + describe('pipeline deployment configuration', () => { + it('should create stack with pipeline deployment mode', () => { + expect(pipelineStack).toBeInstanceOf(AgentBuilderStack); + expect(pipelineStack.stackName).toBe('TestAgentBuilderStack'); + }); + + it('should create ECR Pull-Through Cache rule for pipeline deployments', () => { + pipelineTemplate.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + EcrRepositoryPrefix: Match.anyValue(), + UpstreamRegistry: 'ecr-public', + UpstreamRegistryUrl: resolveUpstreamRegistryUrl(), + UpstreamRepositoryPrefix: resolveUpstreamRepositoryPrefix() + }); + }); + + it('should create agent runtime deployment custom resource', () => { + pipelineTemplate.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE', + AgentRuntimeName: Match.anyValue(), + ExecutionRoleArn: Match.anyValue(), + AgentImageUri: Match.anyValue(), + UseCaseUUID: { + Ref: 'UseCaseUUID' + }, + UseCaseConfigTableName: { + Ref: 'UseCaseConfigTableName' + }, + UseCaseConfigRecordKey: { + Ref: 'UseCaseConfigRecordKey' + }, + MemoryId: { 'Fn::GetAtt': ['AgentMemoryDeploymentAgentCoreMemory9759028C', 'MemoryId'] }, + UseCaseType: 'AgentBuilder' + }); + }); + }); +}); diff --git a/source/infrastructure/test/use-case-stacks/agent-core/agent-core-base-stack.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/agent-core-base-stack.test.ts new file mode 100644 index 00000000..5af8e24e --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/agent-core-base-stack.test.ts @@ -0,0 +1,120 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import { AgentCoreBaseParameters } from '../../../lib/use-case-stacks/agent-core/agent-core-base-stack'; +import { BaseStack } from '../../../lib/framework/base-stack'; +import { Template } from 'aws-cdk-lib/assertions'; + +// Mock concrete parameters implementation for testing +class TestAgentCoreParameters extends AgentCoreBaseParameters { + public customTestImageUri: cdk.CfnParameter; + + protected createUseCaseSpecificParameters(stack: BaseStack): void { + this.customTestImageUri = new cdk.CfnParameter(stack, 'CustomTestImageUri', { + type: 'String', + description: 'Test custom image URI parameter', + default: '' + }); + } + + public getCustomImageParameter(): cdk.CfnParameter { + return this.customTestImageUri; + } +} + +describe('AgentCoreBaseParameters', () => { + let app: cdk.App; + let stack: cdk.Stack; + let parameters: TestAgentCoreParameters; + + beforeEach(() => { + // Set up environment variable for image URI resolution + process.env.VERSION = 'v4.0.0'; + + app = new cdk.App({ + context: { + solution_version: 'v4.0.0', + solution_id: 'SO0276' + } + }); + stack = new cdk.Stack(app, 'TestStack'); + parameters = new TestAgentCoreParameters(stack as BaseStack); + }); + + afterEach(() => { + // Clean up environment variable + delete process.env.VERSION; + }); + + describe('Memory deployment integration', () => { + it('should create memory deployment before execution role', () => { + expect(parameters.enableLongTermMemory).toBeDefined(); + expect(parameters.enableLongTermMemory.allowedValues).toEqual(['Yes', 'No']); + expect(parameters.enableLongTermMemory.default).toBe('Yes'); + }); + }); + + describe('Common parameter creation', () => { + it('should create EnableLongTermMemory parameter', () => { + expect(parameters.enableLongTermMemory).toBeDefined(); + expect(parameters.enableLongTermMemory).toBeInstanceOf(cdk.CfnParameter); + }); + + it('should create SharedEcrCachePrefix parameter', () => { + expect(parameters.sharedEcrCachePrefix).toBeDefined(); + expect(parameters.sharedEcrCachePrefix).toBeInstanceOf(cdk.CfnParameter); + }); + + it('should create ComponentCognitoUserPoolId parameter', () => { + expect(parameters.cognitoUserPoolId).toBeDefined(); + expect(parameters.cognitoUserPoolId).toBeInstanceOf(cdk.CfnParameter); + }); + + it('should create UseInferenceProfile parameter', () => { + expect(parameters.useInferenceProfile).toBeDefined(); + expect(parameters.useInferenceProfile).toBeInstanceOf(cdk.CfnParameter); + }); + }); + + describe('Multimodal integration', () => { + it('should have correct MultimodalEnabled CfnParams', () => { + const template = Template.fromStack(stack); + + template.hasParameter('MultimodalEnabled', { + Type: 'String', + Description: + 'If set to Yes, the deployed use case stack will have access to multimodal functionality. This functionality is only enabled for Agentcore-based AgentBuilder and Workflow usecases.', + Default: 'No', + AllowedValues: ['Yes', 'No'] + }); + + template.hasParameter('ExistingMultimodalDataMetadataTable', { + Type: 'String', + Description: + 'Existing multimodal data metadata table name which contains references of the files in S3', + Default: '' + }); + + template.hasParameter('ExistingMultimodalDataBucket', { + Type: 'String', + Description: 'Existing multimodal data bucket name which stores the multimodal data files', + Default: '' + }); + }); + + it('should create ExistingMultimodalDataMetadataTable parameter', () => { + expect(parameters.existingMultimodalDataMetadataTable).toBeDefined(); + expect(parameters.existingMultimodalDataMetadataTable).toBeInstanceOf(cdk.CfnParameter); + }); + + it('should create ExistingMultimodalDataBucket parameter', () => { + expect(parameters.existingMultimodalDataBucket).toBeDefined(); + expect(parameters.existingMultimodalDataBucket).toBeInstanceOf(cdk.CfnParameter); + }); + + it('should have multimodal parameters with correct properties', () => { + const template = Template.fromStack(stack); + }); + }); +}); diff --git a/source/infrastructure/test/use-case-stacks/agent-core/components/agent-execution-role.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/components/agent-execution-role.test.ts new file mode 100644 index 00000000..c415f2b6 --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/components/agent-execution-role.test.ts @@ -0,0 +1,1053 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import { Match, Template } from 'aws-cdk-lib/assertions'; +import { AgentExecutionRole } from '../../../../lib/use-case-stacks/agent-core/components/agent-execution-role'; + +describe('AgentExecutionRole', () => { + let app: cdk.App; + let stack: cdk.Stack; + let template: Template; + let agentExecutionRole: AgentExecutionRole; + + beforeEach(() => { + app = new cdk.App(); + stack = new cdk.Stack(app, 'TestStack'); + agentExecutionRole = new AgentExecutionRole(stack, 'TestAgentExecutionRole', { + useCaseConfigTableName: 'test-config-table', + memoryId: 'test_memory_id' + }); + template = Template.fromStack(stack); + }); + + describe('createExecutionRole', () => { + it('should create IAM role with correct assume role policy', () => { + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + }); + + it('should add security conditions to assume role policy', () => { + // The addSecurityConditions method adds a second statement with conditions + // Let's check that we have multiple statements and one has conditions + const resources = template.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const statements = roleResource.Properties.AssumeRolePolicyDocument.Statement; + + expect(statements).toHaveLength(2); + + // Find the statement with conditions + const statementWithConditions = statements.find((stmt: any) => stmt.Condition); + expect(statementWithConditions).toBeDefined(); + expect(statementWithConditions).toMatchObject({ + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole', + Condition: { + StringEquals: { + 'aws:SourceAccount': { Ref: 'AWS::AccountId' } + }, + ArnLike: { + 'aws:SourceArn': { + 'Fn::Join': ['', ['arn:', { Ref: 'AWS::Partition' }, ':bedrock-agentcore:*:*:*']] + } + } + } + }); + }); + + it('should create DynamoDB permissions scoped to specific table', () => { + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'DynamoDBConfigAccess', + Effect: 'Allow', + Action: ['dynamodb:GetItem', 'dynamodb:Query'], + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':dynamodb:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':table/test-config-table' + ] + ] + } + } + ]) + } + } + ] + }); + }); + + it('should create role with comprehensive inline policy', () => { + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + // ECR permissions + { + Sid: 'ECRAccess', + Effect: 'Allow', + Action: [ + 'ecr:BatchGetImage', + 'ecr:GetDownloadUrlForLayer', + 'ecr:GetAuthorizationToken' + ], + Resource: Match.anyValue() + }, + // CloudWatch Logs permissions + { + Sid: 'CloudWatchLogs', + Effect: 'Allow', + Action: [ + 'logs:CreateLogGroup', + 'logs:CreateLogStream', + 'logs:PutLogEvents', + 'logs:DescribeLogStreams', + 'logs:DescribeLogGroups' + ], + Resource: Match.anyValue() + }, + // X-Ray permissions + { + Sid: 'XRayTracing', + Effect: 'Allow', + Action: [ + 'xray:PutTraceSegments', + 'xray:PutTelemetryRecords', + 'xray:GetSamplingRules', + 'xray:GetSamplingTargets' + ], + Resource: '*' + }, + // CloudWatch Metrics permissions + { + Sid: 'CloudWatchMetrics', + Effect: 'Allow', + Action: 'cloudwatch:PutMetricData', + Resource: '*', + Condition: { + StringEquals: { + 'cloudwatch:namespace': 'bedrock-agentcore' + } + } + }, + // Workload Identity permissions (includes OAuth2 token access) + { + Sid: 'AgentCoreWorkloadIdentity', + Effect: 'Allow', + Action: Match.arrayWith([ + 'bedrock-agentcore:CreateWorkloadIdentity', + 'bedrock-agentcore:GetWorkloadAccessToken', + 'bedrock-agentcore:GetWorkloadAccessTokenForJWT', + 'bedrock-agentcore:GetWorkloadAccessTokenForUserId', + 'bedrock-agentcore:GetResourceOauth2Token' + ]), + Resource: Match.anyValue() + }, + // Secrets Manager permissions + { + Sid: 'SecretsManagerAccess', + Effect: 'Allow', + Action: 'secretsmanager:GetSecretValue', + Resource: Match.anyValue() + }, + // Bedrock permissions + { + Sid: 'BedrockModelInvocation', + Effect: 'Allow', + Action: ['bedrock:InvokeModel', 'bedrock:InvokeModelWithResponseStream'], + Resource: Match.anyValue() + }, + // Bedrock Guardrail permissions + { + Sid: 'BedrockGuardrailAccess', + Effect: 'Allow', + Action: 'bedrock:ApplyGuardrail', + Resource: Match.anyValue() + }, + // DynamoDB permissions + { + Sid: 'DynamoDBConfigAccess', + Effect: 'Allow', + Action: ['dynamodb:GetItem', 'dynamodb:Query'], + Resource: Match.anyValue() + } + ]) + } + } + ] + }); + }); + }); + + describe('role properties', () => { + it('should expose the execution role', () => { + expect(agentExecutionRole.role).toBeInstanceOf(iam.Role); + }); + + it('should have correct role description', () => { + template.hasResourceProperties('AWS::IAM::Role', { + Description: 'Execution role for AgentCore Runtime' + }); + }); + + it('should create role without explicit RoleName (uses CDK default)', () => { + // The role doesn't set an explicit RoleName, so CDK generates one + // We can verify the role exists and has the expected logical ID pattern + template.hasResource('AWS::IAM::Role', { + Properties: { + AssumeRolePolicyDocument: Match.anyValue(), + Description: 'Execution role for AgentCore Runtime' + } + }); + }); + }); + + describe('CDK Nag suppressions', () => { + it('should have appropriate CDK Nag suppressions for wildcard permissions', () => { + // This test verifies that the role has been created with proper suppressions + // The actual suppressions are added via NagSuppressions.addResourceSuppressions + // which doesn't appear in the CloudFormation template but is important for compliance + expect(agentExecutionRole.role).toBeDefined(); + }); + }); + + describe('workflow permissions', () => { + let workflowApp: cdk.App; + let workflowStack: cdk.Stack; + let workflowTemplate: Template; + let workflowAgentExecutionRole: AgentExecutionRole; + + beforeEach(() => { + workflowApp = new cdk.App(); + workflowStack = new cdk.Stack(workflowApp, 'WorkflowTestStack'); + workflowAgentExecutionRole = new AgentExecutionRole(workflowStack, 'WorkflowAgentExecutionRole', { + useCaseConfigTableName: 'test-config-table', + useCasesTableName: 'test-use-cases-table' + }); + workflowTemplate = Template.fromStack(workflowStack); + }); + + it('should create DynamoDB permissions for both config and use cases tables', () => { + workflowTemplate.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'DynamoDBConfigAccess', + Effect: 'Allow', + Action: ['dynamodb:GetItem', 'dynamodb:Query'], + Resource: [ + { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':dynamodb:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':table/test-config-table' + ] + ] + }, + { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':dynamodb:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':table/test-use-cases-table' + ] + ] + } + ] + } + ]) + } + } + ] + }); + }); + + it('should expose the execution role for workflows', () => { + expect(workflowAgentExecutionRole.role).toBeInstanceOf(iam.Role); + }); + + it('should maintain same permissions structure for workflows', () => { + // Verify that workflow role has all the same permissions as agent role + workflowTemplate.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + // ECR permissions + { + Sid: 'ECRAccess', + Effect: 'Allow', + Action: [ + 'ecr:BatchGetImage', + 'ecr:GetDownloadUrlForLayer', + 'ecr:GetAuthorizationToken' + ], + Resource: Match.anyValue() + }, + // CloudWatch Logs permissions + { + Sid: 'CloudWatchLogs', + Effect: 'Allow', + Action: [ + 'logs:CreateLogGroup', + 'logs:CreateLogStream', + 'logs:PutLogEvents', + 'logs:DescribeLogStreams', + 'logs:DescribeLogGroups' + ], + Resource: Match.anyValue() + }, + // Bedrock permissions + { + Sid: 'BedrockModelInvocation', + Effect: 'Allow', + Action: ['bedrock:InvokeModel', 'bedrock:InvokeModelWithResponseStream'], + Resource: Match.anyValue() + }, + // Bedrock Guardrail permissions + { + Sid: 'BedrockGuardrailAccess', + Effect: 'Allow', + Action: 'bedrock:ApplyGuardrail', + Resource: Match.anyValue() + }, + // DynamoDB permissions (enhanced for workflows) + { + Sid: 'DynamoDBConfigAccess', + Effect: 'Allow', + Action: ['dynamodb:GetItem', 'dynamodb:Query'], + Resource: Match.anyValue() + } + ]) + } + } + ] + }); + }); + + it('should scope permissions to specific table names for security', () => { + // Verify that the DynamoDB permissions are scoped to specific tables + const resources = workflowTemplate.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const dynamoStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'DynamoDBConfigAccess' + ); + + expect(dynamoStatement).toBeDefined(); + expect(dynamoStatement.Resource).toHaveLength(2); + + // Check that the Fn::Join array contains the table names + const configTableArn = dynamoStatement.Resource[0]['Fn::Join'][1]; + const useCasesTableArn = dynamoStatement.Resource[1]['Fn::Join'][1]; + + expect(configTableArn[configTableArn.length - 1].split('/')[1]).toBe('test-config-table'); + expect(useCasesTableArn[useCasesTableArn.length - 1].split('/')[1]).toBe('test-use-cases-table'); + }); + + it('should follow least privilege principle with read-only DynamoDB access', () => { + workflowTemplate.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'DynamoDBConfigAccess', + Effect: 'Allow', + Action: ['dynamodb:GetItem', 'dynamodb:Query'], + Resource: Match.anyValue() + } + ]) + } + } + ] + }); + + // Verify no write permissions are granted + const resources = workflowTemplate.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const dynamoStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'DynamoDBConfigAccess' + ); + + expect(dynamoStatement.Action).not.toContain('dynamodb:PutItem'); + expect(dynamoStatement.Action).not.toContain('dynamodb:UpdateItem'); + expect(dynamoStatement.Action).not.toContain('dynamodb:DeleteItem'); + }); + }); + + describe('agent-only permissions (backward compatibility)', () => { + it('should create DynamoDB permissions for config table only when use cases table not provided', () => { + // This is the original test case - should only have config table access + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'DynamoDBConfigAccess', + Effect: 'Allow', + Action: ['dynamodb:GetItem', 'dynamodb:Query'], + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':dynamodb:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':table/test-config-table' + ] + ] + } + } + ]) + } + } + ] + }); + }); + + it('should maintain backward compatibility with existing agent deployments', () => { + // Verify that the single-table case still works as before + const resources = template.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const dynamoStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'DynamoDBConfigAccess' + ); + + expect(dynamoStatement).toBeDefined(); + expect(dynamoStatement.Resource).not.toBeInstanceOf(Array); + + // Check that the Fn::Join array contains the table name + const tableArnParts = dynamoStatement.Resource['Fn::Join'][1]; + expect(tableArnParts[tableArnParts.length - 1].split('/')[1]).toBe('test-config-table'); + }); + }); + + describe('Bedrock permissions', () => { + it('should use specific region for foundation model permissions', () => { + // Verify that foundation model permissions use specific region, not wildcard + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'BedrockModelInvocation', + Effect: 'Allow', + Action: ['bedrock:InvokeModel', 'bedrock:InvokeModelWithResponseStream'], + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':bedrock:', + { Ref: 'AWS::Region' }, + '::foundation-model/*' + ] + ] + } + } + ]) + } + } + ] + }); + }); + + it('should include guardrail permissions', () => { + // Verify that guardrail permissions are included + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'BedrockGuardrailAccess', + Effect: 'Allow', + Action: 'bedrock:ApplyGuardrail', + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':bedrock:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':guardrail/*' + ] + ] + } + } + ]) + } + } + ] + }); + }); + + it('should not use wildcard region for foundation models', () => { + // Verify that the wildcard region pattern is NOT present + const resources = template.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const bedrockStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'BedrockModelInvocation' + ); + + expect(bedrockStatement).toBeDefined(); + + // Check that the resource uses specific region + const resource = bedrockStatement.Resource; + const arnParts = resource['Fn::Join'][1]; + // The region should be { Ref: 'AWS::Region' }, not '*' + const regionIndex = arnParts.findIndex((part: any) => part === ':bedrock:'); + expect(arnParts[regionIndex + 1]).toEqual({ Ref: 'AWS::Region' }); + }); + }); + + describe('inference profile support', () => { + let inferenceApp: cdk.App; + let inferenceStack: cdk.Stack; + let inferenceTemplate: Template; + let inferenceAgentExecutionRole: AgentExecutionRole; + let mockCustomResourceLambda: any; + let mockCustomResourceRole: any; + + beforeEach(() => { + inferenceApp = new cdk.App(); + inferenceStack = new cdk.Stack(inferenceApp, 'InferenceTestStack'); + + // Create mock custom resource lambda and role + mockCustomResourceLambda = { + functionArn: 'arn:aws:lambda:us-east-1:123456789012:function:custom-resource' + }; + + mockCustomResourceRole = new iam.Role(inferenceStack, 'MockCustomResourceRole', { + assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com') + }); + + inferenceAgentExecutionRole = new AgentExecutionRole(inferenceStack, 'InferenceAgentExecutionRole', { + useCaseConfigTableName: 'test-config-table' + }); + + // Create a mock condition for testing + const mockCondition = new cdk.CfnCondition(inferenceStack, 'TestCondition', { + expression: cdk.Fn.conditionEquals('Yes', 'Yes') + }); + + // Add inference profile support + inferenceAgentExecutionRole.addInferenceProfileSupport( + mockCustomResourceLambda as any, + mockCustomResourceRole, + 'test-config-table', + 'test-record-key', + mockCondition + ); + + inferenceTemplate = Template.fromStack(inferenceStack); + }); + + it('should create custom resource for model ARN resolution', () => { + inferenceTemplate.hasResourceProperties('Custom::GetModelResourceArns', { + ServiceToken: 'arn:aws:lambda:us-east-1:123456789012:function:custom-resource', + Resource: 'GET_MODEL_RESOURCE_ARNS', + USE_CASE_CONFIG_TABLE_NAME: 'test-config-table', + USE_CASE_CONFIG_RECORD_KEY: 'test-record-key' + }); + }); + + it('should grant custom resource permissions for GetInferenceProfile', () => { + inferenceTemplate.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: 'bedrock:GetInferenceProfile', + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':bedrock:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':inference-profile/*' + ] + ] + } + } + ]) + } + }); + }); + + it('should grant custom resource permissions for DynamoDB GetItem', () => { + inferenceTemplate.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: 'dynamodb:GetItem', + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':dynamodb:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':table/test-config-table' + ] + ] + }, + Condition: { + 'ForAllValues:StringEquals': { + 'dynamodb:LeadingKeys': ['test-record-key'] + } + } + } + ]) + } + }); + }); + + it('should create inference profile model policy with resolved ARNs', () => { + inferenceTemplate.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Effect: 'Allow', + Action: ['bedrock:InvokeModelWithResponseStream', 'bedrock:InvokeModel'], + Resource: { + 'Fn::Split': [ + ',', + { + 'Fn::GetAtt': [Match.stringLikeRegexp('GetModelResourceArns'), 'Arns'] + } + ] + } + } + ] + } + }); + }); + + it('should attach inference profile policy to execution role', () => { + // Verify that the policy is attached to the execution role + const resources = inferenceTemplate.findResources('AWS::IAM::Policy'); + const inferenceProfilePolicy = Object.entries(resources).find(([logicalId, resource]: [string, any]) => + logicalId.includes('InferenceProfileModelPolicy') + ); + + expect(inferenceProfilePolicy).toBeDefined(); + const [, policyResource] = inferenceProfilePolicy!; + expect(policyResource.Properties.Roles).toHaveLength(1); + }); + + it('should return custom resource for dependency management', () => { + // Create a new stack and role to test the return value + const testApp = new cdk.App(); + const testStack = new cdk.Stack(testApp, 'ReturnValueTestStack'); + const testRole = new iam.Role(testStack, 'TestCustomResourceRole', { + assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com') + }); + const testAgentExecutionRole = new AgentExecutionRole(testStack, 'TestAgentExecutionRole', { + useCaseConfigTableName: 'test-config-table' + }); + + // Create a mock condition for testing + const testCondition = new cdk.CfnCondition(testStack, 'TestCondition2', { + expression: cdk.Fn.conditionEquals('Yes', 'Yes') + }); + + const customResource = testAgentExecutionRole.addInferenceProfileSupport( + mockCustomResourceLambda as any, + testRole, + 'test-config-table', + 'test-record-key', + testCondition + ); + + expect(customResource).toBeDefined(); + expect(customResource).toBeInstanceOf(cdk.CustomResource); + }); + + it('should scope DynamoDB permissions with leading keys condition', () => { + // Verify that DynamoDB permissions are scoped to specific record key + const resources = inferenceTemplate.findResources('AWS::IAM::Policy'); + const customResourcePolicies = Object.values(resources).filter((resource: any) => + resource.Properties.PolicyDocument.Statement.some( + (stmt: any) => stmt.Action === 'dynamodb:GetItem' && stmt.Condition + ) + ); + + expect(customResourcePolicies.length).toBeGreaterThan(0); + + const dynamoPolicy = customResourcePolicies[0] as any; + const dynamoStatement = dynamoPolicy.Properties.PolicyDocument.Statement.find( + (stmt: any) => stmt.Action === 'dynamodb:GetItem' + ); + + expect(dynamoStatement.Condition).toEqual({ + 'ForAllValues:StringEquals': { + 'dynamodb:LeadingKeys': ['test-record-key'] + } + }); + }); + }); + + describe('workload identity permissions', () => { + it('should include CreateWorkloadIdentity and GetResourceOauth2Token permissions', () => { + // Verify that the workload identity permissions include all required actions + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'AgentCoreWorkloadIdentity', + Effect: 'Allow', + Action: Match.arrayWith([ + 'bedrock-agentcore:CreateWorkloadIdentity', + 'bedrock-agentcore:GetWorkloadAccessToken', + 'bedrock-agentcore:GetWorkloadAccessTokenForJWT', + 'bedrock-agentcore:GetWorkloadAccessTokenForUserId', + 'bedrock-agentcore:GetResourceOauth2Token' + ]), + Resource: Match.anyValue() + } + ]) + } + } + ] + }); + }); + + it('should scope workload identity permissions to default directory', () => { + const resources = template.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const workloadIdentityStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'AgentCoreWorkloadIdentity' + ); + + expect(workloadIdentityStatement).toBeDefined(); + expect(workloadIdentityStatement.Resource).toHaveLength(4); + + // Verify resources include workload identity directory and token vault + const arnStrings = workloadIdentityStatement.Resource.map((resource: any) => { + const arnParts = resource['Fn::Join'][1]; + return arnParts.join(''); + }); + + const hasWorkloadIdentityDir = arnStrings.some( + (arn: string) => + arn.includes('workload-identity-directory/default') && !arn.includes('workload-identity/*') + ); + const hasWorkloadIdentityWildcard = arnStrings.some((arn: string) => + arn.includes('workload-identity-directory/default/workload-identity/*') + ); + const hasTokenVaultBase = arnStrings.some( + (arn: string) => arn.includes('token-vault/default') && !arn.includes('oauth2credentialprovider') + ); + const hasTokenVaultOAuth2 = arnStrings.some((arn: string) => + arn.includes('token-vault/default/oauth2credentialprovider/*') + ); + + expect(hasWorkloadIdentityDir).toBe(true); + expect(hasWorkloadIdentityWildcard).toBe(true); + expect(hasTokenVaultBase).toBe(true); + expect(hasTokenVaultOAuth2).toBe(true); + }); + }); + + describe('OAuth2 token permissions', () => { + it('should include GetResourceOauth2Token in workload identity statement', () => { + // Verify OAuth2 token access is included in the workload identity permissions + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'AgentCoreWorkloadIdentity', + Effect: 'Allow', + Action: Match.arrayWith(['bedrock-agentcore:GetResourceOauth2Token']), + Resource: Match.anyValue() + } + ]) + } + } + ] + }); + }); + + it('should include both workload identity and token vault resources', () => { + const resources = template.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const workloadIdentityStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'AgentCoreWorkloadIdentity' + ); + + expect(workloadIdentityStatement).toBeDefined(); + expect(workloadIdentityStatement.Action).toContain('bedrock-agentcore:GetResourceOauth2Token'); + expect(workloadIdentityStatement.Resource).toHaveLength(4); + + // Verify we have both workload-identity and token-vault resources + const arnStrings = workloadIdentityStatement.Resource.map((resource: any) => { + const arnParts = resource['Fn::Join'][1]; + return arnParts.join(''); + }); + + const hasWorkloadIdentityDir = arnStrings.some( + (arn: string) => + arn.includes('workload-identity-directory/default') && !arn.includes('workload-identity/*') + ); + const hasWorkloadIdentityWildcard = arnStrings.some((arn: string) => + arn.includes('workload-identity-directory/default/workload-identity/*') + ); + const hasTokenVaultBase = arnStrings.some( + (arn: string) => arn.includes('token-vault/default') && !arn.includes('oauth2credentialprovider') + ); + const hasTokenVaultOAuth2 = arnStrings.some((arn: string) => + arn.includes('token-vault/default/oauth2credentialprovider/*') + ); + + expect(hasWorkloadIdentityDir).toBe(true); + expect(hasWorkloadIdentityWildcard).toBe(true); + expect(hasTokenVaultBase).toBe(true); + expect(hasTokenVaultOAuth2).toBe(true); + const hasTokenVault = arnStrings.some((arn: string) => + arn.includes('token-vault/default/oauth2credentialprovider/*') + ); + + expect(hasWorkloadIdentityDir).toBe(true); + expect(hasWorkloadIdentityWildcard).toBe(true); + expect(hasTokenVault).toBe(true); + }); + }); + + describe('AgentCore Memory permissions', () => { + it('should create separate memory policy when memoryId is provided', () => { + // The memory policy should be attached to the role, creating a separate AWS::IAM::Policy resource + template.resourceCountIs('AWS::IAM::Policy', 1); + + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Sid: 'AgentCoreMemoryAccess', + Effect: 'Allow', + Action: [ + 'bedrock-agentcore:CreateEvent', + 'bedrock-agentcore:ListEvents', + 'bedrock-agentcore:RetrieveMemoryRecords', + 'bedrock-agentcore:GetEvent' + ], + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':bedrock-agentcore:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':memory/test_memory_id' + ] + ] + } + } + ] + }, + Roles: [{ Ref: Match.anyValue() }] + }); + }); + + it('should attach memory policy to execution role', () => { + const resources = template.findResources('AWS::IAM::Policy'); + const memoryPolicy = Object.entries(resources).find(([logicalId, resource]: [string, any]) => + logicalId.includes('AgentCoreRuntimeMemoryPolicy') + ); + + expect(memoryPolicy).toBeDefined(); + const [, policyResource] = memoryPolicy!; + expect(policyResource.Properties.Roles).toHaveLength(1); + }); + + it('should not create memory policy when memoryId is not provided', () => { + const noMemoryApp = new cdk.App(); + const noMemoryStack = new cdk.Stack(noMemoryApp, 'NoMemoryTestStack'); + new AgentExecutionRole(noMemoryStack, 'NoMemoryAgentExecutionRole', { + useCaseConfigTableName: 'test-config-table' + }); + const noMemoryTemplate = Template.fromStack(noMemoryStack); + + const resources = noMemoryTemplate.findResources('AWS::IAM::Policy'); + const memoryPolicy = Object.entries(resources).find( + ([logicalId, resource]: [string, any]) => + resource.Properties.PolicyName === 'AgentCoreRuntimeMemoryPolicy' + ); + + expect(memoryPolicy).toBeUndefined(); + }); + }); + + describe('Secrets Manager permissions', () => { + it('should include scoped Secrets Manager permissions', () => { + // Verify Secrets Manager permissions are scoped to bedrock-agentcore-identity! prefix + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'SecretsManagerAccess', + Effect: 'Allow', + Action: 'secretsmanager:GetSecretValue', + Resource: Match.anyValue() + } + ]) + } + } + ] + }); + }); + + it('should scope Secrets Manager to bedrock-agentcore-identity prefix', () => { + const resources = template.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const secretsManagerStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'SecretsManagerAccess' + ); + + expect(secretsManagerStatement).toBeDefined(); + expect(secretsManagerStatement.Resource).toBeDefined(); + + // Verify the resource includes the bedrock-agentcore-identity! prefix + const resource = secretsManagerStatement.Resource; + const arnParts = resource['Fn::Join'][1]; + const arnString = arnParts.join(''); + expect(arnString).toContain('secret:bedrock-agentcore-identity!'); + }); + + it('should not use wildcard for all secrets', () => { + // Verify that Secrets Manager permissions are NOT using a full wildcard (secret:*) + const resources = template.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const secretsManagerStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'SecretsManagerAccess' + ); + + expect(secretsManagerStatement).toBeDefined(); + expect(secretsManagerStatement.Resource).toBeDefined(); + + // Verify the resource does NOT end with just 'secret:*' + const resource = secretsManagerStatement.Resource; + const arnParts = resource['Fn::Join'][1]; + const lastPart = arnParts[arnParts.length - 1]; + + // Should have the prefix, not just a wildcard + expect(lastPart).not.toBe('secret:*'); + expect(lastPart).toContain('bedrock-agentcore-identity!'); + }); + + it('should follow least privilege with scoped secret access', () => { + // Verify that only GetSecretValue is granted, not other secret operations + template.hasResourceProperties('AWS::IAM::Role', { + Policies: [ + { + PolicyName: 'AgentCoreRuntimePolicy', + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'SecretsManagerAccess', + Effect: 'Allow', + Action: 'secretsmanager:GetSecretValue', + Resource: Match.anyValue() + } + ]) + } + } + ] + }); + + // Verify no write permissions are granted + const resources = template.findResources('AWS::IAM::Role'); + const roleResource = Object.values(resources)[0] as any; + const policy = roleResource.Properties.Policies[0]; + const secretsManagerStatement = policy.PolicyDocument.Statement.find( + (stmt: any) => stmt.Sid === 'SecretsManagerAccess' + ); + + expect(secretsManagerStatement.Action).toEqual('secretsmanager:GetSecretValue'); + }); + }); +}); diff --git a/source/infrastructure/test/use-case-stacks/agent-core/components/agent-invocation-lambda.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/components/agent-invocation-lambda.test.ts new file mode 100644 index 00000000..3e87474b --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/components/agent-invocation-lambda.test.ts @@ -0,0 +1,181 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { Match, Template } from 'aws-cdk-lib/assertions'; +import { AgentInvocationLambda } from '../../../../lib/use-case-stacks/agent-core/components/agent-invocation-lambda'; +import { LANGCHAIN_LAMBDA_PYTHON_RUNTIME } from '../../../../lib/utils/constants'; + +describe('AgentInvocationLambda', () => { + let app: cdk.App; + let stack: cdk.Stack; + let template: Template; + let agentInvocationLambda: AgentInvocationLambda; + + const mockProps = { + agentRuntimeArn: 'arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime', + useCaseUUID: 'test-uuid-1234' + }; + + beforeEach(() => { + app = new cdk.App(); + stack = new cdk.Stack(app, 'TestStack'); + agentInvocationLambda = new AgentInvocationLambda(stack, 'TestAgentInvocationLambda', mockProps); + template = Template.fromStack(stack); + }); + + describe('createLambdaFunction', () => { + it('should create Lambda function with correct properties', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'handler.lambda_handler', + Runtime: LANGCHAIN_LAMBDA_PYTHON_RUNTIME.name, + MemorySize: 1024, + Timeout: 900, + Environment: { + Variables: { + POWERTOOLS_SERVICE_NAME: 'AGENT_CORE_INVOCATION', + AGENT_RUNTIME_ARN: mockProps.agentRuntimeArn, + USE_CASE_UUID: mockProps.useCaseUUID + } + } + }); + }); + + it('should create Lambda function with correct code location', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Code: { + S3Bucket: Match.anyValue(), + S3Key: Match.anyValue() // S3 key is a hash, not the directory name + } + }); + }); + + it('should create Lambda function with correct runtime and handler', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Runtime: 'python3.13', + Handler: 'handler.lambda_handler', + MemorySize: 1024, + Timeout: 900 + }); + }); + }); + + describe('createInvocationRole', () => { + it('should create IAM role with correct assume role policy', () => { + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: [ + { + Effect: 'Allow', + Principal: { + Service: 'lambda.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ] + } + }); + }); + + it('should create role with AgentCore invocation permissions', () => { + // The addToPolicy method creates separate AWS::IAM::Policy resources + // Let's check for a separate policy resource that contains AgentCore permissions + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'AgentCoreRuntimeInvocation', + Effect: 'Allow', + Action: [ + 'bedrock-agentcore:InvokeAgentRuntime', + 'bedrock-agentcore:InvokeAgentRuntimeForUser' + ], + Resource: Match.anyValue() + } + ]) + } + }); + }); + + it('should create role with WebSocket permissions', () => { + // Check for a separate policy resource that contains WebSocket permissions + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'WebSocketManagement', + Effect: 'Allow', + Action: 'execute-api:ManageConnections', + Resource: Match.anyValue() + } + ]) + } + }); + }); + + it('should create role with basic Lambda permissions from createDefaultLambdaRole', () => { + // The createDefaultLambdaRole utility adds CloudWatch Logs and X-Ray permissions + // We just verify the role exists and has the correct assume role policy + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'lambda.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + }); + }); + + describe('VPC deployment limitations', () => { + it('should not include VPC-specific permissions as Agent Core v4.0.0 does not support VPC deployments', () => { + // Agent Core v4.0.0 runs in non-VPC mode only + // Verify that no VPC-specific permissions are present in any policy + const policies = template.findResources('AWS::IAM::Policy'); + + // Check that none of the policies contain VPC-specific permissions + Object.values(policies).forEach((policy: any) => { + const statements = policy.Properties?.PolicyDocument?.Statement || []; + statements.forEach((statement: any) => { + // Ensure no VPC-related actions are present + const actions = Array.isArray(statement.Action) ? statement.Action : [statement.Action]; + const hasVpcActions = actions.some( + (action: string) => + action.includes('ec2:CreateNetworkInterface') || + action.includes('ec2:DescribeNetworkInterfaces') || + action.includes('ec2:DeleteNetworkInterface') || + action.includes('ec2:AttachNetworkInterface') || + action.includes('ec2:DetachNetworkInterface') + ); + expect(hasVpcActions).toBe(false); + }); + }); + }); + }); + + describe('lambda function properties', () => { + it('should expose the Lambda function', () => { + expect(agentInvocationLambda.function).toBeInstanceOf(lambda.Function); + }); + + it('should have correct function description', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Description: 'Lambda for AgentCore Runtime invocation via WebSocket with streaming support' + }); + }); + }); + + describe('CDK Nag suppressions', () => { + it('should have appropriate CDK Nag suppressions', () => { + // This test verifies that the Lambda function has been created with proper suppressions + // The actual suppressions are added via NagSuppressions.addResourceSuppressions + expect(agentInvocationLambda.function).toBeDefined(); + }); + }); +}); diff --git a/source/infrastructure/test/use-case-stacks/agent-core/components/agent-memory-deployment.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/components/agent-memory-deployment.test.ts new file mode 100644 index 00000000..70010e3c --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/components/agent-memory-deployment.test.ts @@ -0,0 +1,106 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { Template } from 'aws-cdk-lib/assertions'; +import { AgentMemoryDeployment } from '../../../../lib/use-case-stacks/agent-core/components/agent-memory-deployment'; + +describe('AgentMemoryDeployment', () => { + let stack: cdk.Stack; + let mockLambda: lambda.Function; + + beforeEach(() => { + stack = new cdk.Stack(); + mockLambda = new lambda.Function(stack, 'MockLambda', { + runtime: lambda.Runtime.PYTHON_3_11, + handler: 'index.handler', + code: lambda.Code.fromInline('def handler(event, context): pass') + }); + }); + + test('creates memory deployment with required properties', () => { + const memoryDeployment = new AgentMemoryDeployment(stack, 'TestMemoryDeployment', { + customResourceLambda: mockLambda, + agentRuntimeName: 'test-memory', + enableLongTermMemory: 'Yes' + }); + + const template = Template.fromStack(stack); + + // Verify custom resource is created + template.hasResourceProperties('Custom::AgentCoreMemory', { + Resource: 'DEPLOY_AGENT_CORE_MEMORY', + AgentRuntimeName: 'test-memory', + EnableLongTermMemory: 'Yes' + }); + + // Verify IAM policy is created + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Effect: 'Allow', + Action: [ + 'bedrock-agentcore:CreateMemory', + 'bedrock-agentcore:UpdateMemory', + 'bedrock-agentcore:DeleteMemory', + 'bedrock-agentcore:GetMemory', + 'bedrock-agentcore:ListMemories' + ], + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { 'Ref': 'AWS::Partition' }, + ':bedrock-agentcore:', + { 'Ref': 'AWS::Region' }, + ':', + { 'Ref': 'AWS::AccountId' }, + ':memory/*' + ] + ] + } + } + ] + } + }); + + expect(memoryDeployment.customResource).toBeDefined(); + expect(memoryDeployment.managementPolicy).toBeDefined(); + }); + + test('creates memory deployment with long-term memory disabled', () => { + new AgentMemoryDeployment(stack, 'TestMemoryDeployment', { + customResourceLambda: mockLambda, + agentRuntimeName: 'test-memory', + enableLongTermMemory: 'No' + }); + + const template = Template.fromStack(stack); + + template.hasResourceProperties('Custom::AgentCoreMemory', { + EnableLongTermMemory: 'No' + }); + }); + + test('policy is attached to lambda role', () => { + new AgentMemoryDeployment(stack, 'TestMemoryDeployment', { + customResourceLambda: mockLambda, + agentRuntimeName: 'test-memory', + enableLongTermMemory: 'Yes' + }); + + const template = Template.fromStack(stack); + + // Verify policy is attached to the lambda's role + template.hasResourceProperties('AWS::IAM::Policy', { + Roles: [ + { + Ref: 'MockLambdaServiceRoleE789E511' + } + ] + }); + }); +}); diff --git a/source/infrastructure/test/use-case-stacks/agent-core/components/agent-runtime-deployment.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/components/agent-runtime-deployment.test.ts new file mode 100644 index 00000000..1d8aac0c --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/components/agent-runtime-deployment.test.ts @@ -0,0 +1,383 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { Match, Template } from 'aws-cdk-lib/assertions'; +import { AgentRuntimeDeployment } from '../../../../lib/use-case-stacks/agent-core/components/agent-runtime-deployment'; + +describe('AgentRuntimeDeployment', () => { + let app: cdk.App; + let stack: cdk.Stack; + let template: Template; + let agentRuntimeDeployment: AgentRuntimeDeployment; + let mockCustomResourceLambda: lambda.Function; + let mockAgentExecutionRole: iam.Role; + + const mockProps = { + agentRuntimeName: 'test-agent-runtime', + agentImageUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/test-agent:latest', + useCaseUUID: 'test-uuid-1234', + useCaseConfigTableName: 'test-config-table', + useCaseConfigRecordKey: 'test-config-key', + cognitoUserPoolId: 'us-east-1_TEST123', + additionalProperties: { + EnableLongTermMemory: 'true', + UseCaseType: 'Agent' + } + }; + + beforeEach(() => { + app = new cdk.App(); + stack = new cdk.Stack(app, 'TestStack'); + + // Create mock dependencies + mockCustomResourceLambda = new lambda.Function(stack, 'MockCustomResourceLambda', { + runtime: lambda.Runtime.PYTHON_3_11, + handler: 'index.handler', + code: lambda.Code.fromInline('def handler(event, context): pass') + }); + + mockAgentExecutionRole = new iam.Role(stack, 'MockAgentExecutionRole', { + assumedBy: new iam.ServicePrincipal('bedrock-agentcore.amazonaws.com') + }); + + agentRuntimeDeployment = new AgentRuntimeDeployment(stack, 'TestAgentRuntimeDeployment', { + ...mockProps, + customResourceLambda: mockCustomResourceLambda, + agentExecutionRole: mockAgentExecutionRole + }); + + template = Template.fromStack(stack); + }); + + describe('createCustomResource', () => { + it('should create custom resource with correct properties', () => { + template.hasResourceProperties('Custom::AgentCoreRuntime', { + ServiceToken: { + 'Fn::GetAtt': [Match.stringLikeRegexp('MockCustomResourceLambda.*'), 'Arn'] + }, + Resource: 'DEPLOY_AGENT_CORE', + AgentRuntimeName: mockProps.agentRuntimeName, + ExecutionRoleArn: { + 'Fn::GetAtt': [Match.stringLikeRegexp('MockAgentExecutionRole.*'), 'Arn'] + }, + AgentImageUri: mockProps.agentImageUri, + UseCaseUUID: mockProps.useCaseUUID, + UseCaseConfigTableName: mockProps.useCaseConfigTableName, + UseCaseConfigRecordKey: mockProps.useCaseConfigRecordKey, + CognitoUserPoolId: mockProps.cognitoUserPoolId, + EnableLongTermMemory: mockProps.additionalProperties!.EnableLongTermMemory, + UseCaseType: mockProps.additionalProperties!.UseCaseType + }); + }); + + it('should create custom resource with correct resource type', () => { + template.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE' + }); + }); + }); + + describe('createManagementPolicy', () => { + it('should create IAM policy with AgentCore management permissions', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'AgentCoreRuntimeManagement', + Effect: 'Allow', + Action: [ + 'bedrock-agentcore:CreateAgentRuntime', + 'bedrock-agentcore:CreateAgentRuntimeEndpoint', + 'bedrock-agentcore:CreateWorkloadIdentity', + 'bedrock-agentcore:UpdateAgentRuntime', + 'bedrock-agentcore:DeleteAgentRuntime', + 'bedrock-agentcore:GetAgentRuntime', + 'bedrock-agentcore:ListAgentRuntimes', + 'bedrock-agentcore:ListAgentRuntimeEndpoints', + 'bedrock-agentcore:ListAgentRuntimeVersions' + ], + Resource: Match.arrayWith([ + { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':bedrock-agentcore:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':runtime/*' + ] + ] + }, + { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':bedrock-agentcore:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':workload-identity-directory/*' + ] + ] + } + ]) + } + ]) + } + }); + }); + + it('should create IAM policy with DynamoDB permissions scoped to specific table', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'DynamoDBConfigUpdate', + Effect: 'Allow', + Action: ['dynamodb:UpdateItem', 'dynamodb:GetItem'], + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':dynamodb:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + `:table/${mockProps.useCaseConfigTableName}` + ] + ] + } + } + ]) + } + }); + }); + + it('should create IAM policy with IAM pass role permissions', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'PassRoleToAgentCore', + Effect: 'Allow', + Action: 'iam:PassRole', + Resource: { + 'Fn::GetAtt': [Match.stringLikeRegexp('MockAgentExecutionRole.*'), 'Arn'] + } + } + ]) + } + }); + }); + + it('should create IAM policy with correct statement structure', () => { + // The policy should have exactly 4 statements: AgentCore runtime, ECR, DynamoDB, and PassRole + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Sid: 'AgentCoreRuntimeManagement', + Effect: 'Allow', + Action: [ + 'bedrock-agentcore:CreateAgentRuntime', + 'bedrock-agentcore:CreateAgentRuntimeEndpoint', + 'bedrock-agentcore:CreateWorkloadIdentity', + 'bedrock-agentcore:UpdateAgentRuntime', + 'bedrock-agentcore:DeleteAgentRuntime', + 'bedrock-agentcore:GetAgentRuntime', + 'bedrock-agentcore:ListAgentRuntimes', + 'bedrock-agentcore:ListAgentRuntimeEndpoints', + 'bedrock-agentcore:ListAgentRuntimeVersions' + ], + Resource: Match.anyValue() + }, + { + Sid: 'ECRPullThroughCache', + Effect: 'Allow', + Action: [ + 'ecr:DescribeRepositories', + 'ecr:BatchGetImage', + 'ecr:DescribeImages', + 'ecr:CreateRepository', + 'ecr:BatchImportUpstreamImage' + ], + Resource: Match.anyValue() + }, + { + Sid: 'DynamoDBConfigUpdate', + Effect: 'Allow', + Action: ['dynamodb:UpdateItem', 'dynamodb:GetItem'], + Resource: Match.anyValue() + }, + { + Sid: 'PassRoleToAgentCore', + Effect: 'Allow', + Action: 'iam:PassRole', + Resource: Match.anyValue() + } + ] + } + }); + }); + + it('should attach policy to custom resource Lambda role', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + Roles: [ + { + Ref: Match.stringLikeRegexp('MockCustomResourceLambda.*Role.*') + } + ] + }); + }); + }); + + describe('getAgentRuntimeArn', () => { + it('should return custom resource attribute for agent runtime ARN', () => { + const agentRuntimeArn = agentRuntimeDeployment.getAgentRuntimeArn(); + expect(agentRuntimeArn).toBeDefined(); + + // Verify the custom resource has the AgentRuntimeArn attribute + template.hasResource('Custom::AgentCoreRuntime', { + Properties: Match.objectLike({ + Resource: 'DEPLOY_AGENT_CORE' + }) + }); + }); + }); + + describe('with different configurations', () => { + it('should handle different additional properties', () => { + const appDifferent = new cdk.App(); + const stackWithDifferentMemory = new cdk.Stack(appDifferent, 'TestStackDifferentMemory'); + + const mockLambda = new lambda.Function(stackWithDifferentMemory, 'MockLambda', { + runtime: lambda.Runtime.PYTHON_3_11, + handler: 'index.handler', + code: lambda.Code.fromInline('def handler(event, context): pass') + }); + + const mockRole = new iam.Role(stackWithDifferentMemory, 'MockRole', { + assumedBy: new iam.ServicePrincipal('bedrock-agentcore.amazonaws.com') + }); + + new AgentRuntimeDeployment(stackWithDifferentMemory, 'TestAgentRuntimeDeploymentDifferent', { + agentRuntimeName: 'different-agent-runtime', + agentImageUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/different-agent:latest', + useCaseUUID: 'different-uuid-5678', + useCaseConfigTableName: 'different-config-table', + useCaseConfigRecordKey: 'different-config-key', + cognitoUserPoolId: 'us-east-1_DIFFERENT', + customResourceLambda: mockLambda, + agentExecutionRole: mockRole, + additionalProperties: { + EnableLongTermMemory: 'false', + UseCaseType: 'Workflow', + CustomProperty: 'custom-value' + } + }); + + const templateDifferent = Template.fromStack(stackWithDifferentMemory); + + templateDifferent.hasResourceProperties('Custom::AgentCoreRuntime', { + CognitoUserPoolId: 'us-east-1_DIFFERENT', + EnableLongTermMemory: 'false', + UseCaseType: 'Workflow', + CustomProperty: 'custom-value' + }); + }); + }); + + describe('CDK Nag suppressions', () => { + it('should have appropriate CDK Nag suppressions for wildcard permissions', () => { + // This test verifies that the custom resource has been created with proper suppressions + // The actual suppressions are added via NagSuppressions.addResourceSuppressions + expect(agentRuntimeDeployment.getAgentRuntimeArn()).toBeDefined(); + }); + }); + + describe('updateMultimodalProperties', () => { + it('should initialize custom resource with empty multimodal properties', () => { + template.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE', + MultimodalDataMetadataTable: '', // initially empty string + MultimodalDataBucket: '' // initially empty string + }); + }); + + it('should update multimodal properties after calling updateMultimodalProperties', () => { + const testTableName = 'test-multimodal-metadata-table'; + const testBucketName = 'test-multimodal-data-bucket'; + + agentRuntimeDeployment.updateMultimodalProperties(testTableName, testBucketName); + const cfnCustomResource = agentRuntimeDeployment.customResource.node.defaultChild as cdk.CfnCustomResource; + + expect(cfnCustomResource.getAtt('MultimodalDataMetadataTable')).toBeDefined(); + expect(cfnCustomResource.getAtt('MultimodalDataBucket')).toBeDefined(); + expect(agentRuntimeDeployment.updateMultimodalProperties).toBeDefined(); + }); + + it('should allow multiple calls to updateMultimodalProperties with latest values taking precedence', () => { + const firstTableName = 'first-table-name'; + const firstBucketName = 'first-bucket-name'; + const finalTableName = 'final-table-name'; + const finalBucketName = 'final-bucket-name'; + + agentRuntimeDeployment.updateMultimodalProperties(firstTableName, firstBucketName); + expect(() => { + agentRuntimeDeployment.updateMultimodalProperties(firstTableName, firstBucketName); + }).not.toThrow(); + + expect(() => { + agentRuntimeDeployment.updateMultimodalProperties(finalTableName, finalBucketName); + }).not.toThrow(); + }); + + it('should preserve other custom resource properties when updating multimodal properties', () => { + const testTableName = 'test-multimodal-metadata-table'; + const testBucketName = 'test-multimodal-data-bucket'; + + agentRuntimeDeployment.updateMultimodalProperties(testTableName, testBucketName); + + // Verify that the original custom resource still exists and has the base properties + template.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE', + AgentRuntimeName: mockProps.agentRuntimeName, + AgentImageUri: mockProps.agentImageUri, + UseCaseUUID: mockProps.useCaseUUID, + UseCaseConfigTableName: mockProps.useCaseConfigTableName, + UseCaseConfigRecordKey: mockProps.useCaseConfigRecordKey, + CognitoUserPoolId: mockProps.cognitoUserPoolId, + EnableLongTermMemory: mockProps.additionalProperties!.EnableLongTermMemory, + UseCaseType: mockProps.additionalProperties!.UseCaseType + }); + + expect(agentRuntimeDeployment.updateMultimodalProperties).toBeDefined(); + }); + }); + + describe('error handling', () => { + it('should handle custom resource creation with all required properties', () => { + // Verify all required properties are present in the custom resource + template.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE', + AgentRuntimeName: Match.anyValue(), + ExecutionRoleArn: Match.anyValue(), + AgentImageUri: Match.anyValue(), + UseCaseUUID: Match.anyValue(), + UseCaseConfigTableName: Match.anyValue(), + UseCaseConfigRecordKey: Match.anyValue(), + CognitoUserPoolId: Match.anyValue() + }); + }); + }); +}); diff --git a/source/infrastructure/test/use-case-stacks/agent-core/components/ecr-pull-through-cache.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/components/ecr-pull-through-cache.test.ts new file mode 100644 index 00000000..314358d4 --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/components/ecr-pull-through-cache.test.ts @@ -0,0 +1,473 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as lambda from 'aws-cdk-lib/aws-lambda'; +import { Match, Template } from 'aws-cdk-lib/assertions'; +import { ECRPullThroughCache } from '../../../../lib/use-case-stacks/agent-core/components/ecr-pull-through-cache'; +import { + ECR_UPSTREAM_REGISTRY, + ECR_UPSTREAM_REGISTRY_URL, + ECR_UPSTREAM_REPOSITORY_PREFIX +} from '../../../../lib/utils/constants'; +import { + resolveUpstreamRegistryUrl, + resolveUpstreamRepositoryPrefix +} from '../../../../lib/use-case-stacks/agent-core/utils/image-uri-resolver'; + +describe('ECRPullThroughCache', () => { + let app: cdk.App; + let stack: cdk.Stack; + let mockLambda: lambda.Function; + let originalEnvVars: { [key: string]: string | undefined }; + + beforeEach(() => { + app = new cdk.App(); + stack = new cdk.Stack(app, 'TestStack'); + + // Save original environment variables + originalEnvVars = { + PUBLIC_ECR_REGISTRY: process.env.PUBLIC_ECR_REGISTRY, + PUBLIC_ECR_REPOSITORY_PREFIX: process.env.PUBLIC_ECR_REPOSITORY_PREFIX, + PUBLIC_ECR_TAG: process.env.PUBLIC_ECR_TAG, + DIST_OUTPUT_BUCKET: process.env.DIST_OUTPUT_BUCKET, + VERSION: process.env.VERSION, + DIST_ACCOUNT_ID: process.env.DIST_ACCOUNT_ID, + SOLUTION_ID: process.env.SOLUTION_ID, + SOLUTION_NAME: process.env.SOLUTION_NAME + }; + + // Create mock lambda function + mockLambda = new lambda.Function(stack, 'MockLambda', { + runtime: lambda.Runtime.PYTHON_3_13, + handler: 'index.handler', + code: lambda.Code.fromInline('def handler(event, context): pass') + }); + }); + + afterEach(() => { + // Restore original environment variables + Object.keys(originalEnvVars).forEach((key) => { + if (originalEnvVars[key] !== undefined) { + process.env[key] = originalEnvVars[key]; + } else { + delete process.env[key]; + } + }); + }); + + describe('constructor', () => { + it('should create ECR pull-through cache rule with custom resource for UUID-based prefix', () => { + const ecrCache = new ECRPullThroughCache(stack, 'TestECRCache', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'a1b2c3d4' + }); + + const template = Template.fromStack(stack); + + // Should create custom resource - check for the actual resource type + template.hasResourceProperties('Custom::GenEcrRepoPrefix', { + ServiceToken: { + 'Fn::GetAtt': [Match.stringLikeRegexp('MockLambda.*'), 'Arn'] + }, + Resource: 'GEN_ECR_REPO_PREFIX', + UseCaseShortId: 'a1b2c3d4' + }); + + // Should create ECR pull-through cache rule with environment-aware values + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + EcrRepositoryPrefix: { + 'Fn::GetAtt': [Match.stringLikeRegexp('.*EcrRepoPrefixGenerator.*'), 'EcrRepoPrefix'] + }, + UpstreamRegistry: ECR_UPSTREAM_REGISTRY, + UpstreamRegistryUrl: resolveUpstreamRegistryUrl(), + UpstreamRepositoryPrefix: resolveUpstreamRepositoryPrefix() + }); + + expect(ecrCache).toBeDefined(); + }); + + it('should create ECR pull-through cache rule with custom resource for stack name-based prefix', () => { + const ecrCache = new ECRPullThroughCache(stack, 'TestECRCache2', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda + // No useCaseShortId - will use stack name + }); + + const template = Template.fromStack(stack); + + // Should create custom resource with stack name - check for the actual resource type + template.hasResourceProperties('Custom::GenEcrRepoPrefix', { + ServiceToken: { + 'Fn::GetAtt': [Match.stringLikeRegexp('MockLambda.*'), 'Arn'] + }, + Resource: 'GEN_ECR_REPO_PREFIX', + StackName: { + Ref: 'AWS::StackName' + } + }); + + expect(ecrCache).toBeDefined(); + }); + + it('should require custom resource lambda', () => { + expect(() => { + new ECRPullThroughCache(stack, 'TestECRError', { + gaabVersion: '4.0.0' + // Missing required customResourceLambda + } as any); + }).toThrow(); + }); + }); + + describe('cache rule naming', () => { + it('should use GaabAgentImageCache for standalone deployments', () => { + new ECRPullThroughCache(stack, 'TestECRCache', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'a1b2c3d4' + }); + + const template = Template.fromStack(stack); + + // Should create cache rule with standalone naming + template.hasResource('AWS::ECR::PullThroughCacheRule', {}); + + // Check that the logical ID contains the expected pattern + const resources = template.findResources('AWS::ECR::PullThroughCacheRule'); + const logicalIds = Object.keys(resources); + expect(logicalIds.some((id) => id.includes('GaabAgentImageCache'))).toBe(true); + }); + + it('should use SharedAgentImageCache for shared deployments', () => { + new ECRPullThroughCache(stack, 'TestECRCache', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda + // No useCaseShortId - shared cache + }); + + const template = Template.fromStack(stack); + + // Check that the logical ID contains the expected pattern + const resources = template.findResources('AWS::ECR::PullThroughCacheRule'); + const logicalIds = Object.keys(resources); + expect(logicalIds.some((id) => id.includes('SharedAgentImageCache'))).toBe(true); + }); + }); + + describe('image URI methods', () => { + let ecrCache: ECRPullThroughCache; + + beforeEach(() => { + ecrCache = new ECRPullThroughCache(stack, 'TestECRCache', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'a1b2c3d4' + }); + }); + + it('should provide getCachedImageUri method', () => { + const imageUri = ecrCache.getCachedImageUri(); + expect(imageUri).toBeDefined(); + expect(typeof imageUri).toBe('string'); + }); + + it('should provide getCachedWorkflowImageUri method', () => { + const workflowUri = ecrCache.getCachedWorkflowImageUri(); + expect(workflowUri).toBeDefined(); + expect(typeof workflowUri).toBe('string'); + }); + + it('should provide getRepositoryPrefix method', () => { + const prefix = ecrCache.getRepositoryPrefix(); + expect(prefix).toBeDefined(); + expect(typeof prefix).toBe('string'); + }); + }); + + describe('repository prefix handling', () => { + it('should create custom resource for UUID-based prefix when useCaseShortId provided', () => { + const ecrTest = new ECRPullThroughCache(stack, 'TestECRUUID', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'a1b2c3d4' + }); + + // Should create the construct without error + expect(ecrTest).toBeDefined(); + }); + + it('should create custom resource for stack name-based prefix when no useCaseShortId', () => { + const ecrTest = new ECRPullThroughCache(stack, 'TestECRStackName', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda + // No useCaseShortId - will use stack name + }); + + // Should create the construct without error + expect(ecrTest).toBeDefined(); + }); + }); + + describe('CI/CD environment variable support', () => { + let originalPublicEcrRegistry: string | undefined; + let originalPublicEcrTag: string | undefined; + + beforeEach(() => { + // Save original environment variables + originalPublicEcrRegistry = process.env.PUBLIC_ECR_REGISTRY; + originalPublicEcrTag = process.env.PUBLIC_ECR_TAG; + }); + + afterEach(() => { + // Restore original environment variables + if (originalPublicEcrRegistry !== undefined) { + process.env.PUBLIC_ECR_REGISTRY = originalPublicEcrRegistry; + } else { + delete process.env.PUBLIC_ECR_REGISTRY; + } + + if (originalPublicEcrTag !== undefined) { + process.env.PUBLIC_ECR_TAG = originalPublicEcrTag; + } else { + delete process.env.PUBLIC_ECR_TAG; + } + }); + + describe('integration with pull-through cache rule', () => { + it('should use environment-aware registry URL in cache rule', () => { + process.env.PUBLIC_ECR_REGISTRY = 'custom.registry.aws'; + + new ECRPullThroughCache(stack, 'TestECREnvRegistry', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'test123' + }); + + const template = Template.fromStack(stack); + + // Should use the custom registry URL + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + UpstreamRegistryUrl: 'custom.registry.aws' + }); + }); + + it('should use default registry URL when environment variable not set', () => { + delete process.env.PUBLIC_ECR_REGISTRY; + + new ECRPullThroughCache(stack, 'TestECRDefaultRegistry', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'test456' + }); + + const template = Template.fromStack(stack); + + // Should use the default registry URL + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + UpstreamRegistryUrl: ECR_UPSTREAM_REGISTRY_URL + }); + }); + + it('should handle CI/CD environment with DIST_OUTPUT_BUCKET', () => { + // Simulate actual CI/CD environment values + process.env.DIST_OUTPUT_BUCKET = 'bucket'; + process.env.PUBLIC_ECR_REGISTRY = 'registry'; + process.env.PUBLIC_ECR_TAG = 'tag'; + + new ECRPullThroughCache(stack, 'TestECRCICD', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'cicd123' + }); + + const template = Template.fromStack(stack); + + // Should use the CI/CD registry URL (exactly as set in pipeline) + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + UpstreamRegistryUrl: 'registry' + }); + }); + }); + + describe('environment detection', () => { + it('should work in local environment (no DIST_OUTPUT_BUCKET)', () => { + delete process.env.DIST_OUTPUT_BUCKET; + delete process.env.PUBLIC_ECR_REGISTRY; + delete process.env.PUBLIC_ECR_REPOSITORY_PREFIX; + + const ecrCache = new ECRPullThroughCache(stack, 'TestECRLocal', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'local123' + }); + + const template = Template.fromStack(stack); + + // Should use default values in local environment + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + UpstreamRegistryUrl: ECR_UPSTREAM_REGISTRY_URL, + UpstreamRepositoryPrefix: ECR_UPSTREAM_REPOSITORY_PREFIX + }); + + expect(ecrCache).toBeDefined(); + }); + + it('should work in CI/CD environment (with DIST_OUTPUT_BUCKET)', () => { + // Use actual CI/CD pipeline environment values + process.env.DIST_OUTPUT_BUCKET = 'bucket'; + process.env.PUBLIC_ECR_REGISTRY = 'registry'; + process.env.PUBLIC_ECR_TAG = 'tag'; + const ecrCache = new ECRPullThroughCache(stack, 'TestECRCICD2', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'cicd456' + }); + + const template = Template.fromStack(stack); + + // Should use CI/CD environment values + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + UpstreamRegistryUrl: 'registry', + UpstreamRepositoryPrefix: 'aws-solutions' + }); + + expect(ecrCache).toBeDefined(); + }); + }); + }); + + describe('environment-specific behavior validation', () => { + it('should validate resolver functions work correctly in different environments', () => { + // Test local environment + delete process.env.PUBLIC_ECR_REGISTRY; + delete process.env.PUBLIC_ECR_REPOSITORY_PREFIX; + delete process.env.DIST_OUTPUT_BUCKET; + delete process.env.PUBLIC_ECR_TAG; + + expect(resolveUpstreamRegistryUrl()).toBe(ECR_UPSTREAM_REGISTRY_URL); + expect(resolveUpstreamRepositoryPrefix()).toBe(ECR_UPSTREAM_REPOSITORY_PREFIX); + + // Test actual CI/CD environment (based on pipeline env vars) + process.env.DIST_OUTPUT_BUCKET = 'bucket'; + process.env.PUBLIC_ECR_REGISTRY = 'registry'; + process.env.PUBLIC_ECR_TAG = 'tag'; + // PUBLIC_ECR_REPOSITORY_PREFIX is not set in CI/CD pipeline + + expect(resolveUpstreamRegistryUrl()).toBe('registry'); + expect(resolveUpstreamRepositoryPrefix()).toBe(ECR_UPSTREAM_REPOSITORY_PREFIX); // Uses default + }); + + it('should create consistent cache rules regardless of environment', () => { + // Test that the construct creates valid resources in both environments + const testEnvironments = [ + { + name: 'local', + env: { + DIST_OUTPUT_BUCKET: undefined, + PUBLIC_ECR_REGISTRY: undefined, + PUBLIC_ECR_REPOSITORY_PREFIX: undefined, + PUBLIC_ECR_TAG: undefined + }, + expectedRegistry: ECR_UPSTREAM_REGISTRY_URL, + expectedPrefix: ECR_UPSTREAM_REPOSITORY_PREFIX + }, + { + name: 'ci-cd', + env: { + DIST_OUTPUT_BUCKET: 'bucket', // Actual CI/CD value + PUBLIC_ECR_REGISTRY: 'registry', // Actual CI/CD value + PUBLIC_ECR_TAG: 'tag', // Actual CI/CD value + PUBLIC_ECR_REPOSITORY_PREFIX: undefined // Not set in CI/CD pipeline + }, + expectedRegistry: 'registry', + expectedPrefix: ECR_UPSTREAM_REPOSITORY_PREFIX // Uses default + } + ]; + + testEnvironments.forEach((testEnv, index) => { + // Set up environment + Object.keys(testEnv.env).forEach((key) => { + if (testEnv.env[key as keyof typeof testEnv.env] !== undefined) { + process.env[key] = testEnv.env[key as keyof typeof testEnv.env]; + } else { + delete process.env[key]; + } + }); + + // Create a new app and stack for each test to avoid synthesis conflicts + const testApp = new cdk.App(); + const testStack = new cdk.Stack(testApp, `TestStack${testEnv.name}${index}`); + const testLambda = new lambda.Function(testStack, 'TestLambda', { + runtime: lambda.Runtime.PYTHON_3_13, + handler: 'index.handler', + code: lambda.Code.fromInline('def handler(event, context): pass') + }); + + const ecrCache = new ECRPullThroughCache(testStack, 'TestECRCache', { + gaabVersion: '4.0.0', + customResourceLambda: testLambda, + useCaseShortId: `test${index}` + }); + + const template = Template.fromStack(testStack); + + // Validate the cache rule properties match the environment + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + EcrRepositoryPrefix: { + 'Fn::GetAtt': [Match.stringLikeRegexp('.*EcrRepoPrefixGenerator.*'), 'EcrRepoPrefix'] + }, + UpstreamRegistry: ECR_UPSTREAM_REGISTRY, + UpstreamRegistryUrl: testEnv.expectedRegistry, + UpstreamRepositoryPrefix: testEnv.expectedPrefix + }); + + expect(ecrCache).toBeDefined(); + expect(ecrCache.getCachedImageUri()).toBeDefined(); + expect(ecrCache.getCachedWorkflowImageUri()).toBeDefined(); + expect(ecrCache.getRepositoryPrefix()).toBeDefined(); + }); + }); + + describe('CI/CD pipeline compatibility', () => { + it('should work with exact CI/CD pipeline environment variables', () => { + // Set up exact environment variables from CI/CD pipeline + process.env.DIST_ACCOUNT_ID = '111111111111'; + process.env.DIST_OUTPUT_BUCKET = 'bucket'; + process.env.PUBLIC_ECR_REGISTRY = 'registry'; + process.env.VERSION = 'v4.0.0'; + process.env.SOLUTION_ID = 'SO0276'; + process.env.SOLUTION_NAME = 'generative-ai-application-builder-on-aws'; + const ecrCache = new ECRPullThroughCache(stack, 'TestECRPipeline', { + gaabVersion: '4.0.0', + customResourceLambda: mockLambda, + useCaseShortId: 'pipeline123' + }); + + const template = Template.fromStack(stack); + + // Should create valid cache rule with CI/CD values + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + EcrRepositoryPrefix: { + 'Fn::GetAtt': [Match.stringLikeRegexp('.*EcrRepoPrefixGenerator.*'), 'EcrRepoPrefix'] + }, + UpstreamRegistry: ECR_UPSTREAM_REGISTRY, + UpstreamRegistryUrl: 'registry', // From CI/CD env var + UpstreamRepositoryPrefix: 'aws-solutions' + }); + + // Validate all methods work + expect(ecrCache).toBeDefined(); + expect(ecrCache.getCachedImageUri()).toBeDefined(); + expect(ecrCache.getCachedWorkflowImageUri()).toBeDefined(); + expect(ecrCache.getRepositoryPrefix()).toBeDefined(); + + // Clean up CI/CD specific env vars + delete process.env.DIST_ACCOUNT_ID; + delete process.env.SOLUTION_ID; + delete process.env.SOLUTION_NAME; + }); + }); + }); +}); diff --git a/source/infrastructure/test/use-case-stacks/agent-core/utils/image-uri-resolver.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/utils/image-uri-resolver.test.ts new file mode 100644 index 00000000..142466c8 --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/utils/image-uri-resolver.test.ts @@ -0,0 +1,433 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import { Template } from 'aws-cdk-lib/assertions'; +import { + ECRImageError, + sanitizeVersionTag, + resolveImageTag, + resolveUpstreamRepositoryPrefix, + resolveUpstreamRegistryUrl, + constructLocalEcrImageUri, + constructPullThroughCacheImageUri, + resolveImageUri, + determineDeploymentMode, + resolveSolutionVersion, + resolveImageUriWithConditions, + resolveWorkflowImageUri, + resolveAgentImageUriWithConditions, + ImageResolutionContext +} from '../../../../lib/use-case-stacks/agent-core/utils/image-uri-resolver'; +import { + GAAB_STRANDS_AGENT_IMAGE_NAME, + GAAB_STRANDS_WORKFLOW_IMAGE_NAME, + StackDeploymentSource +} from '../../../../lib/utils/constants'; + +describe('ImageUriResolver', () => { + let app: cdk.App; + let stack: cdk.Stack; + + beforeEach(() => { + app = new cdk.App({ + context: { + solution_version: 'v4.0.0' + } + }); + stack = new cdk.Stack(app, 'TestStack'); + }); + + afterEach(() => { + // Clean up environment variables + delete process.env.VERSION; + delete process.env.PUBLIC_ECR_TAG; + delete process.env.PUBLIC_ECR_REGISTRY; + delete process.env.PUBLIC_ECR_REPOSITORY_PREFIX; + delete process.env.DIST_OUTPUT_BUCKET; + }); + + describe('sanitizeVersionTag', () => { + it('should remove double v prefix for local deployment', () => { + // vv4.0.0 -> remove all v -> 4.0.0 -> add local -> 4.0.0-local -> add v -> v4.0.0-local + expect(sanitizeVersionTag('vv4.0.0', 'local')).toBe('v4.0.0-local'); + }); + + it('should handle single v prefix for local deployment', () => { + // v4.0.0 -> remove v -> 4.0.0 -> add local -> 4.0.0-local -> add v -> v4.0.0-local + expect(sanitizeVersionTag('v4.0.0', 'local')).toBe('v4.0.0-local'); + }); + + it('should add v prefix and local suffix for version without v prefix', () => { + // 4.0.0 -> no v to remove -> 4.0.0 -> add local -> 4.0.0-local -> add v -> v4.0.0-local + expect(sanitizeVersionTag('4.0.0', 'local')).toBe('v4.0.0-local'); + }); + + it('should handle pipeline deployment without local suffix', () => { + // v4.0.0 -> remove v -> 4.0.0 -> no local (pipeline) -> 4.0.0 -> add v -> v4.0.0 + expect(sanitizeVersionTag('v4.0.0', 'pipeline')).toBe('v4.0.0'); + }); + + it('should remove double v for pipeline deployment', () => { + // vv4.0.0 -> remove all v -> 4.0.0 -> no local (pipeline) -> 4.0.0 -> add v -> v4.0.0 + expect(sanitizeVersionTag('vv4.0.0', 'pipeline')).toBe('v4.0.0'); + }); + + it('should handle triple v prefix', () => { + // vvv4.0.0 -> remove all v -> 4.0.0 -> no local (pipeline) -> 4.0.0 -> add v -> v4.0.0 + expect(sanitizeVersionTag('vvv4.0.0', 'pipeline')).toBe('v4.0.0'); + }); + + it('should return latest for invalid input', () => { + expect(sanitizeVersionTag('')).toBe('latest'); + expect(sanitizeVersionTag(null as any)).toBe('latest'); + }); + + it('should not add duplicate local suffix', () => { + // 4.0.0-local -> no v to remove -> 4.0.0-local -> already has local -> 4.0.0-local -> add v -> v4.0.0-local + expect(sanitizeVersionTag('4.0.0-local', 'local')).toBe('v4.0.0-local'); + }); + }); + + describe('resolveImageTag', () => { + it('should use environment variable when available', () => { + process.env.PUBLIC_ECR_TAG = 'v5.0.0'; + expect(resolveImageTag('v4.0.0')).toBe('v5.0.0'); // pipeline mode: v5.0.0 -> 5.0.0 -> v5.0.0 + }); + + it('should use GAAB version when no environment variable', () => { + expect(resolveImageTag('4.0.0')).toBe('4.0.0'); + }); + + it('should handle version with existing v prefix', () => { + expect(resolveImageTag('v4.0.0')).toBe('v4.0.0'); + }); + + it('should sanitize double v prefix from environment', () => { + process.env.PUBLIC_ECR_TAG = 'vv5.0.0'; + expect(resolveImageTag('v4.0.0')).toBe('v5.0.0'); // pipeline mode: vv5.0.0 -> remove all v -> 5.0.0 -> add v -> v5.0.0 + }); + + it('should sanitize triple v prefix from environment', () => { + process.env.PUBLIC_ECR_TAG = 'vvv5.0.0'; + expect(resolveImageTag('v4.0.0')).toBe('v5.0.0'); // pipeline mode: vvv5.0.0 -> remove all v -> 5.0.0 -> add v -> v5.0.0 + }); + }); + + describe('resolveUpstreamRepositoryPrefix', () => { + it('should return default prefix when PUBLIC_ECR_REGISTRY is not set', () => { + delete process.env.PUBLIC_ECR_REGISTRY; + expect(resolveUpstreamRepositoryPrefix()).toBe('aws-solutions'); + }); + + it('should extract prefix from PUBLIC_ECR_REGISTRY with prefix', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws/custom-namespace'; + expect(resolveUpstreamRepositoryPrefix()).toBe('custom-namespace'); + }); + + it('should return default when PUBLIC_ECR_REGISTRY has no prefix', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws'; + expect(resolveUpstreamRepositoryPrefix()).toBe('aws-solutions'); + }); + + it('should handle PUBLIC_ECR_REGISTRY with trailing slash', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws/my-prefix/'; + expect(resolveUpstreamRepositoryPrefix()).toBe('my-prefix'); + }); + + it('should return default for invalid prefix characters', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws/invalid@prefix!'; + expect(resolveUpstreamRepositoryPrefix()).toBe('aws-solutions'); + }); + + it('should handle prefix with hyphens and underscores', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws/my-custom_prefix'; + expect(resolveUpstreamRepositoryPrefix()).toBe('my-custom_prefix'); + }); + + it('should handle prefix with forward slashes', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws/namespace/subnamespace'; + expect(resolveUpstreamRepositoryPrefix()).toBe('namespace'); + }); + }); + + describe('resolveUpstreamRegistryUrl', () => { + it('should return default registry URL when PUBLIC_ECR_REGISTRY is not set', () => { + delete process.env.PUBLIC_ECR_REGISTRY; + expect(resolveUpstreamRegistryUrl()).toBe('public.ecr.aws'); + }); + + it('should extract registry domain from PUBLIC_ECR_REGISTRY with prefix', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws/custom-namespace'; + expect(resolveUpstreamRegistryUrl()).toBe('public.ecr.aws'); + }); + + it('should handle PUBLIC_ECR_REGISTRY without prefix', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws'; + expect(resolveUpstreamRegistryUrl()).toBe('public.ecr.aws'); + }); + + it('should extract registry domain from PUBLIC_ECR_REGISTRY with multiple path segments', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws/namespace/subnamespace'; + expect(resolveUpstreamRegistryUrl()).toBe('public.ecr.aws'); + }); + + it('should handle PUBLIC_ECR_REGISTRY with trailing slash', () => { + process.env.PUBLIC_ECR_REGISTRY = 'public.ecr.aws/my-prefix/'; + expect(resolveUpstreamRegistryUrl()).toBe('public.ecr.aws'); + }); + + it('should handle custom registry domains', () => { + process.env.PUBLIC_ECR_REGISTRY = 'custom.registry.example.com/namespace'; + expect(resolveUpstreamRegistryUrl()).toBe('custom.registry.example.com'); + }); + + it('should fallback to default on empty string', () => { + process.env.PUBLIC_ECR_REGISTRY = ''; + expect(resolveUpstreamRegistryUrl()).toBe('public.ecr.aws'); + }); + + it('should fallback to default when env var is deleted', () => { + // Delete the env var to trigger fallback + delete process.env.PUBLIC_ECR_REGISTRY; + expect(resolveUpstreamRegistryUrl()).toBe('public.ecr.aws'); + }); + }); + + describe('constructLocalEcrImageUri', () => { + it('should construct valid local ECR URI', () => { + const uri = constructLocalEcrImageUri('my-image', 'v1.0.0'); + expect(uri).toContain('.dkr.ecr.'); + expect(uri).toContain('.amazonaws.com/my-image:v1.0.0'); + }); + + it('should use resolved image tag', () => { + const uri = constructLocalEcrImageUri('my-image', '1.0.0'); + expect(uri).toContain(':1.0.0'); + }); + + it('should throw error for missing parameters', () => { + expect(() => constructLocalEcrImageUri('', 'v1.0.0')).toThrow(ECRImageError); + expect(() => constructLocalEcrImageUri('my-image', '')).toThrow(ECRImageError); + }); + }); + + describe('constructPullThroughCacheImageUri', () => { + it('should construct valid pull-through cache URI', () => { + const uri = constructPullThroughCacheImageUri('my-prefix', 'my-image', 'v1.0.0'); + // This returns a CDK token, so we can't easily test the exact value + expect(typeof uri).toBe('string'); + }); + + it('should throw error for missing parameters', () => { + expect(() => constructPullThroughCacheImageUri('', 'my-image', 'v1.0.0')).toThrow(ECRImageError); + expect(() => constructPullThroughCacheImageUri('my-prefix', '', 'v1.0.0')).toThrow(ECRImageError); + expect(() => constructPullThroughCacheImageUri('my-prefix', 'my-image', '')).toThrow(ECRImageError); + }); + }); + + describe('determineDeploymentMode', () => { + it('should return local when DIST_OUTPUT_BUCKET is not set', () => { + expect(determineDeploymentMode()).toBe('local'); + }); + + it('should return pipeline when DIST_OUTPUT_BUCKET is set', () => { + process.env.DIST_OUTPUT_BUCKET = 'my-bucket'; + expect(determineDeploymentMode()).toBe('pipeline'); + }); + }); + + describe('resolveSolutionVersion', () => { + it('should use VERSION environment variable when available', () => { + process.env.VERSION = 'v5.1.0'; + const version = resolveSolutionVersion(stack); + expect(version).toBe('v5.1.0'); + }); + + it('should use context version when VERSION env var not available', () => { + delete process.env.VERSION; + const version = resolveSolutionVersion(stack); + expect(version).toBe('v4.0.0'); // From app context + }); + + it('should throw error when no version available in context', () => { + delete process.env.VERSION; + // Create app with empty context - this overrides cdk.json context + const emptyApp = new cdk.App({ context: {} }); + const emptyStack = new cdk.Stack(emptyApp, 'EmptyStack'); + expect(() => resolveSolutionVersion(emptyStack)).toThrow(ECRImageError); + }); + }); + + describe('resolveImageUriWithConditions', () => { + let customImageUriParam: cdk.CfnParameter; + let sharedEcrCachePrefixParam: cdk.CfnParameter; + let stackDeploymentSourceParam: cdk.CfnParameter; + const pullThroughCacheUri = 'standalone-cache-uri'; + + beforeEach(() => { + customImageUriParam = new cdk.CfnParameter(stack, 'CustomImageUri', { + type: 'String', + default: '' + }); + sharedEcrCachePrefixParam = new cdk.CfnParameter(stack, 'SharedEcrCachePrefix', { + type: 'String', + default: 'shared-cache' + }); + stackDeploymentSourceParam = new cdk.CfnParameter(stack, 'StackDeploymentSource', { + type: 'String', + default: StackDeploymentSource.STANDALONE_USE_CASE + }); + }); + + it('should return local ECR URI for local deployment', () => { + const context: ImageResolutionContext = { + deploymentMode: 'local', + gaabVersion: 'v4.0.0' + }; + + const result = resolveImageUriWithConditions( + stack, + GAAB_STRANDS_AGENT_IMAGE_NAME, + context, + customImageUriParam, + sharedEcrCachePrefixParam, + stackDeploymentSourceParam.valueAsString, + pullThroughCacheUri + ); + + // Result is a CDK token, so we can't test exact string content + expect(typeof result).toBe('string'); + expect(result).toBeTruthy(); + }); + + it('should create CloudFormation conditions for pipeline deployment', () => { + const context: ImageResolutionContext = { + deploymentMode: 'pipeline', + gaabVersion: 'v4.0.0' + }; + + const result = resolveImageUriWithConditions( + stack, + GAAB_STRANDS_AGENT_IMAGE_NAME, + context, + customImageUriParam, + sharedEcrCachePrefixParam, + stackDeploymentSourceParam.valueAsString, + pullThroughCacheUri + ); + + // Should return a CloudFormation function (Fn::If) + expect(typeof result).toBe('string'); + + // Verify conditions were created + const template = Template.fromStack(stack); + template.hasCondition('IsStandaloneDeploymentConditionForImageUri', {}); + template.hasCondition('HasCustomAgentImageCondition', {}); + }); + + it('should throw error for missing image name', () => { + const context: ImageResolutionContext = { + deploymentMode: 'pipeline', + gaabVersion: 'v4.0.0' + }; + + // The function doesn't validate image name, it just uses it in the template + const result = resolveImageUriWithConditions( + stack, + '', + context, + customImageUriParam, + sharedEcrCachePrefixParam, + stackDeploymentSourceParam.valueAsString, + pullThroughCacheUri + ); + + expect(typeof result).toBe('string'); + }); + }); + + describe('resolveImageUri', () => { + const baseContext: ImageResolutionContext = { + deploymentMode: 'local', + gaabVersion: 'v4.0.0' + }; + + it('should use local ECR for local deployment mode', () => { + const result = resolveImageUri(stack, GAAB_STRANDS_AGENT_IMAGE_NAME, baseContext); + expect(result.resolutionStrategy).toBe('local-ecr'); + expect(result.imageUri).toContain('.dkr.ecr.'); + expect(result.imageUri).toContain(GAAB_STRANDS_AGENT_IMAGE_NAME); + expect(result.metadata.version).toBe('v4.0.0'); + expect(result.metadata.repository).toBe(GAAB_STRANDS_AGENT_IMAGE_NAME); + }); + + it('should throw error for pipeline deployment mode', () => { + const context: ImageResolutionContext = { + ...baseContext, + deploymentMode: 'pipeline', + sharedEcrCachePrefix: 'my-cache-prefix' + }; + + expect(() => resolveImageUri(stack, GAAB_STRANDS_AGENT_IMAGE_NAME, context)).toThrow(ECRImageError); + expect(() => resolveImageUri(stack, GAAB_STRANDS_AGENT_IMAGE_NAME, context)).toThrow( + /This simplified resolver only supports local deployments/ + ); + }); + + it('should throw error for missing image name', () => { + expect(() => resolveImageUri(stack, '', baseContext)).toThrow(ECRImageError); + }); + + it('should resolve solution version from construct context', () => { + const result = resolveImageUri(stack, GAAB_STRANDS_AGENT_IMAGE_NAME, baseContext); + expect(result.metadata.version).toBe('v4.0.0'); + }); + }); + + describe('convenience functions', () => { + it('should resolve workflow image URI', () => { + const context: ImageResolutionContext = { + deploymentMode: 'local', + gaabVersion: 'v4.0.0' + }; + + const result = resolveWorkflowImageUri(stack, context); + expect(result.resolutionStrategy).toBe('local-ecr'); + expect(result.imageUri).toContain(GAAB_STRANDS_WORKFLOW_IMAGE_NAME); + }); + + it('should resolve agent image URI with conditions', () => { + const customImageUriParam = new cdk.CfnParameter(stack, 'TestCustomImageUri', { + type: 'String', + default: '' + }); + const sharedEcrCachePrefixParam = new cdk.CfnParameter(stack, 'TestSharedEcrCachePrefix', { + type: 'String', + default: 'shared-cache' + }); + const stackDeploymentSourceParam = new cdk.CfnParameter(stack, 'TestStackDeploymentSource', { + type: 'String', + default: StackDeploymentSource.STANDALONE_USE_CASE + }); + + const context: ImageResolutionContext = { + deploymentMode: 'local', + gaabVersion: 'v4.0.0' + }; + + const result = resolveAgentImageUriWithConditions( + stack, + context, + customImageUriParam, + sharedEcrCachePrefixParam, + stackDeploymentSourceParam.valueAsString, + 'pull-through-cache-uri' + ); + + // Result is a CDK token, so we can't test exact string content + expect(typeof result).toBe('string'); + expect(result).toBeTruthy(); + }); + }); +}); diff --git a/source/infrastructure/test/use-case-stacks/agent-core/workflow-stack.test.ts b/source/infrastructure/test/use-case-stacks/agent-core/workflow-stack.test.ts new file mode 100644 index 00000000..66243833 --- /dev/null +++ b/source/infrastructure/test/use-case-stacks/agent-core/workflow-stack.test.ts @@ -0,0 +1,630 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from 'aws-cdk-lib'; +import * as rawCdkJson from '../../../cdk.json'; +import { Match, Template } from 'aws-cdk-lib/assertions'; +import { WorkflowStack } from '../../../lib/use-case-stacks/agent-core/workflow-stack'; +import { + CHAT_PROVIDERS, + GAAB_STRANDS_WORKFLOW_IMAGE_NAME, + LANGCHAIN_LAMBDA_PYTHON_RUNTIME +} from '../../../lib/utils/constants'; + +let globalTemplate: Template; +let globalStack: WorkflowStack; + +const originalConsoleLog = console.log; +const originalConsoleWarn = console.warn; + +// Save original environment variable value at module level +const originalDistOutputBucket = process.env.DIST_OUTPUT_BUCKET; + +beforeAll(() => { + // Ensure test runs in local deployment mode + delete process.env.DIST_OUTPUT_BUCKET; + + console.log = jest.fn(); + console.warn = jest.fn(); + + [globalTemplate, , globalStack] = buildStack(); + + console.log = originalConsoleLog; + console.warn = originalConsoleWarn; +}); + +afterAll(() => { + // Restore original environment variable value + if (originalDistOutputBucket !== undefined) { + process.env.DIST_OUTPUT_BUCKET = originalDistOutputBucket; + } else { + delete process.env.DIST_OUTPUT_BUCKET; + } +}); + +describe('WorkflowStack', () => { + let stack: WorkflowStack; + let template: Template; + + beforeAll(() => { + template = globalTemplate; + stack = globalStack; + }); + + describe('stack initialization', () => { + it('should create stack with correct properties', () => { + expect(stack).toBeInstanceOf(WorkflowStack); + expect(stack.stackName).toBe('TestWorkflowStack'); + }); + + it('should have correct LLM provider name', () => { + expect(stack.getLlmProviderName()).toBe(`${CHAT_PROVIDERS.AGENT_CORE}Workflow`); + }); + }); + + describe('abstract method implementations', () => { + it('should return correct image name', () => { + expect(stack.getImageName()).toBe(GAAB_STRANDS_WORKFLOW_IMAGE_NAME); + }); + + it('should return correct use case type', () => { + expect(stack.getUseCaseType()).toBe('Workflow'); + }); + + it('should return correct WebSocket route name', () => { + expect(stack.getWebSocketRouteName()).toBe('invokeWorkflow'); + }); + + it('should return correct agent runtime name pattern', () => { + const runtimeName = stack.getAgentRuntimeName(); + expect(runtimeName).toMatch(/^gaab_workflow_/); + }); + + it('should include inference profile support', () => { + expect(stack.shouldIncludeInferenceProfileSupport()).toBe(true); + }); + }); + + describe('CloudFormation parameters', () => { + it('should create AgentCore base parameters', () => { + template.hasParameter('EnableLongTermMemory', { + Type: 'String', + AllowedValues: ['Yes', 'No'], + Default: 'Yes', + Description: 'Enable long-term memory for the agent' + }); + }); + + it('should create shared cache parameter', () => { + template.hasParameter('SharedEcrCachePrefix', { + Type: 'String', + Description: + 'Internal parameter - Shared ECR cache prefix automatically provided by deployment platform', + Default: '' + }); + }); + + it('should create authentication parameters', () => { + template.hasParameter('ComponentCognitoUserPoolId', { + Type: 'String', + Description: + 'Cognito User Pool ID for creating component App Client - automatically provided by deployment platform', + Default: '' + }); + }); + + it('should create workflow-specific parameters', () => { + template.hasParameter('UseCasesTableName', { + Type: 'String', + Description: + 'Internal parameter - Use cases table name for workflow agent discovery, automatically provided by deployment platform', + Default: '' + }); + + template.hasParameter('CustomWorkflowImageUri', { + Type: 'String', + Description: + 'Optional custom ECR image URI for workflows. If provided, overrides default image resolution.', + Default: '' + }); + }); + + it('should create inference profile parameter', () => { + template.hasParameter('UseInferenceProfile', { + Type: 'String', + AllowedValues: ['Yes', 'No'], + Default: 'No' + }); + }); + + it('should create multimodal parameters inherited from UseCaseStack', () => { + template.hasParameter('MultimodalEnabled', { + Type: 'String', + Description: + 'If set to Yes, the deployed use case stack will have access to multimodal functionality. This functionality is only enabled for Agentcore-based AgentBuilder and Workflow usecases.', + AllowedValues: ['Yes', 'No'], + AllowedPattern: '^Yes|No$', + Default: 'No' + }); + + template.hasParameter('ExistingMultimodalDataMetadataTable', { + Type: 'String', + Description: + 'Existing multimodal data metadata table name which contains references of the files in S3', + Default: '', + ConstraintDescription: 'Must be a valid DynamoDB table name or empty string' + }); + + template.hasParameter('ExistingMultimodalDataBucket', { + Type: 'String', + Description: 'Existing multimodal data bucket name which stores the multimodal data files', + Default: '', + ConstraintDescription: 'Must be a valid S3 bucket name or empty string' + }); + }); + + it('should have infrastructure ready for automatic multimodal permissions when multimodal is enabled', () => { + // This test verifies that the stack has the necessary infrastructure for multimodal permissions + // The actual automatic addition happens at runtime when multimodal is enabled via CloudFormation parameters + + // Verify that the AgentExecutionRole exists with the correct properties + template.hasResourceProperties('AWS::IAM::Role', { + Description: 'Execution role for AgentCore Runtime', + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + + // Verify that multimodal parameters exist for conditional behavior + template.hasParameter('MultimodalEnabled', { + Type: 'String', + AllowedValues: ['Yes', 'No'], + Default: 'No' + }); + + template.hasParameter('ExistingMultimodalDataMetadataTable', { + Type: 'String', + Default: '' + }); + + template.hasParameter('ExistingMultimodalDataBucket', { + Type: 'String', + Default: '' + }); + + // Verify that multimodal conditions exist for conditional resource creation + const templateJson = template.toJSON(); + expect(templateJson.Conditions).toBeDefined(); + expect(templateJson.Conditions.MultimodalEnabledCondition).toBeDefined(); + }); + + it('should create multimodal permissions policy when multimodal is enabled', () => { + // Create a new stack with multimodal enabled to test the conditional policy creation + const app = new cdk.App({ + context: { + ...rawCdkJson.context, + '@aws-cdk/aws-lambda:recognizeLayerVersion': true, + '@aws-cdk/aws-lambda:recognizeVersionProps': true + } + }); + + // Set multimodal parameters to enabled values on the app before creating the stack + app.node.setContext('multimodalEnabled', 'Yes'); + app.node.setContext('existingMultimodalDataMetadataTable', 'test-metadata-table'); + app.node.setContext('existingMultimodalDataBucket', 'test-multimodal-bucket'); + + const multimodalStack = new WorkflowStack(app, 'TestMultimodalWorkflowStack', { + solutionID: 'SO0276', + solutionVersion: 'v2.0.0', + solutionName: 'generative-ai-application-builder-on-aws', + applicationTrademarkName: 'Generative AI Application Builder on AWS' + }); + + const multimodalTemplate = Template.fromStack(multimodalStack); + + // Verify that the conditional multimodal permissions policy exists with the correct condition + multimodalTemplate.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'multimodalMetadataAccess', + Effect: 'Allow', + Action: 'dynamodb:GetItem', + Resource: Match.anyValue() + }, + { + Sid: 'MultimodalDataBucketAccess', + Effect: 'Allow', + Action: 's3:GetObject', + Resource: Match.anyValue() + } + ]) + } + }); + }); + }); + + describe('parameter organization and grouping', () => { + it('should have parameter groups with proper structure', () => { + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata; + + expect(metadata).toBeDefined(); + expect(metadata['AWS::CloudFormation::Interface']).toBeDefined(); + expect(metadata['AWS::CloudFormation::Interface'].ParameterGroups).toBeDefined(); + + const parameterGroups = metadata['AWS::CloudFormation::Interface'].ParameterGroups; + expect(parameterGroups.length).toBeGreaterThanOrEqual(1); + }); + + it('should maintain backward compatibility for existing parameters', () => { + // Verify all existing parameter names are preserved + const expectedParameters = [ + 'EnableLongTermMemory', + 'SharedEcrCachePrefix', + 'ComponentCognitoUserPoolId', + 'UseInferenceProfile', + 'UseCasesTableName', + 'CustomWorkflowImageUri' + ]; + + expectedParameters.forEach((paramName) => { + expect(() => template.hasParameter(paramName, Match.anyValue())).not.toThrow(); + }); + }); + + it('should have proper parameter validation constraints', () => { + // Test memory parameter validation + template.hasParameter('EnableLongTermMemory', { + AllowedValues: ['Yes', 'No'], + AllowedPattern: '^Yes|No$' + }); + + // Test custom workflow image URI validation with enhanced constraint description + template.hasParameter('CustomWorkflowImageUri', { + AllowedPattern: Match.stringLikeRegexp('.*\\|\\^\\$$'), // ECR_URI_PATTERN + '|^$' + ConstraintDescription: Match.stringLikeRegexp( + 'Must be a valid ECR image URI.*default Workflow image resolution' + ) + }); + + // Test shared cache parameter validation + template.hasParameter('SharedEcrCachePrefix', { + AllowedPattern: '^.*[^/]$|^$' + }); + + // Test use cases table name parameter (workflow-specific) + template.hasParameter('UseCasesTableName', { + Type: 'String', + Default: '' + }); + }); + }); + + describe('AgentCore components setup', () => { + it('should create agent execution role', () => { + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + }); + + it('should create agent invocation lambda', () => { + template.hasResourceProperties('AWS::Lambda::Function', { + Handler: 'handler.lambda_handler', + Runtime: LANGCHAIN_LAMBDA_PYTHON_RUNTIME.name, + MemorySize: 1024, + Timeout: 900, + Environment: { + Variables: { + POWERTOOLS_SERVICE_NAME: 'AGENT_CORE_INVOCATION', + AGENT_RUNTIME_ARN: Match.anyValue(), + USE_CASE_UUID: { + Ref: 'UseCaseUUID' + } + } + } + }); + }); + + it('should create workflow runtime deployment custom resource', () => { + template.hasResourceProperties('Custom::AgentCoreRuntime', { + Resource: 'DEPLOY_AGENT_CORE', + AgentRuntimeName: Match.anyValue(), + ExecutionRoleArn: Match.anyValue(), + UseCaseUUID: { + Ref: 'UseCaseUUID' + }, + UseCaseConfigTableName: { + Ref: 'UseCaseConfigTableName' + }, + UseCaseConfigRecordKey: { + Ref: 'UseCaseConfigRecordKey' + }, + CognitoUserPoolId: { + Ref: 'ComponentCognitoUserPoolId' + }, + MemoryId: { 'Fn::GetAtt': ['AgentMemoryDeploymentAgentCoreMemory9759028C', 'MemoryId'] }, + UseCaseType: 'Workflow' + }); + }); + + it('should create ECR Pull-Through Cache rule for workflows', () => { + // Import the resolver to get environment-aware values + const { + resolveUpstreamRegistryUrl, + resolveUpstreamRepositoryPrefix + } = require('../../../lib/use-case-stacks/agent-core/utils/image-uri-resolver'); + + template.hasResourceProperties('AWS::ECR::PullThroughCacheRule', { + EcrRepositoryPrefix: { + 'Fn::GetAtt': [Match.stringLikeRegexp('.*EcrRepoPrefixGenerator.*'), 'EcrRepoPrefix'] + }, + UpstreamRegistry: 'ecr-public', + UpstreamRegistryUrl: resolveUpstreamRegistryUrl(), + UpstreamRepositoryPrefix: resolveUpstreamRepositoryPrefix() + }); + }); + }); + + describe('Authentication components', () => { + it('should create component Cognito app client conditionally', () => { + template.hasResourceProperties('AWS::Cognito::UserPoolClient', { + UserPoolId: { + Ref: 'ComponentCognitoUserPoolId' + }, + GenerateSecret: true, + ExplicitAuthFlows: ['ALLOW_REFRESH_TOKEN_AUTH'] + }); + }); + + it('should create AgentCore outbound permissions custom resource', () => { + template.hasResourceProperties('Custom::AgentCoreOutboundPermissions', { + Resource: 'AGENTCORE_OUTBOUND_PERMISSIONS', + USE_CASE_ID: Match.anyValue(), + USE_CASE_CLIENT_ID: Match.anyValue(), + USE_CASE_CONFIG_TABLE_NAME: { + Ref: 'UseCaseConfigTableName' + }, + USE_CASE_CONFIG_RECORD_KEY: { + Ref: 'UseCaseConfigRecordKey' + } + }); + }); + + it('should create OAuth client custom resource conditionally', () => { + // OAuth client is created conditionally based on CreateAppClientCondition + const oauthResources = template.findResources('Custom::AgentCoreOAuthClient'); + if (Object.keys(oauthResources).length > 0) { + template.hasResourceProperties('Custom::AgentCoreOAuthClient', { + Resource: 'AGENTCORE_OAUTH_CLIENT', + CLIENT_ID: Match.anyValue(), + CLIENT_SECRET: Match.anyValue(), + DISCOVERY_URL: Match.anyValue(), + PROVIDER_NAME: Match.anyValue() + }); + } else { + // If no OAuth client resources found, that's expected when condition is false + expect(Object.keys(oauthResources)).toHaveLength(0); + } + }); + }); + + describe('WebSocket routes', () => { + it('should configure WebSocket routes for workflow invocation', () => { + template.hasResourceProperties('AWS::ApiGatewayV2::Route', { + RouteKey: 'invokeWorkflow' + }); + }); + }); + + describe('stack outputs', () => { + it('should create workflow runtime ARN output', () => { + template.hasOutput('WorkflowRuntimeArn', { + Description: 'ARN of the deployed Agentcore Runtime', + Value: Match.anyValue() + }); + }); + + it('should create workflow execution role ARN output', () => { + template.hasOutput('WorkflowExecutionRoleArn', { + Description: 'ARN of the Agentcore execution role', + Value: Match.anyValue() + }); + }); + + it('should create workflow invocation lambda ARN output', () => { + template.hasOutput('WorkflowInvocationLambdaArn', { + Description: 'ARN of the Workflow invocation Lambda function', + Value: Match.anyValue() + }); + }); + + it('should create workflow component app client ID output', () => { + template.hasOutput('WorkflowComponentAppClientId', { + Description: 'Cognito App Client ID for the component authentication', + Value: Match.anyValue() + }); + }); + }); + + describe('IAM permissions', () => { + it('should create workflow execution role with Bedrock permissions', () => { + template.hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Principal: { + Service: 'bedrock-agentcore.amazonaws.com' + }, + Action: 'sts:AssumeRole' + } + ]) + } + }); + }); + + it('should create workflow invocation lambda permissions', () => { + template.hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Sid: 'AgentCoreRuntimeInvocation', + Effect: 'Allow', + Action: [ + 'bedrock-agentcore:InvokeAgentRuntime', + 'bedrock-agentcore:InvokeAgentRuntimeForUser' + ], + Resource: Match.anyValue() + } + ]) + } + }); + }); + + it('should create auth-related IAM policies', () => { + // Check for the CustomResourceAuthPolicy specifically + const authPolicies = template.findResources('AWS::IAM::Policy', { + Properties: { + PolicyDocument: { + Statement: Match.arrayWith([ + { + Effect: 'Allow', + Action: 'ssm:GetParameter', + Resource: Match.anyValue() + } + ]) + } + } + }); + + // Should have at least one auth policy if conditions are met + expect(Object.keys(authPolicies).length).toBeGreaterThanOrEqual(0); + }); + }); + + describe('conditions', () => { + it('should create deployment type conditions', () => { + template.hasCondition('IsStandaloneDeploymentCondition', { + 'Fn::Equals': [{ Ref: 'StackDeploymentSource' }, 'StandaloneUseCase'] + }); + }); + + it('should create app client creation condition', () => { + template.hasCondition('CreateAppClientCondition', { + 'Fn::Not': [ + { + 'Fn::Equals': [{ Ref: 'ComponentCognitoUserPoolId' }, ''] + } + ] + }); + }); + }); + + describe('workflow-specific functionality', () => { + it('should have workflow-specific image URI resolution', () => { + // Test that the stack uses the correct image name for workflows + expect(stack.getImageName()).toBe(GAAB_STRANDS_WORKFLOW_IMAGE_NAME); + }); + + it('should configure workflow-specific runtime name', () => { + const runtimeName = stack.getAgentRuntimeName(); + expect(runtimeName).toContain('gaab_workflow_'); + }); + + it('should have use cases table parameter for agent discovery', () => { + // Test that the UseCasesTableName parameter exists for workflow agent discovery + template.hasParameter('UseCasesTableName', { + Type: 'String', + Description: Match.stringLikeRegexp('.*workflow agent discovery.*'), + Default: '' + }); + }); + }); +}); + +describe('WorkflowStack parameter groups', () => { + let template: Template; + + beforeAll(() => { + template = globalTemplate; + }); + + it('should organize parameters into correct groups', () => { + const templateJson = template.toJSON(); + const metadata = templateJson.Metadata?.['AWS::CloudFormation::Interface']; + + expect(metadata).toBeDefined(); + expect(metadata.ParameterGroups).toBeDefined(); + + // Check for Workflow Configuration group (overridden from base AgentCore Configuration) + const workflowConfigGroup = metadata.ParameterGroups.find( + (group: any) => group.Label.default === 'Workflow Configuration' + ); + expect(workflowConfigGroup).toBeDefined(); + expect(workflowConfigGroup.Parameters).toContain('EnableLongTermMemory'); + + // Check for Authentication Configuration group + const authGroup = metadata.ParameterGroups.find( + (group: any) => group.Label.default === 'Authentication Configuration (Internal)' + ); + expect(authGroup).toBeDefined(); + expect(authGroup.Parameters).toContain('ComponentCognitoUserPoolId'); + + // Check for Workflow Agent Discovery group + const workflowDiscoveryGroup = metadata.ParameterGroups.find( + (group: any) => group.Label.default === 'Workflow Agent Discovery (Advanced)' + ); + expect(workflowDiscoveryGroup).toBeDefined(); + expect(workflowDiscoveryGroup.Parameters).toContain('UseCasesTableName'); + + // Check for Custom Image Configuration group + const imageGroup = metadata.ParameterGroups.find( + (group: any) => group.Label.default === 'Custom Image Configuration (Advanced)' + ); + expect(imageGroup).toBeDefined(); + expect(imageGroup.Parameters).toContain('CustomWorkflowImageUri'); + }); +}); + +function buildStack(): [Template, cdk.App, WorkflowStack] { + const app = new cdk.App({ + context: rawCdkJson.context + }); + + const solutionID = process.env.SOLUTION_ID ?? app.node.tryGetContext('solution_id') ?? 'SO0276'; + const version = process.env.VERSION ?? app.node.tryGetContext('solution_version') ?? 'v4.0.0'; + const solutionName = + process.env.SOLUTION_NAME ?? + app.node.tryGetContext('solution_name') ?? + 'Generative AI Application Builder on AWS'; + + const stack = new WorkflowStack(app, 'TestWorkflowStack', { + solutionID: solutionID, + solutionVersion: version, + solutionName: solutionName, + applicationTrademarkName: + rawCdkJson.context.application_trademark_name ?? 'Generative AI Application Builder on AWS' + }); + + const template = Template.fromStack(stack); + return [template, app, stack]; +} diff --git a/source/infrastructure/test/utils/app-registry.test.ts b/source/infrastructure/test/utils/app-registry.test.ts deleted file mode 100644 index 703873d9..00000000 --- a/source/infrastructure/test/utils/app-registry.test.ts +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import * as cdk from 'aws-cdk-lib'; -import * as rawCdkJson from '../../cdk.json'; - -import * as crypto from 'crypto'; - -import { Capture, Match, Template } from 'aws-cdk-lib/assertions'; - -import { BedrockChat } from '../../lib/bedrock-chat-stack'; -import { DeploymentPlatformStack } from '../../lib/deployment-platform-stack'; -import { BaseStack } from '../../lib/framework/base-stack'; -import { SageMakerChat } from '../../lib/sagemaker-chat-stack'; -import { AppRegistry } from '../../lib/utils/app-registry-aspects'; - -describe('When Solution Stack with a nested stack is registered with AppRegistry', () => { - let template: Template; - let app: cdk.App; - let stack: DeploymentPlatformStack; - const appRegApplicationCapture = new Capture(); - const expectedTags = { - 'Solutions:ApplicationType': 'AWS-Solutions', - 'Solutions:SolutionID': 'SO0276', - 'Solutions:SolutionName': 'generative-ai-application-builder-on-aws', - 'Solutions:SolutionVersion': rawCdkJson.context.solution_version - }; - - beforeAll(() => { - app = new cdk.App({ - context: rawCdkJson.context - }); - - stack = new DeploymentPlatformStack(app, 'TestStack', { - solutionID: rawCdkJson.context.solution_id, - solutionName: rawCdkJson.context.solution_name, - solutionVersion: rawCdkJson.context.solution_version, - applicationTrademarkName: rawCdkJson.context.application_trademark_name - }); - cdk.Aspects.of(app).add( - new AppRegistry(stack, 'AppRegistryAspect', { - solutionName: rawCdkJson.context.solution_name, - applicationName: rawCdkJson.context.app_registry_name, - solutionID: rawCdkJson.context.solution_id, - solutionVersion: rawCdkJson.context.solution_version, - applicationType: rawCdkJson.context.application_type - }) - ); - template = Template.fromStack(stack); - }); - - it('should create a ServiceCatalogueRegistry Application', () => { - expect(app.node.tryGetContext('app_registry_name')).toStrictEqual('GAAB'); - expect(app.node.tryGetContext('solution_name')).toStrictEqual('generative-ai-application-builder-on-aws'); - template.resourceCountIs('AWS::ServiceCatalogAppRegistry::Application', 1); - template.hasResourceProperties('AWS::ServiceCatalogAppRegistry::Application', { - Name: { - 'Fn::Join': ['', ['App-GAAB-', { Ref: 'AWS::StackName' }]] - }, - Description: `Service Catalog application to track and manage all your resources for the solution ${expectedTags['Solutions:SolutionName']}`, - Tags: expectedTags - }); - }); - - it('should create ResourceAssociation for parent stack', () => { - template.resourceCountIs('AWS::ServiceCatalogAppRegistry::ResourceAssociation', 1); - template.hasResourceProperties('AWS::ServiceCatalogAppRegistry::ResourceAssociation', { - Application: { - 'Fn::GetAtt': [appRegApplicationCapture, 'Id'] - }, - Resource: { - Ref: 'AWS::StackId' - }, - ResourceType: 'CFN_STACK' - }); - }); - - it('should create ResourceAssociation for WebApp Nested Stack', () => { - const webAppStack = stack.uiDistribution; - const nestedTemplate = Template.fromStack(webAppStack); - nestedTemplate.hasResourceProperties('AWS::ServiceCatalogAppRegistry::ResourceAssociation', { - Application: { - Ref: Match.anyValue() - }, - Resource: { - Ref: 'AWS::StackId' - }, - ResourceType: 'CFN_STACK' - }); - - template.hasResource('AWS::CloudFormation::Stack', { - Type: 'AWS::CloudFormation::Stack', - Properties: Match.anyValue(), - DependsOn: [Match.anyValue(), Match.anyValue(), 'WebConfig'], - UpdateReplacePolicy: 'Delete', - DeletionPolicy: 'Delete', - Condition: 'DeployWebAppUIInfrastructureCondition' - }); - }); - - const attGrpCapture = new Capture(); - it('should have AttributeGroupAssociation', () => { - template.resourceCountIs('AWS::ServiceCatalogAppRegistry::AttributeGroupAssociation', 1); - template.hasResourceProperties('AWS::ServiceCatalogAppRegistry::AttributeGroupAssociation', { - Application: { - 'Fn::GetAtt': [Match.stringLikeRegexp('RegistrySetup*'), 'Id'] - }, - AttributeGroup: { - 'Fn::GetAtt': [attGrpCapture, 'Id'] - } - }); - expect(template.toJSON()['Resources'][attGrpCapture.asString()]['Type']).toStrictEqual( - 'AWS::ServiceCatalogAppRegistry::AttributeGroup' - ); - }); - - it('should have AttributeGroup', () => { - template.resourceCountIs('AWS::ServiceCatalogAppRegistry::AttributeGroup', 1); - template.hasResourceProperties('AWS::ServiceCatalogAppRegistry::AttributeGroup', { - Attributes: { - applicationType: 'AWS-Solutions', - solutionID: 'SO0276', - solutionName: expectedTags['Solutions:SolutionName'], - version: expectedTags['Solutions:SolutionVersion'] - }, - Name: { - 'Fn::Join': [ - '', - [ - 'AttrGrp-', - { - Ref: 'AWS::StackName' - } - ] - ] - }, - Description: 'Attributes for Solutions Metadata' - }); - }); -}); - -describe('When injecting AppRegistry aspect', () => { - it('The use case stack should have also have DependsOn with DeleteResourceAssociation', () => { - const stackList: (typeof BaseStack)[] = [BedrockChat, SageMakerChat]; - const solutionID = rawCdkJson.context.solution_id; - const version = rawCdkJson.context.solution_version; - const solutionName = rawCdkJson.context.solution_name; - const applicationType = rawCdkJson.context.application_type; - const applicationName = rawCdkJson.context.app_registry_name; - const applicationTrademarkName = rawCdkJson.context.application_trademark_name; - - for (const stack of stackList) { - const app = new cdk.App(); - const instance = new stack(app, stack.name, { - description: `(${solutionID}-${stack.name}) - ${solutionName} - ${stack.name} - Version ${version}`, - synthesizer: new cdk.DefaultStackSynthesizer({ - generateBootstrapVersionRule: false - }), - solutionID: solutionID, - solutionVersion: version, - solutionName: `${solutionName}`, - applicationTrademarkName: applicationTrademarkName, - stackName: `${stack.name}-${crypto.randomUUID().substring(0, 8)}` - }); - - cdk.Aspects.of(instance).add( - new AppRegistry(instance, 'AppRegistry', { - solutionID: solutionID, - solutionVersion: version, - solutionName: solutionName, - applicationType: applicationType, - applicationName: `${applicationName}-${cdk.Fn.ref('UseCaseUUID')}` - }) - ); - - const template = Template.fromStack(instance); - - if (instance.nested) { - const stackResources = template.findResources('AWS::ServiceCatalogAppRegistry::ResourceAssociation'); - for (const stackResource in stackResources) { - expect( - stackResources[stackResource]['DependsOn'].includes('DeleteResourceAssociation') - ).toBeTruthy(); - } - } - } - }); -}); diff --git a/source/infrastructure/test/utils/common-utils.test.ts b/source/infrastructure/test/utils/common-utils.test.ts index 030fb9d7..473572cb 100644 --- a/source/infrastructure/test/utils/common-utils.test.ts +++ b/source/infrastructure/test/utils/common-utils.test.ts @@ -135,10 +135,7 @@ describe('when calling resource properties in local synth', () => { }); customResource = new CustomInfraSetup(stack, 'Infra', { solutionID: rawCdkJson.context.solution_id, - solutionVersion: rawCdkJson.context.solution_version, - sendAnonymousMetricsCondition: new cdk.CfnCondition(stack, 'TestCondition', { - expression: cdk.Fn.conditionEquals('Yes', 'Yes') - }) + solutionVersion: rawCdkJson.context.solution_version }).customResourceLambda; resourceProperties = util.getResourceProperties(stack, asset, customResource).properties; }); @@ -212,10 +209,7 @@ describe('when calling resource properties in a builder pipeline', () => { }); customResource = new CustomInfraSetup(stack, 'Infra', { solutionID: rawCdkJson.context.solution_id, - solutionVersion: rawCdkJson.context.solution_version, - sendAnonymousMetricsCondition: new cdk.CfnCondition(stack, 'TestCondition', { - expression: cdk.Fn.conditionEquals('Yes', 'Yes') - }) + solutionVersion: rawCdkJson.context.solution_version }).customResourceLambda; new cdk.CustomResource(stack, 'TestResource', { diff --git a/source/infrastructure/test/utils/custom-infra-setup.test.ts b/source/infrastructure/test/utils/custom-infra-setup.test.ts index cab18a72..422e3912 100644 --- a/source/infrastructure/test/utils/custom-infra-setup.test.ts +++ b/source/infrastructure/test/utils/custom-infra-setup.test.ts @@ -16,17 +16,14 @@ describe('When creating the custom resource infrastructure construct', () => { const stack = new cdk.Stack(app, 'TestStack'); new CustomInfraSetup(stack, 'TestInfraSetup', { solutionID: rawCdkJson.context.solution_id, - solutionVersion: rawCdkJson.context.solution_version, - sendAnonymousMetricsCondition: new cdk.CfnCondition(stack, 'TestCondition', { - expression: cdk.Fn.conditionEquals('Yes', 'Yes') - }) + solutionVersion: rawCdkJson.context.solution_version }); template = Template.fromStack(stack); }); const customResourceLambdaRole = new Capture(); - const anonymousMetricsLambda = new Capture(); + const metricsLambda = new Capture(); it('should have a custom resource lambda definition', () => { template.hasResourceProperties('AWS::Lambda::Function', { @@ -232,7 +229,7 @@ describe('When creating the custom resource infrastructure construct', () => { }); }); - it('should have an anonymous metrics lambda definition', () => { + it('should have a metrics lambda definition', () => { template.hasResourceProperties('AWS::Lambda::Function', { Code: Match.anyValue(), Role: { @@ -246,7 +243,7 @@ describe('When creating the custom resource infrastructure construct', () => { }, Environment: { Variables: { - POWERTOOLS_SERVICE_NAME: 'ANONYMOUS-CW-METRICS', + POWERTOOLS_SERVICE_NAME: 'CW-METRICS', SOLUTION_ID: rawCdkJson.context.solution_id, SOLUTION_VERSION: rawCdkJson.context.solution_version, LOG_LEVEL: Match.absent(), @@ -256,14 +253,14 @@ describe('When creating the custom resource infrastructure construct', () => { }); }); - it('Should create and attach the anonymous metrics event rule with scheduled expression', () => { + it('Should create and attach the metrics event rule with scheduled expression', () => { template.hasResourceProperties('AWS::Events::Rule', { ScheduleExpression: 'rate(3 hours)', State: 'ENABLED', Targets: [ { Arn: { - 'Fn::GetAtt': [anonymousMetricsLambda, 'Arn'] + 'Fn::GetAtt': [metricsLambda, 'Arn'] }, Id: 'Target0' } @@ -271,7 +268,7 @@ describe('When creating the custom resource infrastructure construct', () => { }); }); - it('should have an anonymous metrics with roles', () => { + it('should have metrics with roles', () => { template.resourceCountIs('AWS::IAM::Role', 2); template.hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { @@ -372,7 +369,7 @@ describe('When creating the custom resource infrastructure construct', () => { template.hasResourceProperties('AWS::Lambda::Permission', { Action: 'lambda:InvokeFunction', FunctionName: { - 'Fn::GetAtt': [Match.stringLikeRegexp('ScheduledAnonymousMetrics'), 'Arn'] + 'Fn::GetAtt': [Match.stringLikeRegexp('ScheduledMetrics'), 'Arn'] }, Principal: 'events.amazonaws.com', SourceArn: { diff --git a/source/infrastructure/test/utils/solution-helper.test.ts b/source/infrastructure/test/utils/solution-helper.test.ts index d315a398..ee95e16b 100644 --- a/source/infrastructure/test/utils/solution-helper.test.ts +++ b/source/infrastructure/test/utils/solution-helper.test.ts @@ -18,48 +18,29 @@ describe('When solution helper construct is created', () => { }); const customInfra = new CustomInfraSetup(stack, 'TestInfra', { solutionID: rawCdkJson.context.solution_id, - solutionVersion: rawCdkJson.context.solution_version, - sendAnonymousMetricsCondition: condition + solutionVersion: rawCdkJson.context.solution_version }); new SolutionHelper(stack, 'SolutionHelper', { customResource: customInfra.customResourceLambda, solutionID: 'SO0999', - version: 'v9.9.9', - sendAnonymousMetricsCondition: condition + version: 'v9.9.9' }); template = Template.fromStack(stack); jsonTemplate = template.toJSON(); }); - it('should create a custom resource for anonymous data', () => { + it('should create a custom resource for data', () => { const customResourceLambda = new Capture(); - template.resourceCountIs('Custom::AnonymousData', 1); - template.hasResourceProperties('Custom::AnonymousData', { + template.resourceCountIs('Custom::Data', 1); + template.hasResourceProperties('Custom::Data', { ServiceToken: { 'Fn::GetAtt': [customResourceLambda, 'Arn'] }, - Resource: 'ANONYMOUS_METRIC' + Resource: 'METRIC' }); expect(jsonTemplate['Resources'][customResourceLambda.asString()]['Type']).toEqual('AWS::Lambda::Function'); }); - - const conditionLogicalId = new Capture(); - it('should have a custom resource block with a condition', () => { - template.hasResource('Custom::AnonymousData', { - Type: 'Custom::AnonymousData', - Properties: Match.anyValue(), - UpdateReplacePolicy: 'Delete', - DeletionPolicy: 'Delete', - Condition: conditionLogicalId - }); - }); - - it('should have a conditions block in the template', () => { - template.hasCondition(conditionLogicalId.asString(), { - 'Fn::Equals': ['Yes', 'Yes'] - }); - }); }); diff --git a/source/infrastructure/test/vpc/custom-vpc.test.ts b/source/infrastructure/test/vpc/custom-vpc.test.ts index 182ed0fe..d545b94a 100644 --- a/source/infrastructure/test/vpc/custom-vpc.test.ts +++ b/source/infrastructure/test/vpc/custom-vpc.test.ts @@ -588,7 +588,9 @@ describe('When creating a custom VPC', () => { expect(jsonTemplate['Resources'][natGatewayIdCapture.asString()]['Type']).toBe('AWS::EC2::NatGateway'); }); - it('should have additional interface endpoints for CloudFormation and CloudWatch', () => { + it('should have interface endpoints for CloudWatch, CloudWatch Logs, X-Ray and SQS', () => { + // Verify we have the expected number of VPC endpoints: 2 Gateway (S3, DDB) + 4 Interface (CloudWatch, Logs, X-Ray, SQS) + template.resourceCountIs('AWS::EC2::VPCEndpoint', 6); template.hasResourceProperties('AWS::EC2::VPCEndpoint', { PolicyDocument: { Statement: [ diff --git a/source/lambda/agentcore-invocation/handler.py b/source/lambda/agentcore-invocation/handler.py new file mode 100644 index 00000000..ea91f29f --- /dev/null +++ b/source/lambda/agentcore-invocation/handler.py @@ -0,0 +1,523 @@ +#!/usr/bin/env python3 +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +import os +import time +from typing import Any, Dict, List + +from aws_lambda_powertools import Logger, Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext +from utils import ( + AgentCoreClient, + AgentCoreClientError, + AgentCoreConfigurationError, + AgentCoreInvocationError, + EventProcessor, + WebsocketErrorHandler, + get_keep_alive_manager, + get_metrics_client, + get_service_client, +) +from utils.constants import ( + CONNECTION_ID_KEY, + CONVERSATION_ID_KEY, + END_CONVERSATION_TOKEN, + FILES_KEY, + INPUT_TEXT_KEY, + LAMBDA_REMAINING_TIME_THRESHOLD_MS, + MESSAGE_ID_KEY, + TRACE_ID_ENV_VAR, + USER_ID_KEY, + WEBSOCKET_CALLBACK_URL_ENV_VAR, + CloudWatchNamespaces, +) + +logger = Logger(utc=True) +tracer = Tracer() +metrics = get_metrics_client(CloudWatchNamespaces.COLD_STARTS) + +WEBSOCKET_CALLBACK_URL = os.environ.get(WEBSOCKET_CALLBACK_URL_ENV_VAR) + +# X-Ray trace ID format constants +XRAY_ROOT_PREFIX = "Root=" + +_agentcore_client = None + + +def extract_root_trace_id(trace_id: str) -> str: + """ + Extract the root trace ID from AWS X-Ray trace ID format. + + X-Ray trace IDs come in format: Root=1-xxx-yyy;Parent=zzz;Sampled=0;Lineage=... + This function extracts just the root portion: 1-xxx-yyy + + Args: + trace_id: Full X-Ray trace ID string + + Returns: + Root trace ID (e.g., "1-68f6b98e-7ae43e64d1ed2eb8ad2029c9") + """ + if not trace_id or trace_id == "unknown": + return trace_id + + if trace_id.startswith(XRAY_ROOT_PREFIX): + parts = trace_id.split(";") + for part in parts: + if part.startswith(XRAY_ROOT_PREFIX): + return part.split("=", 1)[1] + + return trace_id + + +def get_agentcore_client() -> AgentCoreClient: + """ + Get or create the AgentCore client instance. + + Returns: + AgentCoreClient: The initialized AgentCore client + + Raises: + AgentCoreConfigurationError: If client initialization fails + """ + global _agentcore_client + + if _agentcore_client is None: + logger.info("Initializing AgentCore client") + _agentcore_client = AgentCoreClient() + logger.info("AgentCore client initialized successfully") + + return _agentcore_client + + +@tracer.capture_lambda_handler +@metrics.log_metrics(capture_cold_start_metric=True) +def lambda_handler(event: Dict[str, Any], context: LambdaContext) -> Dict: + """ + Lambda handler for AgentCore invocation via SQS. + + Processes SQS messages containing WebSocket requests, invokes the AgentCore Runtime + using the bedrock-agentcore SDK, and streams responses back to the Chat UI. + """ + records = event["Records"] + total_records = len(records) + logger.debug(f"Total records received in the event: {total_records}") + + processed_records = 0 + batch_item_failures = set() # Use a set to avoid duplicates + sqs_batch_response = {} + + index = 0 + while index < len(records): + record = records[index] + + if context.get_remaining_time_in_millis() < LAMBDA_REMAINING_TIME_THRESHOLD_MS: + batch_item_failures.update(r["messageId"] for r in records[index:]) + break + + processed_event = EventProcessor(record).process() + connection_id = processed_event[CONNECTION_ID_KEY] + conversation_id = processed_event[CONVERSATION_ID_KEY] + input_text = processed_event[INPUT_TEXT_KEY] + files = processed_event.get(FILES_KEY, []) + user_id = processed_event[USER_ID_KEY] + message_id = processed_event[MESSAGE_ID_KEY] + + try: + invoke_agent_core( + connection_id=connection_id, + conversation_id=conversation_id, + input_text=input_text, + user_id=user_id, + message_id=message_id, + files=files, + ) + + processed_records += 1 + index += 1 # Move to the next record only if successful + except Exception as ex: + tracer_id = os.getenv(TRACE_ID_ENV_VAR) + logger.error(f"An exception occurred in the processing of AgentCore request: {ex}", xray_trace_id=tracer_id) + + send_error_message(connection_id, conversation_id, message_id) + + while ( + index < len(records) + and records[index]["messageAttributes"]["connectionId"]["stringValue"] == connection_id + ): + batch_item_failures.add(records[index]["messageId"]) + index += 1 + + sqs_batch_response["batchItemFailures"] = [{"itemIdentifier": message_id} for message_id in batch_item_failures] + logger.debug( + f"Processed {processed_records} out of {total_records} records. SQS Batch Response: {json.dumps(sqs_batch_response)}" + ) + return sqs_batch_response + + +def _process_content_chunk( + chunk: Dict[str, Any], + connection_id: str, + conversation_id: str, + message_id: str, + chunk_count: int, + elapsed: float, +) -> None: + """Process and send content chunk to WebSocket.""" + logger.info( + f"[HANDLER_STREAMING] Sending content chunk #{chunk_count} to WebSocket at {elapsed:.3f}s: {len(chunk['text'])} chars" + ) + send_websocket_message(connection_id, conversation_id, chunk["text"], message_id) + + +def _process_thinking_chunk( + chunk: Dict[str, Any], + connection_id: str, + conversation_id: str, + message_id: str, + chunk_count: int, + elapsed: float, +) -> None: + """Process and send thinking chunk to WebSocket.""" + thinking_text = chunk["thinking"].get("thinkingMessage", "Processing...") + tagged_content = f"{thinking_text}" + logger.info(f"[HANDLER_STREAMING] Sending thinking chunk #{chunk_count} to WebSocket at {elapsed:.3f}s") + send_websocket_message(connection_id, conversation_id, tagged_content, message_id) + + +def _process_tool_use_chunk( + chunk: Dict[str, Any], + connection_id: str, + conversation_id: str, + message_id: str, + chunk_count: int, + elapsed: float, +) -> None: + """Process and send tool usage chunk to WebSocket.""" + logger.info(f"[HANDLER_TOOL_USE] Received tool_use chunk #{chunk_count} at {elapsed:.3f}s") + logger.debug( + f"[HANDLER_TOOL_USE] Tool usage chunk structure: type={chunk.get('type')}, " + f"toolUsage keys={list(chunk.get('toolUsage', {}).keys())}" + ) + logger.debug(f"[HANDLER_TOOL_USE] Tool usage data: {json.dumps(chunk.get('toolUsage', {}), default=str)}") + + tool_usage = chunk.get("toolUsage", {}) + if not isinstance(tool_usage, dict): + logger.error(f"[HANDLER_TOOL_USE] Invalid tool usage structure: expected dict, got {type(tool_usage)}") + return + + expected_fields = ["toolName", "status", "startTime"] + missing_fields = [field for field in expected_fields if field not in tool_usage] + if missing_fields: + logger.warning(f"[HANDLER_TOOL_USE] Tool usage missing expected fields: {missing_fields}") + + logger.info( + f"[HANDLER_TOOL_USE] Sending tool usage chunk to WebSocket: " + f"toolName={tool_usage.get('toolName')}, status={tool_usage.get('status')}" + ) + send_tool_usage(connection_id, conversation_id, tool_usage, message_id) + logger.info(f"[HANDLER_TOOL_USE] Successfully sent tool usage chunk #{chunk_count}") + + +def _process_stream_chunks( + response_stream, + connection_id: str, + conversation_id: str, + message_id: str, + keep_alive_manager, + start_time: float, +) -> tuple: + """ + Process all chunks from the response stream. + + Returns: + Tuple of (content_count, thinking_count, tool_count, websocket_count) + """ + content_chunk_count = 0 + thinking_chunk_count = 0 + tool_chunk_count = 0 + websocket_chunk_count = 0 + + for chunk in response_stream: + keep_alive_manager.update_activity(connection_id) + chunk_elapsed = time.time() - start_time + + chunk_type = chunk.get("type") + + if chunk_type == "content" and chunk.get("text"): + content_chunk_count += 1 + websocket_chunk_count += 1 + _process_content_chunk( + chunk, connection_id, conversation_id, message_id, content_chunk_count, chunk_elapsed + ) + + elif chunk_type == "thinking" and "thinking" in chunk: + thinking_chunk_count += 1 + websocket_chunk_count += 1 + _process_thinking_chunk( + chunk, connection_id, conversation_id, message_id, thinking_chunk_count, chunk_elapsed + ) + + elif chunk_type == "tool_use" and "toolUsage" in chunk: + tool_chunk_count += 1 + websocket_chunk_count += 1 + _process_tool_use_chunk(chunk, connection_id, conversation_id, message_id, tool_chunk_count, chunk_elapsed) + + elif chunk_type == "error": + error_message = chunk.get("message", chunk.get("error", "An error occurred")) + logger.error( + f"[HANDLER_STREAMING] Error chunk received for conversation {conversation_id} at {chunk_elapsed:.3f}s: {error_message}" + ) + # Raise exception to let top-level handler send error message (matches chat lambda pattern) + raise AgentCoreInvocationError(f"AgentCore streaming error: {error_message}") + + elif chunk_type == "completion": + logger.info( + f"[HANDLER_STREAMING] Response completion received for conversation {conversation_id} at {chunk_elapsed:.3f}s" + ) + break + + else: + logger.warning(f"[HANDLER_STREAMING] Unexpected chunk type received: {chunk_type}") + + return content_chunk_count, thinking_chunk_count, tool_chunk_count, websocket_chunk_count + + +@tracer.capture_method +def invoke_agent_core( + connection_id: str, + conversation_id: str, + input_text: str, + user_id: str, + message_id: str, + files: List[Dict[str, Any]], +) -> None: + """ + Invoke the AgentCore Runtime with SSE streaming support and stream responses back to the WebSocket connection. + + This function implements the core streaming logic with immediate forwarding of response chunks, + keep-alive management for long-running tasks, and automatic fallback to non-streaming mode when SSE is unavailable or fails. + + Args: + connection_id: WebSocket connection ID + conversation_id: Conversation ID + input_text: User input text + user_id: User ID + message_id: Message ID for response formatting + + Raises: + AgentCoreClientError: If AgentCore invocation fails + """ + logger.info(f"Processing AgentCore request for conversation {conversation_id}") + + keep_alive_manager = get_keep_alive_manager(send_websocket_message) + start_time = time.time() + + try: + agentcore_client = get_agentcore_client() + keep_alive_manager.start_keep_alive(connection_id, conversation_id, message_id) + + try: + response_stream = agentcore_client.invoke_agent( + input_text=input_text, + conversation_id=conversation_id, + user_id=user_id, + message_id=message_id, + files=files, + ) + + content_count, thinking_count, tool_count, websocket_count = _process_stream_chunks( + response_stream, connection_id, conversation_id, message_id, keep_alive_manager, start_time + ) + + websocket_count += 1 + logger.info("[HANDLER_STREAMING] Sending END_CONVERSATION_TOKEN to WebSocket") + send_websocket_message(connection_id, conversation_id, END_CONVERSATION_TOKEN, message_id) + + total_elapsed = time.time() - start_time + logger.info( + f"[HANDLER_STREAMING] Successfully completed AgentCore invocation for conversation {conversation_id}" + ) + logger.info( + f"[HANDLER_STREAMING] Total WebSocket chunks sent: {websocket_count} " + f"(content: {content_count}, thinking: {thinking_count}, tool: {tool_count}) " + f"in {total_elapsed:.3f}s" + ) + + except AgentCoreInvocationError as invocation_error: + logger.error(f"AgentCore invocation failed for conversation {conversation_id}: {str(invocation_error)}") + raise + + except AgentCoreConfigurationError as config_error: + tracer_id = os.getenv(TRACE_ID_ENV_VAR) + logger.error(f"AgentCore configuration error: {str(config_error)}", xray_trace_id=tracer_id) + raise + except AgentCoreClientError as client_error: + tracer_id = os.getenv(TRACE_ID_ENV_VAR) + logger.error(f"AgentCore client error: {str(client_error)}", xray_trace_id=tracer_id) + raise + except Exception as e: + tracer_id = os.getenv(TRACE_ID_ENV_VAR) + logger.error(f"Unexpected error during AgentCore invocation: {str(e)}", xray_trace_id=tracer_id) + raise AgentCoreInvocationError(f"Unexpected error during AgentCore invocation: {str(e)}") from e + finally: + keep_alive_manager.stop_keep_alive(connection_id) + logger.info(f"Stopped keep-alive monitoring for connection {connection_id}") + + +def send_websocket_message(connection_id: str, conversation_id: str, message: str, message_id: str) -> None: + """ + Send a message to WebSocket connection. + + Args: + connection_id: WebSocket connection ID + conversation_id: Conversation ID + message: Message to send + message_id: Message ID for response formatting + """ + try: + client = get_service_client("apigatewaymanagementapi", endpoint_url=WEBSOCKET_CALLBACK_URL) + + formatted_response = format_response(conversation_id, message_id, data=message) + + client.post_to_connection(ConnectionId=connection_id, Data=formatted_response) + + except Exception as e: + logger.error( + f"Error sending WebSocket message to {connection_id}: {str(e)}", + xray_trace_id=os.environ.get(TRACE_ID_ENV_VAR), + ) + raise + + +def send_tool_usage(connection_id: str, conversation_id: str, tool_usage: Dict[str, Any], message_id: str) -> None: + """ + Send tool usage information to WebSocket connection. + + Expected tool usage structure: + { + "toolName": str, # Required: Name of the tool being used + "status": str, # Required: "started", "in_progress", "completed", or "failed" + "startTime": str, # Required: ISO timestamp when tool started + "endTime": str, # Optional: ISO timestamp when tool completed/failed + "toolInput": dict, # Optional: Tool input parameters + "toolOutput": str, # Optional: Tool output/result + "mcpServerName": str, # Optional: MCP server name if tool is from MCP + "error": str # Optional: Error message if status is "failed" + } + + WebSocket message format: + { + "conversationId": str, + "messageId": str, + "toolUsage": { + ... (tool usage object as above) + } + } + + Args: + connection_id: WebSocket connection ID + conversation_id: Conversation ID + tool_usage: Tool usage information object + message_id: Message ID for response formatting + """ + try: + # DEBUG: Log before formatting + logger.debug(f"[SEND_TOOL_USAGE] Preparing to send tool usage for connection {connection_id}") + logger.debug(f"[SEND_TOOL_USAGE] Tool usage data: {json.dumps(tool_usage, default=str)}") + + # Validate tool usage structure + if not isinstance(tool_usage, dict): + logger.error(f"[SEND_TOOL_USAGE] Invalid tool usage type: expected dict, got {type(tool_usage)}") + return + + # Check for required fields + required_fields = ["toolName", "status", "startTime"] + missing_fields = [field for field in required_fields if field not in tool_usage] + if missing_fields: + logger.error( + f"[SEND_TOOL_USAGE] Tool usage missing required fields: {missing_fields}. " + f"Available fields: {list(tool_usage.keys())}" + ) + # Continue anyway - frontend should handle gracefully + + client = get_service_client("apigatewaymanagementapi", endpoint_url=WEBSOCKET_CALLBACK_URL) + + formatted_response = format_response(conversation_id, message_id, toolUsage=tool_usage) + + # DEBUG: Log formatted response structure + logger.debug(f"[SEND_TOOL_USAGE] Formatted WebSocket message: {formatted_response[:500]}") + + # Verify formatted response structure + try: + response_dict = json.loads(formatted_response) + logger.debug( + f"[SEND_TOOL_USAGE] WebSocket message structure verified: " + f"conversationId={response_dict.get('conversationId')}, " + f"messageId={response_dict.get('messageId')}, " + f"has_toolUsage={'toolUsage' in response_dict}" + ) + except json.JSONDecodeError as json_err: + logger.error(f"[SEND_TOOL_USAGE] Failed to parse formatted response as JSON: {json_err}") + return + + client.post_to_connection(ConnectionId=connection_id, Data=formatted_response) + + logger.info( + f"[SEND_TOOL_USAGE] Successfully sent tool usage to connection {connection_id}: " + f"toolName={tool_usage.get('toolName')}, status={tool_usage.get('status')}" + ) + + except Exception as e: + logger.error( + f"[SEND_TOOL_USAGE] Error sending tool usage to {connection_id}: {str(e)}", + xray_trace_id=os.environ.get(TRACE_ID_ENV_VAR), + ) + logger.error(f"[SEND_TOOL_USAGE] Tool usage data that failed: {json.dumps(tool_usage, default=str)}") + # Don't raise - tool usage errors shouldn't break the conversation flow + logger.warning("[SEND_TOOL_USAGE] Continuing conversation despite tool usage send error") + + +def send_error_message(connection_id: str, conversation_id: str, message_id: str) -> None: + """ + Send an error message to WebSocket connection using WebsocketErrorHandler. + + Args: + connection_id: WebSocket connection ID + conversation_id: Conversation ID + message_id: Message ID for response formatting + """ + trace_id = os.environ.get(TRACE_ID_ENV_VAR, "unknown") + + root_trace_id = trace_id + if trace_id and trace_id.startswith(XRAY_ROOT_PREFIX): + parts = trace_id.split(";") + for part in parts: + if part.startswith(XRAY_ROOT_PREFIX): + root_trace_id = part.split("=", 1)[1] + break + + formatted_error_message = f"AgentCore invocation service failed to respond. Please contact your system administrator for support and quote the following trace id: {root_trace_id}" + + error_handler = WebsocketErrorHandler( + connection_id=connection_id, trace_id=root_trace_id, conversation_id=conversation_id, message_id=message_id + ) + error_handler.post_token_to_connection(formatted_error_message) + + +def format_response(conversation_id: str, message_id: str = None, **kwargs: Any) -> str: + """ + Format the payload following the existing GAAB response formatting patterns. + + Args: + conversation_id: Conversation ID + message_id: Message ID (optional, can be None for error responses) + **kwargs: Additional fields (data, errorMessage, traceId, etc.) + + Returns: + JSON-formatted string containing the formatted response + """ + response_dict = {CONVERSATION_ID_KEY: conversation_id, MESSAGE_ID_KEY: message_id} + response_dict.update(kwargs) + return json.dumps(response_dict) diff --git a/source/lambda/agentcore-invocation/poetry.lock b/source/lambda/agentcore-invocation/poetry.lock new file mode 100644 index 00000000..887e418a --- /dev/null +++ b/source/lambda/agentcore-invocation/poetry.lock @@ -0,0 +1,1148 @@ +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. + +[[package]] +name = "aws-lambda-powertools" +version = "3.19.0" +description = "Powertools for AWS Lambda (Python) is a developer toolkit to implement Serverless best practices and increase developer velocity." +optional = false +python-versions = "<4.0.0,>=3.9" +groups = ["main", "test"] +files = [ + {file = "aws_lambda_powertools-3.19.0-py3-none-any.whl", hash = "sha256:98f18d35f843cd46b80ccadcf39eefc0c489325bea116383bd93048a5241d9fc"}, + {file = "aws_lambda_powertools-3.19.0.tar.gz", hash = "sha256:8897ba4be0b3a51f2b8f68946d650f3ef574fa2c40395544de03bd0c61979999"}, +] + +[package.dependencies] +jmespath = ">=1.0.1,<2.0.0" +typing-extensions = ">=4.11.0,<5.0.0" + +[package.extras] +all = ["aws-encryption-sdk (>=3.1.1,<5.0.0)", "aws-xray-sdk (>=2.8.0,<3.0.0)", "fastjsonschema (>=2.14.5,<3.0.0)", "jsonpath-ng (>=1.6.0,<2.0.0)", "pydantic (>=2.4.0,<3.0.0)", "pydantic-settings (>=2.6.1,<3.0.0)"] +aws-sdk = ["boto3 (>=1.34.32,<2.0.0)"] +datadog = ["datadog-lambda (>=6.106.0,<7.0.0)"] +datamasking = ["aws-encryption-sdk (>=3.1.1,<5.0.0)", "jsonpath-ng (>=1.6.0,<2.0.0)"] +kafka-consumer-avro = ["avro (>=1.12.0,<2.0.0)"] +kafka-consumer-protobuf = ["protobuf (>=6.30.2,<7.0.0)"] +parser = ["pydantic (>=2.4.0,<3.0.0)"] +redis = ["redis (>=4.4,<7.0)"] +tracer = ["aws-xray-sdk (>=2.8.0,<3.0.0)"] +validation = ["fastjsonschema (>=2.14.5,<3.0.0)"] +valkey = ["valkey-glide (>=1.3.5,<3.0)"] + +[[package]] +name = "aws-xray-sdk" +version = "2.14.0" +description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers to record and emit information from within their applications to the AWS X-Ray service." +optional = false +python-versions = ">=3.7" +groups = ["main", "test"] +files = [ + {file = "aws_xray_sdk-2.14.0-py2.py3-none-any.whl", hash = "sha256:cfbe6feea3d26613a2a869d14c9246a844285c97087ad8f296f901633554ad94"}, + {file = "aws_xray_sdk-2.14.0.tar.gz", hash = "sha256:aab843c331af9ab9ba5cefb3a303832a19db186140894a523edafc024cc0493c"}, +] + +[package.dependencies] +botocore = ">=1.11.3" +wrapt = "*" + +[[package]] +name = "boto3" +version = "1.40.53" +description = "The AWS SDK for Python" +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "boto3-1.40.53-py3-none-any.whl", hash = "sha256:65ded2738de259bd9030feb4772ec7b53d5b661befa88ce836117c3df8265309"}, + {file = "boto3-1.40.53.tar.gz", hash = "sha256:3f8cf56034cfde20dd0abca01349f64ab65734d90c3fbf7357e8a84cb64a62ee"}, +] + +[package.dependencies] +botocore = ">=1.40.53,<1.41.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.14.0,<0.15.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "boto3-layer" +version = "4.0.0" +description = "Layer for AWS Boto3 python SDK" +optional = false +python-versions = "^3.13" +groups = ["test"] +files = [] +develop = true + +[package.dependencies] +boto3 = "1.40.53" +botocore = "1.40.53" +urllib3 = "2.5.0" + +[package.source] +type = "directory" +url = "../layers/aws_boto3" + +[[package]] +name = "botocore" +version = "1.40.53" +description = "Low-level, data-driven core of boto 3." +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "botocore-1.40.53-py3-none-any.whl", hash = "sha256:840322b0af4be7a6e2effddb4eb388053c25af0618f627f37d8b03cc1edbc928"}, + {file = "botocore-1.40.53.tar.gz", hash = "sha256:4ebb9e6648c4896d3f0cdda5ff30b5de9a83aeb591be89a16f98cc5ee3cd371c"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} + +[package.extras] +crt = ["awscrt (==0.27.6)"] + +[[package]] +name = "certifi" +version = "2025.8.3" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5"}, + {file = "certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407"}, +] + +[[package]] +name = "cffi" +version = "2.0.0" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.9" +groups = ["test"] +markers = "platform_python_implementation != \"PyPy\"" +files = [ + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, +] + +[package.dependencies] +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} + +[[package]] +name = "charset-normalizer" +version = "3.4.3" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-win32.whl", hash = "sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-win32.whl", hash = "sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca"}, + {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"}, + {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["test"] +markers = "sys_platform == \"win32\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coverage" +version = "7.10.6" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "coverage-7.10.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70e7bfbd57126b5554aa482691145f798d7df77489a177a6bef80de78860a356"}, + {file = "coverage-7.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e41be6f0f19da64af13403e52f2dec38bbc2937af54df8ecef10850ff8d35301"}, + {file = "coverage-7.10.6-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c61fc91ab80b23f5fddbee342d19662f3d3328173229caded831aa0bd7595460"}, + {file = "coverage-7.10.6-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10356fdd33a7cc06e8051413140bbdc6f972137508a3572e3f59f805cd2832fd"}, + {file = "coverage-7.10.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:80b1695cf7c5ebe7b44bf2521221b9bb8cdf69b1f24231149a7e3eb1ae5fa2fb"}, + {file = "coverage-7.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2e4c33e6378b9d52d3454bd08847a8651f4ed23ddbb4a0520227bd346382bbc6"}, + {file = "coverage-7.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c8a3ec16e34ef980a46f60dc6ad86ec60f763c3f2fa0db6d261e6e754f72e945"}, + {file = "coverage-7.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7d79dabc0a56f5af990cc6da9ad1e40766e82773c075f09cc571e2076fef882e"}, + {file = "coverage-7.10.6-cp310-cp310-win32.whl", hash = "sha256:86b9b59f2b16e981906e9d6383eb6446d5b46c278460ae2c36487667717eccf1"}, + {file = "coverage-7.10.6-cp310-cp310-win_amd64.whl", hash = "sha256:e132b9152749bd33534e5bd8565c7576f135f157b4029b975e15ee184325f528"}, + {file = "coverage-7.10.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c706db3cabb7ceef779de68270150665e710b46d56372455cd741184f3868d8f"}, + {file = "coverage-7.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e0c38dc289e0508ef68ec95834cb5d2e96fdbe792eaccaa1bccac3966bbadcc"}, + {file = "coverage-7.10.6-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:752a3005a1ded28f2f3a6e8787e24f28d6abe176ca64677bcd8d53d6fe2ec08a"}, + {file = "coverage-7.10.6-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:689920ecfd60f992cafca4f5477d55720466ad2c7fa29bb56ac8d44a1ac2b47a"}, + {file = "coverage-7.10.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec98435796d2624d6905820a42f82149ee9fc4f2d45c2c5bc5a44481cc50db62"}, + {file = "coverage-7.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b37201ce4a458c7a758ecc4efa92fa8ed783c66e0fa3c42ae19fc454a0792153"}, + {file = "coverage-7.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2904271c80898663c810a6b067920a61dd8d38341244a3605bd31ab55250dad5"}, + {file = "coverage-7.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5aea98383463d6e1fa4e95416d8de66f2d0cb588774ee20ae1b28df826bcb619"}, + {file = "coverage-7.10.6-cp311-cp311-win32.whl", hash = "sha256:e3fb1fa01d3598002777dd259c0c2e6d9d5e10e7222976fc8e03992f972a2cba"}, + {file = "coverage-7.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:f35ed9d945bece26553d5b4c8630453169672bea0050a564456eb88bdffd927e"}, + {file = "coverage-7.10.6-cp311-cp311-win_arm64.whl", hash = "sha256:99e1a305c7765631d74b98bf7dbf54eeea931f975e80f115437d23848ee8c27c"}, + {file = "coverage-7.10.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5b2dd6059938063a2c9fee1af729d4f2af28fd1a545e9b7652861f0d752ebcea"}, + {file = "coverage-7.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:388d80e56191bf846c485c14ae2bc8898aa3124d9d35903fef7d907780477634"}, + {file = "coverage-7.10.6-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:90cb5b1a4670662719591aa92d0095bb41714970c0b065b02a2610172dbf0af6"}, + {file = "coverage-7.10.6-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:961834e2f2b863a0e14260a9a273aff07ff7818ab6e66d2addf5628590c628f9"}, + {file = "coverage-7.10.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf9a19f5012dab774628491659646335b1928cfc931bf8d97b0d5918dd58033c"}, + {file = "coverage-7.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:99c4283e2a0e147b9c9cc6bc9c96124de9419d6044837e9799763a0e29a7321a"}, + {file = "coverage-7.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:282b1b20f45df57cc508c1e033403f02283adfb67d4c9c35a90281d81e5c52c5"}, + {file = "coverage-7.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8cdbe264f11afd69841bd8c0d83ca10b5b32853263ee62e6ac6a0ab63895f972"}, + {file = "coverage-7.10.6-cp312-cp312-win32.whl", hash = "sha256:a517feaf3a0a3eca1ee985d8373135cfdedfbba3882a5eab4362bda7c7cf518d"}, + {file = "coverage-7.10.6-cp312-cp312-win_amd64.whl", hash = "sha256:856986eadf41f52b214176d894a7de05331117f6035a28ac0016c0f63d887629"}, + {file = "coverage-7.10.6-cp312-cp312-win_arm64.whl", hash = "sha256:acf36b8268785aad739443fa2780c16260ee3fa09d12b3a70f772ef100939d80"}, + {file = "coverage-7.10.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ffea0575345e9ee0144dfe5701aa17f3ba546f8c3bb48db62ae101afb740e7d6"}, + {file = "coverage-7.10.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:95d91d7317cde40a1c249d6b7382750b7e6d86fad9d8eaf4fa3f8f44cf171e80"}, + {file = "coverage-7.10.6-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e23dd5408fe71a356b41baa82892772a4cefcf758f2ca3383d2aa39e1b7a003"}, + {file = "coverage-7.10.6-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0f3f56e4cb573755e96a16501a98bf211f100463d70275759e73f3cbc00d4f27"}, + {file = "coverage-7.10.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:db4a1d897bbbe7339946ffa2fe60c10cc81c43fab8b062d3fcb84188688174a4"}, + {file = "coverage-7.10.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d8fd7879082953c156d5b13c74aa6cca37f6a6f4747b39538504c3f9c63d043d"}, + {file = "coverage-7.10.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:28395ca3f71cd103b8c116333fa9db867f3a3e1ad6a084aa3725ae002b6583bc"}, + {file = "coverage-7.10.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:61c950fc33d29c91b9e18540e1aed7d9f6787cc870a3e4032493bbbe641d12fc"}, + {file = "coverage-7.10.6-cp313-cp313-win32.whl", hash = "sha256:160c00a5e6b6bdf4e5984b0ef21fc860bc94416c41b7df4d63f536d17c38902e"}, + {file = "coverage-7.10.6-cp313-cp313-win_amd64.whl", hash = "sha256:628055297f3e2aa181464c3808402887643405573eb3d9de060d81531fa79d32"}, + {file = "coverage-7.10.6-cp313-cp313-win_arm64.whl", hash = "sha256:df4ec1f8540b0bcbe26ca7dd0f541847cc8a108b35596f9f91f59f0c060bfdd2"}, + {file = "coverage-7.10.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c9a8b7a34a4de3ed987f636f71881cd3b8339f61118b1aa311fbda12741bff0b"}, + {file = "coverage-7.10.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8dd5af36092430c2b075cee966719898f2ae87b636cefb85a653f1d0ba5d5393"}, + {file = "coverage-7.10.6-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b0353b0f0850d49ada66fdd7d0c7cdb0f86b900bb9e367024fd14a60cecc1e27"}, + {file = "coverage-7.10.6-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d6b9ae13d5d3e8aeca9ca94198aa7b3ebbc5acfada557d724f2a1f03d2c0b0df"}, + {file = "coverage-7.10.6-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:675824a363cc05781b1527b39dc2587b8984965834a748177ee3c37b64ffeafb"}, + {file = "coverage-7.10.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:692d70ea725f471a547c305f0d0fc6a73480c62fb0da726370c088ab21aed282"}, + {file = "coverage-7.10.6-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:851430a9a361c7a8484a36126d1d0ff8d529d97385eacc8dfdc9bfc8c2d2cbe4"}, + {file = "coverage-7.10.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d9369a23186d189b2fc95cc08b8160ba242057e887d766864f7adf3c46b2df21"}, + {file = "coverage-7.10.6-cp313-cp313t-win32.whl", hash = "sha256:92be86fcb125e9bda0da7806afd29a3fd33fdf58fba5d60318399adf40bf37d0"}, + {file = "coverage-7.10.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6b3039e2ca459a70c79523d39347d83b73f2f06af5624905eba7ec34d64d80b5"}, + {file = "coverage-7.10.6-cp313-cp313t-win_arm64.whl", hash = "sha256:3fb99d0786fe17b228eab663d16bee2288e8724d26a199c29325aac4b0319b9b"}, + {file = "coverage-7.10.6-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6008a021907be8c4c02f37cdc3ffb258493bdebfeaf9a839f9e71dfdc47b018e"}, + {file = "coverage-7.10.6-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5e75e37f23eb144e78940b40395b42f2321951206a4f50e23cfd6e8a198d3ceb"}, + {file = "coverage-7.10.6-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0f7cb359a448e043c576f0da00aa8bfd796a01b06aa610ca453d4dde09cc1034"}, + {file = "coverage-7.10.6-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c68018e4fc4e14b5668f1353b41ccf4bc83ba355f0e1b3836861c6f042d89ac1"}, + {file = "coverage-7.10.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cd4b2b0707fc55afa160cd5fc33b27ccbf75ca11d81f4ec9863d5793fc6df56a"}, + {file = "coverage-7.10.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4cec13817a651f8804a86e4f79d815b3b28472c910e099e4d5a0e8a3b6a1d4cb"}, + {file = "coverage-7.10.6-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:f2a6a8e06bbda06f78739f40bfb56c45d14eb8249d0f0ea6d4b3d48e1f7c695d"}, + {file = "coverage-7.10.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:081b98395ced0d9bcf60ada7661a0b75f36b78b9d7e39ea0790bb4ed8da14747"}, + {file = "coverage-7.10.6-cp314-cp314-win32.whl", hash = "sha256:6937347c5d7d069ee776b2bf4e1212f912a9f1f141a429c475e6089462fcecc5"}, + {file = "coverage-7.10.6-cp314-cp314-win_amd64.whl", hash = "sha256:adec1d980fa07e60b6ef865f9e5410ba760e4e1d26f60f7e5772c73b9a5b0713"}, + {file = "coverage-7.10.6-cp314-cp314-win_arm64.whl", hash = "sha256:a80f7aef9535442bdcf562e5a0d5a5538ce8abe6bb209cfbf170c462ac2c2a32"}, + {file = "coverage-7.10.6-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:0de434f4fbbe5af4fa7989521c655c8c779afb61c53ab561b64dcee6149e4c65"}, + {file = "coverage-7.10.6-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6e31b8155150c57e5ac43ccd289d079eb3f825187d7c66e755a055d2c85794c6"}, + {file = "coverage-7.10.6-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:98cede73eb83c31e2118ae8d379c12e3e42736903a8afcca92a7218e1f2903b0"}, + {file = "coverage-7.10.6-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f863c08f4ff6b64fa8045b1e3da480f5374779ef187f07b82e0538c68cb4ff8e"}, + {file = "coverage-7.10.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2b38261034fda87be356f2c3f42221fdb4171c3ce7658066ae449241485390d5"}, + {file = "coverage-7.10.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0e93b1476b79eae849dc3872faeb0bf7948fd9ea34869590bc16a2a00b9c82a7"}, + {file = "coverage-7.10.6-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ff8a991f70f4c0cf53088abf1e3886edcc87d53004c7bb94e78650b4d3dac3b5"}, + {file = "coverage-7.10.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ac765b026c9f33044419cbba1da913cfb82cca1b60598ac1c7a5ed6aac4621a0"}, + {file = "coverage-7.10.6-cp314-cp314t-win32.whl", hash = "sha256:441c357d55f4936875636ef2cfb3bee36e466dcf50df9afbd398ce79dba1ebb7"}, + {file = "coverage-7.10.6-cp314-cp314t-win_amd64.whl", hash = "sha256:073711de3181b2e204e4870ac83a7c4853115b42e9cd4d145f2231e12d670930"}, + {file = "coverage-7.10.6-cp314-cp314t-win_arm64.whl", hash = "sha256:137921f2bac5559334ba66122b753db6dc5d1cf01eb7b64eb412bb0d064ef35b"}, + {file = "coverage-7.10.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90558c35af64971d65fbd935c32010f9a2f52776103a259f1dee865fe8259352"}, + {file = "coverage-7.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8953746d371e5695405806c46d705a3cd170b9cc2b9f93953ad838f6c1e58612"}, + {file = "coverage-7.10.6-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c83f6afb480eae0313114297d29d7c295670a41c11b274e6bca0c64540c1ce7b"}, + {file = "coverage-7.10.6-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7eb68d356ba0cc158ca535ce1381dbf2037fa8cb5b1ae5ddfc302e7317d04144"}, + {file = "coverage-7.10.6-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5b15a87265e96307482746d86995f4bff282f14b027db75469c446da6127433b"}, + {file = "coverage-7.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fc53ba868875bfbb66ee447d64d6413c2db91fddcfca57025a0e7ab5b07d5862"}, + {file = "coverage-7.10.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:efeda443000aa23f276f4df973cb82beca682fd800bb119d19e80504ffe53ec2"}, + {file = "coverage-7.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9702b59d582ff1e184945d8b501ffdd08d2cee38d93a2206aa5f1365ce0b8d78"}, + {file = "coverage-7.10.6-cp39-cp39-win32.whl", hash = "sha256:2195f8e16ba1a44651ca684db2ea2b2d4b5345da12f07d9c22a395202a05b23c"}, + {file = "coverage-7.10.6-cp39-cp39-win_amd64.whl", hash = "sha256:f32ff80e7ef6a5b5b606ea69a36e97b219cd9dc799bcf2963018a4d8f788cfbf"}, + {file = "coverage-7.10.6-py3-none-any.whl", hash = "sha256:92c4ecf6bf11b2e85fd4d8204814dc26e6a19f0c9d938c207c5cb0eadfcabbe3"}, + {file = "coverage-7.10.6.tar.gz", hash = "sha256:f644a3ae5933a552a29dbb9aa2f90c677a875f80ebea028e5a52a4f429044b90"}, +] + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "cryptography" +version = "44.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = "!=3.9.0,!=3.9.1,>=3.7" +groups = ["test"] +files = [ + {file = "cryptography-44.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:962bc30480a08d133e631e8dfd4783ab71cc9e33d5d7c1e192f0b7c06397bb88"}, + {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffc61e8f3bf5b60346d89cd3d37231019c17a081208dfbbd6e1605ba03fa137"}, + {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58968d331425a6f9eedcee087f77fd3c927c88f55368f43ff7e0a19891f2642c"}, + {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:e28d62e59a4dbd1d22e747f57d4f00c459af22181f0b2f787ea83f5a876d7c76"}, + {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af653022a0c25ef2e3ffb2c673a50e5a0d02fecc41608f4954176f1933b12359"}, + {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:157f1f3b8d941c2bd8f3ffee0af9b049c9665c39d3da9db2dc338feca5e98a43"}, + {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:c6cd67722619e4d55fdb42ead64ed8843d64638e9c07f4011163e46bc512cf01"}, + {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b424563394c369a804ecbee9b06dfb34997f19d00b3518e39f83a5642618397d"}, + {file = "cryptography-44.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c91fc8e8fd78af553f98bc7f2a1d8db977334e4eea302a4bfd75b9461c2d8904"}, + {file = "cryptography-44.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:25cd194c39fa5a0aa4169125ee27d1172097857b27109a45fadc59653ec06f44"}, + {file = "cryptography-44.0.3-cp37-abi3-win32.whl", hash = "sha256:3be3f649d91cb182c3a6bd336de8b61a0a71965bd13d1a04a0e15b39c3d5809d"}, + {file = "cryptography-44.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:3883076d5c4cc56dbef0b898a74eb6992fdac29a7b9013870b34efe4ddb39a0d"}, + {file = "cryptography-44.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:5639c2b16764c6f76eedf722dbad9a0914960d3489c0cc38694ddf9464f1bb2f"}, + {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ffef566ac88f75967d7abd852ed5f182da252d23fac11b4766da3957766759"}, + {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:192ed30fac1728f7587c6f4613c29c584abdc565d7417c13904708db10206645"}, + {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7d5fe7195c27c32a64955740b949070f21cba664604291c298518d2e255931d2"}, + {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3f07943aa4d7dad689e3bb1638ddc4944cc5e0921e3c227486daae0e31a05e54"}, + {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb90f60e03d563ca2445099edf605c16ed1d5b15182d21831f58460c48bffb93"}, + {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:ab0b005721cc0039e885ac3503825661bd9810b15d4f374e473f8c89b7d5460c"}, + {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3bb0847e6363c037df8f6ede57d88eaf3410ca2267fb12275370a76f85786a6f"}, + {file = "cryptography-44.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b0cc66c74c797e1db750aaa842ad5b8b78e14805a9b5d1348dc603612d3e3ff5"}, + {file = "cryptography-44.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6866df152b581f9429020320e5eb9794c8780e90f7ccb021940d7f50ee00ae0b"}, + {file = "cryptography-44.0.3-cp39-abi3-win32.whl", hash = "sha256:c138abae3a12a94c75c10499f1cbae81294a6f983b3af066390adee73f433028"}, + {file = "cryptography-44.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:5d186f32e52e66994dce4f766884bcb9c68b8da62d61d9d215bfe5fb56d21334"}, + {file = "cryptography-44.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:cad399780053fb383dc067475135e41c9fe7d901a97dd5d9c5dfb5611afc0d7d"}, + {file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:21a83f6f35b9cc656d71b5de8d519f566df01e660ac2578805ab245ffd8523f8"}, + {file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fc3c9babc1e1faefd62704bb46a69f359a9819eb0292e40df3fb6e3574715cd4"}, + {file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:e909df4053064a97f1e6565153ff8bb389af12c5c8d29c343308760890560aff"}, + {file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dad80b45c22e05b259e33ddd458e9e2ba099c86ccf4e88db7bbab4b747b18d06"}, + {file = "cryptography-44.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:479d92908277bed6e1a1c69b277734a7771c2b78633c224445b5c60a9f4bc1d9"}, + {file = "cryptography-44.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:896530bc9107b226f265effa7ef3f21270f18a2026bc09fed1ebd7b66ddf6375"}, + {file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9b4d4a5dbee05a2c390bf212e78b99434efec37b17a4bff42f50285c5c8c9647"}, + {file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02f55fb4f8b79c1221b0961488eaae21015b69b210e18c386b69de182ebb1259"}, + {file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:dd3db61b8fe5be220eee484a17233287d0be6932d056cf5738225b9c05ef4fff"}, + {file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:978631ec51a6bbc0b7e58f23b68a8ce9e5f09721940933e9c217068388789fe5"}, + {file = "cryptography-44.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:5d20cc348cca3a8aa7312f42ab953a56e15323800ca3ab0706b8cd452a3a056c"}, + {file = "cryptography-44.0.3.tar.gz", hash = "sha256:fe19d8bc5536a91a24a8133328880a41831b6c5df54599a8417b62fe015d3053"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=3.0.0) ; python_version >= \"3.8\""] +docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] +nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_version >= \"3.8\""] +pep8test = ["check-sdist ; python_version >= \"3.8\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] +sdist = ["build (>=1.0.0)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi (>=2024)", "cryptography-vectors (==44.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "custom-boto3-init" +version = "4.0.0" +description = "Initialize boto config for AWS Python SDK with custom configuration" +optional = false +python-versions = "^3.13" +groups = ["test"] +files = [] +develop = true + +[package.dependencies] +aws-lambda-powertools = "3.19.0" +aws-xray-sdk = "2.14.0" +cryptography = {version = "^44.0.1", markers = "sys_platform == \"linux\" and platform_machine == \"x86_64\""} +pyjwt = "^2.10.1" + +[package.source] +type = "directory" +url = "../layers/custom_boto3_init" + +[[package]] +name = "freezegun" +version = "1.5.1" +description = "Let your Python tests travel through time" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"}, + {file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"}, +] + +[package.dependencies] +python-dateutil = ">=2.7" + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["test"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +groups = ["main", "test"] +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mock" +version = "5.1.0" +description = "Rolling backport of unittest.mock for all Pythons" +optional = false +python-versions = ">=3.6" +groups = ["test"] +files = [ + {file = "mock-5.1.0-py3-none-any.whl", hash = "sha256:18c694e5ae8a208cdb3d2c20a993ca1a7b0efa258c247a1e565150f477f83744"}, + {file = "mock-5.1.0.tar.gz", hash = "sha256:5e96aad5ccda4718e0a229ed94b2024df75cc2d55575ba5762d31f5767b8767d"}, +] + +[package.extras] +build = ["blurb", "twine", "wheel"] +docs = ["sphinx"] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "moto" +version = "5.0.28" +description = "A library that allows you to easily mock out tests based on AWS infrastructure" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "moto-5.0.28-py3-none-any.whl", hash = "sha256:2dfbea1afe3b593e13192059a1a7fc4b3cf7fdf92e432070c22346efa45aa0f0"}, + {file = "moto-5.0.28.tar.gz", hash = "sha256:4d3437693411ec943c13c77de5b0b520c4b0a9ac850fead4ba2a54709e086e8b"}, +] + +[package.dependencies] +boto3 = ">=1.9.201" +botocore = ">=1.14.0,<1.35.45 || >1.35.45,<1.35.46 || >1.35.46" +cryptography = ">=35.0.0" +Jinja2 = ">=2.10.1" +python-dateutil = ">=2.1,<3.0.0" +requests = ">=2.5" +responses = ">=0.15.0,<0.25.5 || >0.25.5" +werkzeug = ">=0.5,<2.2.0 || >2.2.0,<2.2.1 || >2.2.1" +xmltodict = "*" + +[package.extras] +all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "jsonpath-ng", "jsonschema", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)", "setuptools"] +apigateway = ["PyYAML (>=5.1)", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)"] +apigatewayv2 = ["PyYAML (>=5.1)", "openapi-spec-validator (>=0.5.0)"] +appsync = ["graphql-core"] +awslambda = ["docker (>=3.0.0)"] +batch = ["docker (>=3.0.0)"] +cloudformation = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)", "setuptools"] +cognitoidp = ["joserfc (>=0.9.0)"] +dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.6.1)"] +dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.6.1)"] +events = ["jsonpath-ng"] +glue = ["pyparsing (>=3.0.7)"] +proxy = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "graphql-core", "joserfc (>=0.9.0)", "jsonpath-ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)", "setuptools"] +quicksight = ["jsonschema"] +resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)"] +s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.6.1)"] +s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.6.1)"] +server = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "flask (!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "joserfc (>=0.9.0)", "jsonpath-ng", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)", "setuptools"] +ssm = ["PyYAML (>=5.1)"] +stepfunctions = ["antlr4-python3-runtime", "jsonpath-ng"] +xray = ["aws-xray-sdk (>=0.93,!=0.96)", "setuptools"] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "pycparser" +version = "2.23" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["test"] +markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"" +files = [ + {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"}, + {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"}, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +description = "JSON Web Token implementation in Python" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb"}, + {file = "pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953"}, +] + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pytest" +version = "8.3.4" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, + {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-cov" +version = "6.0.0" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"}, + {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"}, +] + +[package.dependencies] +coverage = {version = ">=7.5", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "pytest-env" +version = "1.1.5" +description = "pytest plugin that allows you to add environment variables." +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "pytest_env-1.1.5-py3-none-any.whl", hash = "sha256:ce90cf8772878515c24b31cd97c7fa1f4481cd68d588419fd45f10ecaee6bc30"}, + {file = "pytest_env-1.1.5.tar.gz", hash = "sha256:91209840aa0e43385073ac464a554ad2947cc2fd663a9debf88d03b01e0cc1cf"}, +] + +[package.dependencies] +pytest = ">=8.3.3" + +[package.extras] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "test"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "requests" +version = "2.32.4" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset_normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "responses" +version = "0.25.8" +description = "A utility library for mocking out the `requests` Python library." +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "responses-0.25.8-py3-none-any.whl", hash = "sha256:0c710af92def29c8352ceadff0c3fe340ace27cf5af1bbe46fb71275bcd2831c"}, + {file = "responses-0.25.8.tar.gz", hash = "sha256:9374d047a575c8f781b94454db5cab590b6029505f488d12899ddb10a4af1cf4"}, +] + +[package.dependencies] +pyyaml = "*" +requests = ">=2.30.0,<3.0" +urllib3 = ">=1.25.10,<3.0" + +[package.extras] +tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli ; python_version < \"3.11\"", "tomli-w", "types-PyYAML", "types-requests"] + +[[package]] +name = "s3transfer" +version = "0.14.0" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456"}, + {file = "s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125"}, +] + +[package.dependencies] +botocore = ">=1.37.4,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"] + +[[package]] +name = "setuptools" +version = "80.8.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "setuptools-80.8.0-py3-none-any.whl", hash = "sha256:95a60484590d24103af13b686121328cc2736bee85de8936383111e421b9edc0"}, + {file = "setuptools-80.8.0.tar.gz", hash = "sha256:49f7af965996f26d43c8ae34539c8d99c5042fbff34302ea151eaa9c207cd257"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "test"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.9" +groups = ["main", "test"] +files = [ + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "werkzeug" +version = "3.1.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"}, + {file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wrapt" +version = "1.17.3" +description = "Module for decorators, wrappers and monkey patching." +optional = false +python-versions = ">=3.8" +groups = ["main", "test"] +files = [ + {file = "wrapt-1.17.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88bbae4d40d5a46142e70d58bf664a89b6b4befaea7b2ecc14e03cedb8e06c04"}, + {file = "wrapt-1.17.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b13af258d6a9ad602d57d889f83b9d5543acd471eee12eb51f5b01f8eb1bc2"}, + {file = "wrapt-1.17.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd341868a4b6714a5962c1af0bd44f7c404ef78720c7de4892901e540417111c"}, + {file = "wrapt-1.17.3-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f9b2601381be482f70e5d1051a5965c25fb3625455a2bf520b5a077b22afb775"}, + {file = "wrapt-1.17.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:343e44b2a8e60e06a7e0d29c1671a0d9951f59174f3709962b5143f60a2a98bd"}, + {file = "wrapt-1.17.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:33486899acd2d7d3066156b03465b949da3fd41a5da6e394ec49d271baefcf05"}, + {file = "wrapt-1.17.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e6f40a8aa5a92f150bdb3e1c44b7e98fb7113955b2e5394122fa5532fec4b418"}, + {file = "wrapt-1.17.3-cp310-cp310-win32.whl", hash = "sha256:a36692b8491d30a8c75f1dfee65bef119d6f39ea84ee04d9f9311f83c5ad9390"}, + {file = "wrapt-1.17.3-cp310-cp310-win_amd64.whl", hash = "sha256:afd964fd43b10c12213574db492cb8f73b2f0826c8df07a68288f8f19af2ebe6"}, + {file = "wrapt-1.17.3-cp310-cp310-win_arm64.whl", hash = "sha256:af338aa93554be859173c39c85243970dc6a289fa907402289eeae7543e1ae18"}, + {file = "wrapt-1.17.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:273a736c4645e63ac582c60a56b0acb529ef07f78e08dc6bfadf6a46b19c0da7"}, + {file = "wrapt-1.17.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5531d911795e3f935a9c23eb1c8c03c211661a5060aab167065896bbf62a5f85"}, + {file = "wrapt-1.17.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0610b46293c59a3adbae3dee552b648b984176f8562ee0dba099a56cfbe4df1f"}, + {file = "wrapt-1.17.3-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b32888aad8b6e68f83a8fdccbf3165f5469702a7544472bdf41f582970ed3311"}, + {file = "wrapt-1.17.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8cccf4f81371f257440c88faed6b74f1053eef90807b77e31ca057b2db74edb1"}, + {file = "wrapt-1.17.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8a210b158a34164de8bb68b0e7780041a903d7b00c87e906fb69928bf7890d5"}, + {file = "wrapt-1.17.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:79573c24a46ce11aab457b472efd8d125e5a51da2d1d24387666cd85f54c05b2"}, + {file = "wrapt-1.17.3-cp311-cp311-win32.whl", hash = "sha256:c31eebe420a9a5d2887b13000b043ff6ca27c452a9a22fa71f35f118e8d4bf89"}, + {file = "wrapt-1.17.3-cp311-cp311-win_amd64.whl", hash = "sha256:0b1831115c97f0663cb77aa27d381237e73ad4f721391a9bfb2fe8bc25fa6e77"}, + {file = "wrapt-1.17.3-cp311-cp311-win_arm64.whl", hash = "sha256:5a7b3c1ee8265eb4c8f1b7d29943f195c00673f5ab60c192eba2d4a7eae5f46a"}, + {file = "wrapt-1.17.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ab232e7fdb44cdfbf55fc3afa31bcdb0d8980b9b95c38b6405df2acb672af0e0"}, + {file = "wrapt-1.17.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9baa544e6acc91130e926e8c802a17f3b16fbea0fd441b5a60f5cf2cc5c3deba"}, + {file = "wrapt-1.17.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b538e31eca1a7ea4605e44f81a48aa24c4632a277431a6ed3f328835901f4fd"}, + {file = "wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:042ec3bb8f319c147b1301f2393bc19dba6e176b7da446853406d041c36c7828"}, + {file = "wrapt-1.17.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3af60380ba0b7b5aeb329bc4e402acd25bd877e98b3727b0135cb5c2efdaefe9"}, + {file = "wrapt-1.17.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b02e424deef65c9f7326d8c19220a2c9040c51dc165cddb732f16198c168396"}, + {file = "wrapt-1.17.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:74afa28374a3c3a11b3b5e5fca0ae03bef8450d6aa3ab3a1e2c30e3a75d023dc"}, + {file = "wrapt-1.17.3-cp312-cp312-win32.whl", hash = "sha256:4da9f45279fff3543c371d5ababc57a0384f70be244de7759c85a7f989cb4ebe"}, + {file = "wrapt-1.17.3-cp312-cp312-win_amd64.whl", hash = "sha256:e71d5c6ebac14875668a1e90baf2ea0ef5b7ac7918355850c0908ae82bcb297c"}, + {file = "wrapt-1.17.3-cp312-cp312-win_arm64.whl", hash = "sha256:604d076c55e2fdd4c1c03d06dc1a31b95130010517b5019db15365ec4a405fc6"}, + {file = "wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0"}, + {file = "wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77"}, + {file = "wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7"}, + {file = "wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277"}, + {file = "wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d"}, + {file = "wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa"}, + {file = "wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050"}, + {file = "wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8"}, + {file = "wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb"}, + {file = "wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16"}, + {file = "wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39"}, + {file = "wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235"}, + {file = "wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c"}, + {file = "wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b"}, + {file = "wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa"}, + {file = "wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7"}, + {file = "wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4"}, + {file = "wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10"}, + {file = "wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6"}, + {file = "wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58"}, + {file = "wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a"}, + {file = "wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067"}, + {file = "wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454"}, + {file = "wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e"}, + {file = "wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f"}, + {file = "wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056"}, + {file = "wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804"}, + {file = "wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977"}, + {file = "wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116"}, + {file = "wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6"}, + {file = "wrapt-1.17.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:70d86fa5197b8947a2fa70260b48e400bf2ccacdcab97bb7de47e3d1e6312225"}, + {file = "wrapt-1.17.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:df7d30371a2accfe4013e90445f6388c570f103d61019b6b7c57e0265250072a"}, + {file = "wrapt-1.17.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:caea3e9c79d5f0d2c6d9ab96111601797ea5da8e6d0723f77eabb0d4068d2b2f"}, + {file = "wrapt-1.17.3-cp38-cp38-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:758895b01d546812d1f42204bd443b8c433c44d090248bf22689df673ccafe00"}, + {file = "wrapt-1.17.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:02b551d101f31694fc785e58e0720ef7d9a10c4e62c1c9358ce6f63f23e30a56"}, + {file = "wrapt-1.17.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:656873859b3b50eeebe6db8b1455e99d90c26ab058db8e427046dbc35c3140a5"}, + {file = "wrapt-1.17.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a9a2203361a6e6404f80b99234fe7fb37d1fc73487b5a78dc1aa5b97201e0f22"}, + {file = "wrapt-1.17.3-cp38-cp38-win32.whl", hash = "sha256:55cbbc356c2842f39bcc553cf695932e8b30e30e797f961860afb308e6b1bb7c"}, + {file = "wrapt-1.17.3-cp38-cp38-win_amd64.whl", hash = "sha256:ad85e269fe54d506b240d2d7b9f5f2057c2aa9a2ea5b32c66f8902f768117ed2"}, + {file = "wrapt-1.17.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:30ce38e66630599e1193798285706903110d4f057aab3168a34b7fdc85569afc"}, + {file = "wrapt-1.17.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:65d1d00fbfb3ea5f20add88bbc0f815150dbbde3b026e6c24759466c8b5a9ef9"}, + {file = "wrapt-1.17.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7c06742645f914f26c7f1fa47b8bc4c91d222f76ee20116c43d5ef0912bba2d"}, + {file = "wrapt-1.17.3-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7e18f01b0c3e4a07fe6dfdb00e29049ba17eadbc5e7609a2a3a4af83ab7d710a"}, + {file = "wrapt-1.17.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f5f51a6466667a5a356e6381d362d259125b57f059103dd9fdc8c0cf1d14139"}, + {file = "wrapt-1.17.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:59923aa12d0157f6b82d686c3fd8e1166fa8cdfb3e17b42ce3b6147ff81528df"}, + {file = "wrapt-1.17.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:46acc57b331e0b3bcb3e1ca3b421d65637915cfcd65eb783cb2f78a511193f9b"}, + {file = "wrapt-1.17.3-cp39-cp39-win32.whl", hash = "sha256:3e62d15d3cfa26e3d0788094de7b64efa75f3a53875cdbccdf78547aed547a81"}, + {file = "wrapt-1.17.3-cp39-cp39-win_amd64.whl", hash = "sha256:1f23fa283f51c890eda8e34e4937079114c74b4c81d2b2f1f1d94948f5cc3d7f"}, + {file = "wrapt-1.17.3-cp39-cp39-win_arm64.whl", hash = "sha256:24c2ed34dc222ed754247a2702b1e1e89fdbaa4016f324b4b8f1a802d4ffe87f"}, + {file = "wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22"}, + {file = "wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0"}, +] + +[[package]] +name = "xmltodict" +version = "0.15.1" +description = "Makes working with XML feel like you are working with JSON" +optional = false +python-versions = ">=3.9" +groups = ["test"] +files = [ + {file = "xmltodict-0.15.1-py2.py3-none-any.whl", hash = "sha256:dcd84b52f30a15be5ac4c9099a0cb234df8758624b035411e329c5c1e7a49089"}, + {file = "xmltodict-0.15.1.tar.gz", hash = "sha256:3d8d49127f3ce6979d40a36dbcad96f8bab106d232d24b49efdd4bd21716983c"}, +] + +[metadata] +lock-version = "2.1" +python-versions = "^3.13" +content-hash = "1acfc2c3555d9c0a1dfaf37a83b0e307d29df27a0bb5a610791d35cd6d01bd31" diff --git a/source/lambda/agentcore-invocation/pyproject.toml b/source/lambda/agentcore-invocation/pyproject.toml new file mode 100644 index 00000000..a8acdfe7 --- /dev/null +++ b/source/lambda/agentcore-invocation/pyproject.toml @@ -0,0 +1,50 @@ +[tool.poetry] +name = "agent-invocation" +version = "4.0.0" +authors = [ "Amazon Web Services" ] +description = "Lambda implementation for agent invocation feature" +packages = [ + { include = "*.py" }, + { include = "./**/*.py" }, + { include = "./**/**/*.py" } +] +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", +] +license = "Apache-2.0" + +[tool.poetry.dependencies] +python = "^3.13" +aws-lambda-powertools = "3.19.0" +aws-xray-sdk = "2.14.0" +boto3 = "^1.35.0" +botocore = "^1.35.0" + +[tool.poetry.group.test.dependencies] +freezegun = "1.5.1" +mock = "5.1.0" +moto = "5.0.28" +pytest = "8.3.4" +pytest-cov = "6.0.0" +pytest-env = "1.1.5" +PyYAML = "6.0.2" +setuptools = "80.8.0" +requests = "2.32.4" +urllib3 = "2.5.0" +boto3-layer = { path = "../layers/aws_boto3/", develop = true } +custom_boto3_init = { path = "../layers/custom_boto3_init", develop = true } + +[tool.black] +line-length = 120 + +[tool.isort] +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +line_length = 120 +profile = "black" + +[build-system] +requires = [ "poetry-core>=1.0.8" ] +build-backend = "poetry.core.masonry.api" \ No newline at end of file diff --git a/source/lambda/agentcore-invocation/test/__init__.py b/source/lambda/agentcore-invocation/test/__init__.py new file mode 100644 index 00000000..04f8b7b7 --- /dev/null +++ b/source/lambda/agentcore-invocation/test/__init__.py @@ -0,0 +1,2 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 diff --git a/source/lambda/agentcore-invocation/test/test_agentcore_client.py b/source/lambda/agentcore-invocation/test/test_agentcore_client.py new file mode 100644 index 00000000..957255ad --- /dev/null +++ b/source/lambda/agentcore-invocation/test/test_agentcore_client.py @@ -0,0 +1,587 @@ +#!/usr/bin/env python3 +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +from unittest.mock import Mock, patch +from uuid import UUID + +import pytest +from utils.agentcore_client import ( + AgentCoreClient, + AgentCoreClientError, + AgentCoreConfigurationError, + AgentCoreInvocationError, +) + + +class MockStreamingBody: + """Mock StreamingBody for testing.""" + + def __init__(self, content: str): + self.content = content.encode("utf-8") + self.position = 0 + + def read(self, size=None): + if size is None: + # Read all remaining content + result = self.content[self.position :] + self.position = len(self.content) + return result + else: + # Read up to size bytes + result = self.content[self.position : self.position + size] + self.position += len(result) + return result + + +class TestAgentCoreClient: + """Test AgentCore client functionality.""" + + def setup_method(self): + """Set up test fixtures.""" + with patch.dict( + "os.environ", {"AGENT_RUNTIME_ARN": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime"} + ): + self.client = AgentCoreClient() + + self.test_input = "Hello, how can you help me?" + self.test_conversation_id = "conv-12345" + self.test_user_id = "user-67890" + + def test_successful_invocation_with_dict_response(self): + """Test successful agent invocation with dictionary response.""" + mock_response = {"response": {"result": "I can help you with various tasks. What would you like to know?"}} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Verify invoke_agent_runtime was called with correct parameters including runtimeSessionId + mock_boto_client.invoke_agent_runtime.assert_called_once() + call_kwargs = mock_boto_client.invoke_agent_runtime.call_args[1] + assert call_kwargs["runtimeUserId"] == self.test_user_id + assert call_kwargs["runtimeSessionId"] == f"{self.test_conversation_id}_{self.test_user_id}" + assert call_kwargs["contentType"] == "application/json" + assert call_kwargs["accept"] == "application/json" + + # Should have content chunks and completion + assert len(chunks) >= 2 + + # Find content chunks + content_chunks = [chunk for chunk in chunks if chunk.get("type") == "content"] + assert len(content_chunks) > 0 + + # Verify content contains expected text + full_text = "".join(chunk.get("text", "") for chunk in content_chunks) + assert "I can help you with various tasks" in full_text + + def test_successful_invocation_with_streaming_body_response(self): + """Test successful agent invocation with StreamingBody response.""" + response_content = '{"result": "This is a streaming response from the agent."}' + mock_streaming_body = MockStreamingBody(response_content) + mock_response = {"response": mock_streaming_body} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Verify invoke_agent_runtime was called with correct parameters including runtimeSessionId + mock_boto_client.invoke_agent_runtime.assert_called_once() + call_kwargs = mock_boto_client.invoke_agent_runtime.call_args[1] + assert call_kwargs["runtimeUserId"] == self.test_user_id + assert call_kwargs["runtimeSessionId"] == f"{self.test_conversation_id}_{self.test_user_id}" + + # Should have content chunks and completion + assert len(chunks) >= 2 + + # Find content chunks + content_chunks = [chunk for chunk in chunks if chunk.get("type") == "content"] + assert len(content_chunks) > 0 + + # Verify content contains expected text + full_text = "".join(chunk.get("text", "") for chunk in content_chunks) + assert "This is a streaming response from the agent" in full_text + + def test_successful_invocation_with_bytes_response(self): + """Test successful agent invocation with bytes response.""" + response_content = '{"result": "Response from bytes payload."}' + mock_response = {"response": response_content.encode("utf-8")} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Verify invoke_agent_runtime was called with correct parameters including runtimeSessionId + mock_boto_client.invoke_agent_runtime.assert_called_once() + call_kwargs = mock_boto_client.invoke_agent_runtime.call_args[1] + assert call_kwargs["runtimeUserId"] == self.test_user_id + assert call_kwargs["runtimeSessionId"] == f"{self.test_conversation_id}_{self.test_user_id}" + + # Should have content chunks and completion + assert len(chunks) >= 2 + + # Find content chunks + content_chunks = [chunk for chunk in chunks if chunk.get("type") == "content"] + assert len(content_chunks) > 0 + + # Verify content contains expected text + full_text = "".join(chunk.get("text", "") for chunk in content_chunks) + assert "Response from bytes payload" in full_text + + def test_successful_invocation_with_string_response(self): + """Test successful agent invocation with string response.""" + response_content = '{"result": "String response from agent."}' + mock_response = {"response": response_content} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Verify invoke_agent_runtime was called with correct parameters including runtimeSessionId + mock_boto_client.invoke_agent_runtime.assert_called_once() + call_kwargs = mock_boto_client.invoke_agent_runtime.call_args[1] + assert call_kwargs["runtimeUserId"] == self.test_user_id + assert call_kwargs["runtimeSessionId"] == f"{self.test_conversation_id}_{self.test_user_id}" + + # Should have content chunks and completion + assert len(chunks) >= 2 + + # Find content chunks + content_chunks = [chunk for chunk in chunks if chunk.get("type") == "content"] + assert len(content_chunks) > 0 + + # Verify content contains expected text + full_text = "".join(chunk.get("text", "") for chunk in content_chunks) + assert "String response from agent" in full_text + + def test_client_error_handling(self): + """Test handling of client errors.""" + from botocore.exceptions import ClientError + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.side_effect = ClientError( + {"Error": {"Code": "ValidationException", "Message": "Invalid input"}}, "InvokeAgentRuntime" + ) + + with pytest.raises(AgentCoreInvocationError) as exc_info: + list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + assert "AgentCore boto3 error during invocation" in str(exc_info.value) + + def test_streaming_body_read_error(self): + """Test handling of StreamingBody read errors.""" + mock_streaming_body = Mock() + mock_streaming_body.read.side_effect = Exception("Failed to read stream") + mock_response = {"response": mock_streaming_body} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + # Error should occur when consuming the stream + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Should have error chunk and completion chunk + assert len(chunks) >= 2 + error_chunk = chunks[0] + assert error_chunk["type"] == "error" + assert "Failed to read stream" in error_chunk["error"] + + def test_client_none_error(self): + """Test error when client is None.""" + self.client.client = None + + with pytest.raises(AgentCoreInvocationError) as exc_info: + list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + assert "NoneType" in str(exc_info.value) + + def test_error_chunk_handling(self): + """Test that error chunks are yielded instead of raising exceptions.""" + error_response = { + "response": { + "type": "error", + "error": "ValidationException", + "message": "Invocation of model ID amazon.nova-pro-v1:0 with on-demand throughput isn't supported", + } + } + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = error_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Debug: print chunks + print(f"\nReceived {len(chunks)} chunks:") + for i, chunk in enumerate(chunks): + print(f" Chunk {i}: {chunk}") + + # Should have error chunk and completion chunk + assert len(chunks) >= 2 + + # Find error chunk + error_chunks = [chunk for chunk in chunks if chunk.get("type") == "error"] + assert len(error_chunks) == 1, f"Expected 1 error chunk, got {len(error_chunks)}. All chunks: {chunks}" + + error_chunk = error_chunks[0] + assert error_chunk["type"] == "error" + assert error_chunk["error"] == "ValidationException" + assert "amazon.nova-pro-v1:0" in error_chunk["message"] + + # Should also have completion chunk(s) + completion_chunks = [chunk for chunk in chunks if chunk.get("type") == "completion"] + assert len(completion_chunks) >= 1 + + def test_error_chunk_with_streaming_body(self): + """Test error chunk handling with StreamingBody response.""" + error_content = json.dumps( + {"type": "error", "error": "Streaming response failed", "message": "Connection timeout"} + ) + mock_streaming_body = MockStreamingBody(error_content) + mock_response = {"response": mock_streaming_body} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Should have error chunk and completion chunk + assert len(chunks) >= 2 + + # Find error chunk + error_chunks = [chunk for chunk in chunks if chunk.get("type") == "error"] + assert len(error_chunks) == 1 + + error_chunk = error_chunks[0] + assert error_chunk["type"] == "error" + assert error_chunk["error"] == "Streaming response failed" + assert error_chunk["message"] == "Connection timeout" + + def test_invoke_agent_with_files_parameter(self): + """Test agent invocation with files parameter.""" + test_files = [ + {"fileReference": "file-123", "fileName": "document.pdf"}, + {"fileReference": "file-456", "fileName": "image.jpg"}, + ] + test_message_id = "msg-789" + + mock_response = {"response": {"result": "I can see the files you've uploaded."}} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + message_id=test_message_id, + files=test_files, + ) + ) + + # Verify invoke_agent_runtime was called with correct parameters + mock_boto_client.invoke_agent_runtime.assert_called_once() + call_kwargs = mock_boto_client.invoke_agent_runtime.call_args[1] + + # Verify payload includes files and message_id + payload_str = call_kwargs["payload"].decode("utf-8") + payload_dict = json.loads(payload_str) + + assert payload_dict["files"] == test_files + assert payload_dict["messageId"] == test_message_id + assert payload_dict["conversationId"] == self.test_conversation_id + assert payload_dict["input"] == self.test_input + assert payload_dict["userId"] == self.test_user_id + + # Should have content chunks and completion + assert len(chunks) >= 2 + + def test_invoke_agent_without_files_parameter(self): + """Test agent invocation without files parameter (backward compatibility).""" + mock_response = {"response": {"result": "Hello! How can I help you?"}} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Verify invoke_agent_runtime was called with correct parameters + mock_boto_client.invoke_agent_runtime.assert_called_once() + call_kwargs = mock_boto_client.invoke_agent_runtime.call_args[1] + + # Verify payload does not include files when not provided + payload_str = call_kwargs["payload"].decode("utf-8") + payload_dict = json.loads(payload_str) + + assert "files" not in payload_dict + assert payload_dict["conversationId"] == self.test_conversation_id + assert payload_dict["input"] == self.test_input + assert payload_dict["userId"] == self.test_user_id + # Should have auto-generated message_id + assert "messageId" in payload_dict + assert payload_dict["messageId"].startswith("msg-") + + def test_invoke_agent_with_empty_files_list(self): + """Test agent invocation with empty files list.""" + test_files = [] + test_message_id = "msg-empty-files" + + mock_response = {"response": {"result": "No files provided."}} + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + message_id=test_message_id, + files=test_files, + ) + ) + + # Verify invoke_agent_runtime was called with correct parameters + mock_boto_client.invoke_agent_runtime.assert_called_once() + call_kwargs = mock_boto_client.invoke_agent_runtime.call_args[1] + + # Verify payload includes empty files list + payload_str = call_kwargs["payload"].decode("utf-8") + payload_dict = json.loads(payload_str) + + assert "files" not in payload_dict + assert payload_dict["messageId"] == test_message_id + + @patch("utils.agentcore_client.metrics") + def test_completion_chunk_with_usage_metadata(self, mock_metrics): + """Test that completion chunk preserves usage metadata.""" + mock_response = { + "response": { + "type": "completion", + "usage": { + "inputTokens": 1453, + "outputTokens": 271, + "totalTokens": 1724 + } + } + } + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Find completion chunk + completion_chunks = [chunk for chunk in chunks if chunk.get("type") == "completion"] + assert len(completion_chunks) >= 1 + + # Verify usage metadata is preserved + completion_chunk = completion_chunks[0] + assert "usage" in completion_chunk + assert completion_chunk["usage"]["inputTokens"] == 1453 + assert completion_chunk["usage"]["outputTokens"] == 271 + assert completion_chunk["usage"]["totalTokens"] == 1724 + + # Verify metrics were reported + assert mock_metrics.add_metric.call_count == 3 + mock_metrics.flush_metrics.assert_called_once() + + @patch("utils.agentcore_client.metrics") + def test_completion_chunk_without_usage_metadata(self, mock_metrics): + """Test that completion chunk works without usage metadata.""" + mock_response = { + "response": { + "type": "completion" + } + } + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Find completion chunk + completion_chunks = [chunk for chunk in chunks if chunk.get("type") == "completion"] + assert len(completion_chunks) >= 1 + + # Verify no usage metadata + completion_chunk = completion_chunks[0] + assert "usage" not in completion_chunk + + # Verify metrics were not reported + mock_metrics.add_metric.assert_not_called() + + @patch("utils.agentcore_client.metrics") + def test_completion_chunk_reports_metrics_to_cloudwatch(self, mock_metrics): + """Test that usage metrics are reported to CloudWatch.""" + mock_response = { + "response": { + "type": "completion", + "usage": { + "inputTokens": 100, + "outputTokens": 50, + "totalTokens": 150 + } + } + } + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Verify all three metrics were added + metric_calls = mock_metrics.add_metric.call_args_list + assert len(metric_calls) == 3 + + # Verify metric values (using enum values, not names) + metric_values = {call[1]["name"]: call[1]["value"] for call in metric_calls} + assert metric_values["InputTokenCount"] == 100 + assert metric_values["OutputTokenCount"] == 50 + assert metric_values["TotalTokenCount"] == 150 + + # Verify metrics were flushed + mock_metrics.flush_metrics.assert_called_once() + + def test_thinking_chunk_handling(self): + """Test that thinking chunks are properly handled.""" + mock_response = { + "response": { + "type": "thinking", + "thinking": { + "thinkingMessage": "Let me think about this..." + } + } + } + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Find thinking chunk + thinking_chunks = [chunk for chunk in chunks if chunk.get("type") == "thinking"] + assert len(thinking_chunks) == 1 + assert "thinking" in thinking_chunks[0] + + def test_tool_use_chunk_handling(self): + """Test that tool usage chunks are properly handled.""" + mock_response = { + "response": { + "type": "tool_use", + "toolUsage": { + "toolName": "test_tool", + "status": "completed", + "startTime": "2024-01-01T00:00:00Z" + } + } + } + + with patch.object(self.client, "client") as mock_boto_client: + mock_boto_client.invoke_agent_runtime.return_value = mock_response + + chunks = list( + self.client.invoke_agent( + input_text=self.test_input, + conversation_id=self.test_conversation_id, + user_id=self.test_user_id, + ) + ) + + # Find tool usage chunk + tool_chunks = [chunk for chunk in chunks if chunk.get("type") == "tool_use"] + assert len(tool_chunks) == 1 + assert tool_chunks[0]["toolUsage"]["toolName"] == "test_tool" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/source/lambda/agentcore-invocation/test/test_event_processor.py b/source/lambda/agentcore-invocation/test/test_event_processor.py new file mode 100644 index 00000000..5e0e2576 --- /dev/null +++ b/source/lambda/agentcore-invocation/test/test_event_processor.py @@ -0,0 +1,668 @@ +#!/usr/bin/env python3 +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +from unittest.mock import patch +from uuid import UUID + +import pytest +from utils.constants import ( + CONNECTION_ID_KEY, + CONVERSATION_ID_KEY, + FILES_KEY, + INPUT_TEXT_KEY, + MESSAGE_ID_KEY, + MESSAGE_KEY, + REQUEST_CONTEXT_KEY, + USER_ID_KEY, +) +from utils.event_processor import EventProcessor, EventProcessorError, InvalidEventError, MissingDataError + + +class TestEventProcessorInitialization: + """Test EventProcessor initialization and basic functionality.""" + + def test_successful_initialization_with_valid_event(self): + """Test successful initialization with a valid event.""" + event = { + "body": json.dumps( + { + REQUEST_CONTEXT_KEY: { + CONNECTION_ID_KEY: "test-connection-123", + "authorizer": {"UserId": "user-456"}, + }, + MESSAGE_KEY: {INPUT_TEXT_KEY: "Hello, world!", CONVERSATION_ID_KEY: "conv-789"}, + } + ) + } + + processor = EventProcessor(event) + + assert processor.event == event + assert isinstance(processor.event_body, dict) + assert processor.event_body[REQUEST_CONTEXT_KEY][CONNECTION_ID_KEY] == "test-connection-123" + + def test_initialization_with_empty_body(self): + """Test initialization with empty body.""" + event = {"body": ""} + + with pytest.raises(InvalidEventError) as exc_info: + EventProcessor(event) + + assert "Invalid JSON in event body" in str(exc_info.value) + + def test_initialization_with_missing_body(self): + """Test initialization with missing body key.""" + event = {} + + processor = EventProcessor(event) + + assert processor.event_body == {} + + def test_initialization_with_invalid_json(self): + """Test initialization with invalid JSON in body.""" + event = {"body": "invalid json {"} + + with pytest.raises(InvalidEventError) as exc_info: + EventProcessor(event) + + assert "Invalid JSON in event body" in str(exc_info.value) + + def test_initialization_with_non_dict_json(self): + """Test initialization with valid JSON that's not a dictionary.""" + event = {"body": json.dumps(["not", "a", "dict"])} + + with pytest.raises(InvalidEventError) as exc_info: + EventProcessor(event) + + assert "Event body must be a JSON object" in str(exc_info.value) + + def test_initialization_with_string_json(self): + """Test initialization with string JSON.""" + event = {"body": json.dumps("just a string")} + + with pytest.raises(InvalidEventError) as exc_info: + EventProcessor(event) + + assert "Event body must be a JSON object" in str(exc_info.value) + + +class TestGetConnectionId: + """Test the get_connection_id method.""" + + def test_get_connection_id_success(self): + """Test successful retrieval of connection ID.""" + event = {"body": json.dumps({REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: "test-connection-123"}})} + + processor = EventProcessor(event) + connection_id = processor.get_connection_id() + + assert connection_id == "test-connection-123" + + def test_get_connection_id_missing_request_context(self): + """Test get_connection_id when requestContext is missing.""" + event = {"body": json.dumps({})} + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError) as exc_info: + processor.get_connection_id() + + assert "Connection ID not found in event" in str(exc_info.value) + + def test_get_connection_id_missing_connection_id(self): + """Test get_connection_id when connectionId is missing.""" + event = {"body": json.dumps({REQUEST_CONTEXT_KEY: {}})} + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError) as exc_info: + processor.get_connection_id() + + assert "Connection ID not found in event" in str(exc_info.value) + + def test_get_connection_id_with_none_value(self): + """Test get_connection_id when connectionId is None.""" + event = {"body": json.dumps({REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: None}})} + + processor = EventProcessor(event) + connection_id = processor.get_connection_id() + + assert connection_id is None + + +class TestGetMessage: + """Test the get_message method.""" + + def test_get_message_success(self): + """Test successful retrieval of message.""" + message_data = {INPUT_TEXT_KEY: "Hello, world!", CONVERSATION_ID_KEY: "conv-123"} + event = {"body": json.dumps({MESSAGE_KEY: message_data})} + + processor = EventProcessor(event) + message = processor.get_message() + + assert message == message_data + + def test_get_message_missing(self): + """Test get_message when message is missing.""" + event = {"body": json.dumps({})} + + processor = EventProcessor(event) + + with pytest.raises(MissingDataError) as exc_info: + processor.get_message() + + assert "Message is required but not found in event body" in str(exc_info.value) + + def test_get_message_empty(self): + """Test get_message when message is empty.""" + event = {"body": json.dumps({MESSAGE_KEY: {}})} + + processor = EventProcessor(event) + + # Empty dict {} is falsy in Python, so this should raise an error + with pytest.raises(MissingDataError) as exc_info: + processor.get_message() + + assert "Message is required but not found in event body" in str(exc_info.value) + + def test_get_message_none(self): + """Test get_message when message is None.""" + event = {"body": json.dumps({MESSAGE_KEY: None})} + + processor = EventProcessor(event) + + with pytest.raises(MissingDataError) as exc_info: + processor.get_message() + + assert "Message is required but not found in event body" in str(exc_info.value) + + +class TestGetInputText: + """Test the get_input_text method.""" + + def test_get_input_text_success(self): + """Test successful retrieval of input text.""" + event = {"body": json.dumps({MESSAGE_KEY: {INPUT_TEXT_KEY: "Hello, how are you?"}})} + + processor = EventProcessor(event) + input_text = processor.get_input_text() + + assert input_text == "Hello, how are you?" + + def test_get_input_text_missing(self): + """Test get_input_text when inputText is missing.""" + event = {"body": json.dumps({MESSAGE_KEY: {"someOtherField": "value"}})} + + processor = EventProcessor(event) + input_text = processor.get_input_text() + + assert input_text == "" + + def test_get_input_text_empty_string(self): + """Test get_input_text when inputText is empty string.""" + event = {"body": json.dumps({MESSAGE_KEY: {INPUT_TEXT_KEY: ""}})} + + processor = EventProcessor(event) + input_text = processor.get_input_text() + + assert input_text == "" + + def test_get_input_text_none(self): + """Test get_input_text when inputText is None.""" + event = {"body": json.dumps({MESSAGE_KEY: {INPUT_TEXT_KEY: None}})} + + processor = EventProcessor(event) + input_text = processor.get_input_text() + + # The get() method with default "" should return "" when value is None + assert input_text == "" + + +class TestGetConversationId: + """Test the get_conversation_id method.""" + + def test_get_conversation_id_success(self): + """Test successful retrieval of conversation ID.""" + event = {"body": json.dumps({MESSAGE_KEY: {CONVERSATION_ID_KEY: "conv-123-456"}})} + + processor = EventProcessor(event) + conversation_id = processor.get_conversation_id() + + assert conversation_id == "conv-123-456" + + def test_get_conversation_id_missing(self): + """Test get_conversation_id when conversationId is missing.""" + event = {"body": json.dumps({MESSAGE_KEY: {"someOtherField": "value"}})} + + processor = EventProcessor(event) + conversation_id = processor.get_conversation_id() + + # Should generate a new UUID + assert conversation_id != "" + assert len(conversation_id) == 36 # UUID4 length + # Verify it's a valid UUID + UUID(conversation_id) + + def test_get_conversation_id_empty_string(self): + """Test get_conversation_id when conversationId is empty string.""" + event = {"body": json.dumps({MESSAGE_KEY: {CONVERSATION_ID_KEY: ""}})} + + processor = EventProcessor(event) + conversation_id = processor.get_conversation_id() + + # Should generate a new UUID + assert conversation_id != "" + assert len(conversation_id) == 36 + UUID(conversation_id) + + def test_get_conversation_id_whitespace_only(self): + """Test get_conversation_id when conversationId is whitespace only.""" + event = {"body": json.dumps({MESSAGE_KEY: {CONVERSATION_ID_KEY: " "}})} + + processor = EventProcessor(event) + conversation_id = processor.get_conversation_id() + + # Should generate a new UUID + assert conversation_id != " " + assert len(conversation_id) == 36 + UUID(conversation_id) + + def test_get_conversation_id_none(self): + """Test get_conversation_id when conversationId is None.""" + event = {"body": json.dumps({MESSAGE_KEY: {CONVERSATION_ID_KEY: None}})} + + processor = EventProcessor(event) + conversation_id = processor.get_conversation_id() + + # Should generate a new UUID when None + assert conversation_id != "" + assert len(conversation_id) == 36 + UUID(conversation_id) + + +class TestGetUserId: + """Test the get_user_id method.""" + + def test_get_user_id_success(self): + """Test successful retrieval of user ID.""" + event = {"body": json.dumps({REQUEST_CONTEXT_KEY: {"authorizer": {"UserId": "user-123-456"}}})} + + processor = EventProcessor(event) + user_id = processor.get_user_id() + + assert user_id == "user-123-456" + + def test_get_user_id_missing_request_context(self): + """Test get_user_id when requestContext is missing.""" + event = {"body": json.dumps({})} + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError) as exc_info: + processor.get_user_id() + + assert "User ID not found in event" in str(exc_info.value) + + def test_get_user_id_missing_authorizer(self): + """Test get_user_id when authorizer is missing.""" + event = {"body": json.dumps({REQUEST_CONTEXT_KEY: {}})} + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError) as exc_info: + processor.get_user_id() + + assert "User ID not found in event" in str(exc_info.value) + + def test_get_user_id_missing_user_id(self): + """Test get_user_id when UserId is missing.""" + event = {"body": json.dumps({REQUEST_CONTEXT_KEY: {"authorizer": {}}})} + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError) as exc_info: + processor.get_user_id() + + assert "User ID not found in event" in str(exc_info.value) + + def test_get_user_id_none(self): + """Test get_user_id when UserId is None.""" + event = {"body": json.dumps({REQUEST_CONTEXT_KEY: {"authorizer": {"UserId": None}}})} + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError) as exc_info: + processor.get_user_id() + + assert "User ID not found in event" in str(exc_info.value) + + +class TestGetMessageId: + """Test the get_message_id method.""" + + def test_get_message_id_success(self): + """Test successful retrieval of message ID from WebSocket payload.""" + test_message_id = "websocket-msg-12345" + event = {"body": json.dumps({MESSAGE_KEY: {MESSAGE_ID_KEY: test_message_id}})} + + processor = EventProcessor(event) + message_id = processor.get_message_id() + + assert message_id == test_message_id + + def test_get_message_id_missing(self): + """Test get_message_id when messageId is missing from WebSocket payload generates new UUID""" + event = {"body": json.dumps({MESSAGE_KEY: {INPUT_TEXT_KEY: "Hello"}})} + + processor = EventProcessor(event) + message_id = processor.get_message_id() + + assert message_id != "" + assert len(message_id) == 36 # UUID4 length + # Verify it's a valid UUID + UUID(message_id) + + def test_get_message_id_empty_string(self): + """Test get_message_id when messageId is empty string.""" + event = {"body": json.dumps({MESSAGE_KEY: {MESSAGE_ID_KEY: ""}})} + + processor = EventProcessor(event) + message_id = processor.get_message_id() + + # Should generate a new UUID + assert message_id != "" + assert len(message_id) == 36 + UUID(message_id) + + def test_get_message_id_none_value(self): + """Test get_message_id when messageId is None.""" + event = {"body": json.dumps({MESSAGE_KEY: {MESSAGE_ID_KEY: None}})} + + processor = EventProcessor(event) + message_id = processor.get_message_id() + + # Should generate a new UUID when None + assert message_id != "" + assert len(message_id) == 36 + UUID(message_id) + + def test_get_message_id_generates_unique_ids(self): + """Test that get_message_id generates unique IDs when called multiple times.""" + event = {"body": json.dumps({MESSAGE_KEY: {INPUT_TEXT_KEY: "Hello"}})} + + processor = EventProcessor(event) + message_id1 = processor.get_message_id() + message_id2 = processor.get_message_id() + + assert message_id1 != message_id2 + UUID(message_id1) + UUID(message_id2) + + +class TestGetFiles: + """Test the get_files method.""" + + def test_get_files_success(self): + """Test successful retrieval of files from message.""" + test_files = [ + {"fileReference": "file-123", "fileName": "document.pdf"}, + {"fileReference": "file-456", "fileName": "image.jpg"}, + ] + event = {"body": json.dumps({MESSAGE_KEY: {"files": test_files}})} + + processor = EventProcessor(event) + files = processor.get_files() + + assert files == test_files + + def test_get_files_missing(self): + """Test get_files when files key is missing.""" + event = {"body": json.dumps({MESSAGE_KEY: {INPUT_TEXT_KEY: "Hello"}})} + + processor = EventProcessor(event) + files = processor.get_files() + + assert files == [] + + def test_get_files_empty_list(self): + """Test get_files when files is an empty list.""" + event = {"body": json.dumps({MESSAGE_KEY: {"files": []}})} + + processor = EventProcessor(event) + files = processor.get_files() + + assert files == [] + + def test_get_files_none_value(self): + """Test get_files when files is None.""" + event = {"body": json.dumps({MESSAGE_KEY: {"files": None}})} + + processor = EventProcessor(event) + files = processor.get_files() + + assert files is None + + +class TestProcessMethod: + """Test the process method.""" + + def test_process_success_with_complete_event(self): + """Test successful processing with complete event data.""" + event = { + "body": json.dumps( + { + REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: "conn-123", "authorizer": {"UserId": "user-456"}}, + MESSAGE_KEY: {INPUT_TEXT_KEY: "Hello, world!", CONVERSATION_ID_KEY: "conv-789"}, + } + ) + } + + processor = EventProcessor(event) + result = processor.process() + + assert result[CONNECTION_ID_KEY] == "conn-123" + assert result[USER_ID_KEY] == "user-456" + assert result[INPUT_TEXT_KEY] == "Hello, world!" + assert result[CONVERSATION_ID_KEY] == "conv-789" + assert MESSAGE_ID_KEY in result + assert FILES_KEY in result + assert result[FILES_KEY] == [] # Empty files list when not provided + UUID(result[MESSAGE_ID_KEY]) # Verify it's a valid UUID + + def test_process_success_with_minimal_event(self): + """Test successful processing with minimal event data.""" + event = { + "body": json.dumps( + { + REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: "conn-123", "authorizer": {"UserId": "user-456"}}, + MESSAGE_KEY: {"someField": "value"}, # Non-empty dict so get_message() works + } + ) + } + + processor = EventProcessor(event) + result = processor.process() + + assert result[CONNECTION_ID_KEY] == "conn-123" + assert result[USER_ID_KEY] == "user-456" + assert result[INPUT_TEXT_KEY] == "" + assert len(result[CONVERSATION_ID_KEY]) == 36 # Generated UUID + assert MESSAGE_ID_KEY in result + assert FILES_KEY in result + assert result[FILES_KEY] == [] # Empty files list when not provided + UUID(result[CONVERSATION_ID_KEY]) + UUID(result[MESSAGE_ID_KEY]) + + def test_process_success_with_files(self): + """Test successful processing with files included.""" + test_files = [ + {"fileReference": "file-123", "fileName": "document.pdf"}, + {"fileReference": "file-456", "fileName": "image.jpg"}, + ] + test_message_id = "msg-with-files-123" + event = { + "body": json.dumps( + { + REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: "conn-123", "authorizer": {"UserId": "user-456"}}, + MESSAGE_KEY: { + INPUT_TEXT_KEY: "Analyze these files", + CONVERSATION_ID_KEY: "conv-789", + MESSAGE_ID_KEY: test_message_id, + FILES_KEY: test_files, + }, + } + ) + } + + processor = EventProcessor(event) + result = processor.process() + + assert result[CONNECTION_ID_KEY] == "conn-123" + assert result[USER_ID_KEY] == "user-456" + assert result[INPUT_TEXT_KEY] == "Analyze these files" + assert result[CONVERSATION_ID_KEY] == "conv-789" + assert result[MESSAGE_ID_KEY] == test_message_id + assert result[FILES_KEY] == test_files + + def test_process_missing_connection_id(self): + """Test process method when connection ID is missing.""" + event = {"body": json.dumps({REQUEST_CONTEXT_KEY: {"authorizer": {"UserId": "user-456"}}, MESSAGE_KEY: {}})} + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError) as exc_info: + processor.process() + + assert "Connection ID not found in event" in str(exc_info.value) + + def test_process_missing_user_id(self): + """Test process method when user ID is missing.""" + event = { + "body": json.dumps( + {REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: "conn-123"}, MESSAGE_KEY: {"someField": "value"}} + ) + } + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError) as exc_info: + processor.process() + + assert "User ID not found in event" in str(exc_info.value) + + def test_process_missing_message(self): + """Test process method when message is missing.""" + event = { + "body": json.dumps( + {REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: "conn-123", "authorizer": {"UserId": "user-456"}}} + ) + } + + processor = EventProcessor(event) + + with pytest.raises(MissingDataError) as exc_info: + processor.process() + + assert "Message is required but not found in event body" in str(exc_info.value) + + @patch("utils.event_processor.logger") + def test_process_logs_errors(self, mock_logger): + """Test that process method logs errors.""" + event = {"body": json.dumps({})} + + processor = EventProcessor(event) + + with pytest.raises(InvalidEventError): + processor.process() + + mock_logger.error.assert_called() + assert "Error processing event" in mock_logger.error.call_args[0][0] + + +class TestEventProcessorIntegration: + """Integration tests for EventProcessor.""" + + def test_complete_websocket_event_processing(self): + """Test processing a complete WebSocket event.""" + # Simulate a real WebSocket event structure + event = { + "body": json.dumps( + { + REQUEST_CONTEXT_KEY: { + CONNECTION_ID_KEY: "L0SM9cOFvHcCIhw=", + "routeKey": "sendMessage", + "authorizer": {"UserId": "test-user-12345", "principalId": "test-user-12345"}, + "requestId": "L0SM9dGrPHcFbGA=", + "apiId": "1234567890", + }, + MESSAGE_KEY: { + INPUT_TEXT_KEY: "What is the weather like today?", + CONVERSATION_ID_KEY: "conv-abcd-1234-efgh-5678", + "timestamp": "2023-12-25T10:30:45Z", + }, + } + ) + } + + processor = EventProcessor(event) + result = processor.process() + + # Verify all expected fields are present and correct + assert result[CONNECTION_ID_KEY] == "L0SM9cOFvHcCIhw=" + assert result[USER_ID_KEY] == "test-user-12345" + assert result[INPUT_TEXT_KEY] == "What is the weather like today?" + assert result[CONVERSATION_ID_KEY] == "conv-abcd-1234-efgh-5678" + assert MESSAGE_ID_KEY in result + assert FILES_KEY in result + assert result[FILES_KEY] == [] # Empty files list when not provided + + # Verify message ID is a valid UUID + UUID(result[MESSAGE_ID_KEY]) + + def test_event_with_special_characters(self): + """Test processing event with special characters in text.""" + event = { + "body": json.dumps( + { + REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: "conn-123", "authorizer": {"UserId": "user-456"}}, + MESSAGE_KEY: { + INPUT_TEXT_KEY: "Hello! How are you? 😊 I'm testing special chars: @#$%^&*()", + CONVERSATION_ID_KEY: "conv-special-chars-123", + }, + } + ) + } + + processor = EventProcessor(event) + result = processor.process() + + assert result[INPUT_TEXT_KEY] == "Hello! How are you? 😊 I'm testing special chars: @#$%^&*()" + assert result[CONVERSATION_ID_KEY] == "conv-special-chars-123" + + def test_event_with_unicode_characters(self): + """Test processing event with Unicode characters.""" + event = { + "body": json.dumps( + { + REQUEST_CONTEXT_KEY: {CONNECTION_ID_KEY: "conn-unicode", "authorizer": {"UserId": "user-unicode"}}, + MESSAGE_KEY: { + INPUT_TEXT_KEY: "こんにちは世界! Здравствуй мир! مرحبا بالعالم!", + CONVERSATION_ID_KEY: "conv-unicode-test", + }, + }, + ensure_ascii=False, + ) + } + + processor = EventProcessor(event) + result = processor.process() + + assert result[INPUT_TEXT_KEY] == "こんにちは世界! Здравствуй мир! مرحبا بالعالم!" + assert result[CONVERSATION_ID_KEY] == "conv-unicode-test" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/source/lambda/agentcore-invocation/test/test_handler_integration.py b/source/lambda/agentcore-invocation/test/test_handler_integration.py new file mode 100644 index 00000000..4bed930d --- /dev/null +++ b/source/lambda/agentcore-invocation/test/test_handler_integration.py @@ -0,0 +1,1111 @@ +#!/usr/bin/env python3 +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +import os +from unittest.mock import MagicMock, Mock, patch + +import pytest +from botocore.exceptions import ClientError + +# Set up environment variables before importing +os.environ["WEBSOCKET_CALLBACK_URL"] = "wss://test.execute-api.us-east-1.amazonaws.com/test" +os.environ["AGENT_RUNTIME_ARN"] = "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime" +os.environ["_X_AMZN_TRACE_ID"] = "Root=1-12345678-123456789abcdef0;Parent=123456789abcdef0;Sampled=1" + +from handler import ( + format_response, + get_agentcore_client, + invoke_agent_core, + lambda_handler, + send_error_message, + send_websocket_message, +) +from utils.agentcore_client import AgentCoreClient, AgentCoreConfigurationError, AgentCoreInvocationError + + +class TestLambdaHandlerIntegration: + """Test lambda handler integration with AgentCore client.""" + + def setup_method(self): + """Set up test fixtures.""" + self.sample_event = { + "Records": [ + { + "messageId": "test-message-id-1", + "body": json.dumps( + { + "requestContext": { + "connectionId": "test-connection-1", + "authorizer": {"UserId": "test-user-1"}, + }, + "message": { + "conversationId": "test-conversation-1", + "inputText": "Hello, how are you?", + "userId": "test-user-1", + "messageId": "test-msg-1", + }, + } + ), + "messageAttributes": { + "connectionId": {"stringValue": "test-connection-1"}, + "conversationId": {"stringValue": "test-conversation-1"}, + "userId": {"stringValue": "test-user-1"}, + }, + } + ] + } + + self.mock_context = Mock() + self.mock_context.get_remaining_time_in_millis.return_value = 30000 + + def test_successful_lambda_handler_execution(self): + """Test successful lambda handler execution with AgentCore integration.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws: + + # Mock successful AgentCore client + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + {"text": "Hello! I'm doing well, thank you for asking.", "type": "content"}, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute lambda handler + result = lambda_handler(self.sample_event, self.mock_context) + + # Verify successful execution + assert "batchItemFailures" in result + assert len(result["batchItemFailures"]) == 0 + + mock_client.invoke_agent.assert_called_once() + call_args = mock_client.invoke_agent.call_args + assert call_args[1]["input_text"] == "Hello, how are you?" + assert call_args[1]["conversation_id"] == "test-conversation-1" + assert call_args[1]["user_id"] == "test-user-1" + assert "message_id" in call_args[1] # Message ID should be present + assert "files" in call_args[1] # Files should be present (empty list) + + # Verify WebSocket messages were sent + assert mock_send_ws.call_count >= 2 # Content + END token + + def test_lambda_handler_with_multiple_records(self): + """Test lambda handler with multiple SQS records.""" + multi_record_event = { + "Records": [ + { + "messageId": "test-message-id-1", + "body": json.dumps( + { + "requestContext": { + "connectionId": "test-connection-1", + "authorizer": {"UserId": "test-user-1"}, + }, + "message": { + "conversationId": "test-conversation-1", + "inputText": "First message", + "userId": "test-user-1", + "messageId": "test-msg-1", + }, + } + ), + "messageAttributes": { + "connectionId": {"stringValue": "test-connection-1"}, + "conversationId": {"stringValue": "test-conversation-1"}, + "userId": {"stringValue": "test-user-1"}, + }, + }, + { + "messageId": "test-message-id-2", + "body": json.dumps( + { + "requestContext": { + "connectionId": "test-connection-2", + "authorizer": {"UserId": "test-user-2"}, + }, + "message": { + "conversationId": "test-conversation-2", + "inputText": "Second message", + "userId": "test-user-2", + "messageId": "test-msg-2", + }, + } + ), + "messageAttributes": { + "connectionId": {"stringValue": "test-connection-2"}, + "conversationId": {"stringValue": "test-conversation-2"}, + "userId": {"stringValue": "test-user-2"}, + }, + }, + ] + } + + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws: + + # Mock successful AgentCore client + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + {"text": "Response", "type": "content"}, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute lambda handler + result = lambda_handler(multi_record_event, self.mock_context) + + # Verify both records were processed + assert len(result["batchItemFailures"]) == 0 + assert mock_client.invoke_agent.call_count == 2 + + def test_lambda_handler_timeout_handling(self): + """Test lambda handler timeout handling.""" + # Set remaining time to be below threshold + self.mock_context.get_remaining_time_in_millis.return_value = 10000 + + with patch("handler.get_agentcore_client") as mock_get_client: + mock_client = Mock() + mock_get_client.return_value = mock_client + + # Execute lambda handler + result = lambda_handler(self.sample_event, self.mock_context) + + # Verify record was added to batch failures due to timeout + assert len(result["batchItemFailures"]) == 1 + assert result["batchItemFailures"][0]["itemIdentifier"] == "test-message-id-1" + + # Verify AgentCore was not called due to timeout + mock_client.invoke_agent.assert_not_called() + + def test_lambda_handler_error_propagation(self): + """Test lambda handler error handling and batch failure response.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_error_message" + ) as mock_send_error: + + # Mock AgentCore client to raise error + mock_client = Mock() + mock_client.invoke_agent.side_effect = AgentCoreInvocationError("Runtime error") + mock_get_client.return_value = mock_client + + # Execute lambda handler + result = lambda_handler(self.sample_event, self.mock_context) + + # Verify error was handled + mock_send_error.assert_called_once() + + # Verify batch failure response + assert len(result["batchItemFailures"]) == 1 + assert result["batchItemFailures"][0]["itemIdentifier"] == "test-message-id-1" + + def test_lambda_handler_connection_specific_failure(self): + """Test lambda handler handling of connection-specific failures.""" + # Create event with multiple records for same connection + same_connection_event = { + "Records": [ + { + "messageId": "test-message-id-1", + "body": json.dumps( + { + "requestContext": { + "connectionId": "test-connection-1", + "authorizer": {"UserId": "test-user-1"}, + }, + "message": { + "conversationId": "test-conversation-1", + "inputText": "First message", + "userId": "test-user-1", + "messageId": "test-msg-1", + }, + } + ), + "messageAttributes": { + "connectionId": {"stringValue": "test-connection-1"}, + "conversationId": {"stringValue": "test-conversation-1"}, + "userId": {"stringValue": "test-user-1"}, + }, + }, + { + "messageId": "test-message-id-2", + "body": json.dumps( + { + "requestContext": { + "connectionId": "test-connection-1", + "authorizer": {"UserId": "test-user-1"}, + }, + "message": { + "conversationId": "test-conversation-2", + "inputText": "Second message", + "userId": "test-user-1", + "messageId": "test-msg-2", + }, + } + ), + "messageAttributes": { + "connectionId": {"stringValue": "test-connection-1"}, + "conversationId": {"stringValue": "test-conversation-2"}, + "userId": {"stringValue": "test-user-1"}, + }, + }, + ] + } + + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_error_message" + ) as mock_send_error: + + # Mock AgentCore client to raise error on first call + mock_client = Mock() + mock_client.invoke_agent.side_effect = AgentCoreInvocationError("Runtime error") + mock_get_client.return_value = mock_client + + # Execute lambda handler + result = lambda_handler(same_connection_event, self.mock_context) + + # Verify both records failed due to same connection + assert len(result["batchItemFailures"]) == 2 + failed_ids = [failure["itemIdentifier"] for failure in result["batchItemFailures"]] + assert "test-message-id-1" in failed_ids + assert "test-message-id-2" in failed_ids + + +class TestInvokeAgentCoreFunction: + """Test the invoke_agent_core function with various scenarios.""" + + def setup_method(self): + """Set up test fixtures.""" + self.connection_id = "test-connection" + self.conversation_id = "test-conversation" + self.input_text = "Test input" + self.user_id = "test-user" + self.message_id = "test-message" + + def test_successful_streaming_invocation(self): + """Test successful streaming invocation flow.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws: + + # Mock successful streaming response + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + {"text": "Streaming response chunk 1", "type": "content"}, + {"text": "Streaming response chunk 2", "type": "content"}, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + mock_client.invoke_agent.assert_called_once() + call_args = mock_client.invoke_agent.call_args + assert call_args[1]["input_text"] == self.input_text + assert call_args[1]["conversation_id"] == self.conversation_id + assert call_args[1]["user_id"] == self.user_id + assert "message_id" in call_args[1] # Message ID should be present + assert "files" in call_args[1] # Files should be present + + # Verify WebSocket messages were sent (2 content + 1 END token) + assert mock_send_ws.call_count == 3 + + def test_streaming_invocation_failure(self): + """Test streaming invocation failure (no fallback since we removed backwards compatibility).""" + with patch("handler.get_agentcore_client") as mock_get_client: + + # Mock streaming failure + mock_client = Mock() + mock_client.invoke_agent.side_effect = AgentCoreInvocationError("Streaming failed") + mock_get_client.return_value = mock_client + + # Execute function and expect error + with pytest.raises(AgentCoreInvocationError) as exc_info: + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify only one call was made (no fallback) + assert mock_client.invoke_agent.call_count == 1 + assert "Streaming failed" in str(exc_info.value) + + def test_streaming_invocation_error_propagation(self): + """Test that streaming invocation errors are properly propagated.""" + with patch("handler.get_agentcore_client") as mock_get_client: + + # Mock streaming failure + mock_client = Mock() + mock_client.invoke_agent.side_effect = AgentCoreInvocationError("Streaming failed") + mock_get_client.return_value = mock_client + + # Execute function and expect error + with pytest.raises(AgentCoreInvocationError) as exc_info: + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify only one call was made (no fallback) + assert mock_client.invoke_agent.call_count == 1 + assert "Streaming failed" in str(exc_info.value) + + def test_configuration_error_handling(self): + """Test handling of AgentCore configuration errors.""" + with patch("handler.get_agentcore_client") as mock_get_client: + + # Mock configuration error + mock_get_client.side_effect = AgentCoreConfigurationError("Missing runtime ARN") + + # Execute function and expect error propagation + with pytest.raises(AgentCoreConfigurationError): + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + def test_error_chunk_handling(self): + """Test that error chunks from AgentCore raise an exception (matches chat lambda pattern).""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws: + + # Mock AgentCore client returning error chunk + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + {"type": "content", "text": "Processing your request..."}, + { + "type": "error", + "error": "ValidationException", + "message": "Invocation of model ID amazon.nova-pro-v1:0 with on-demand throughput isn't supported", + }, + ] + mock_get_client.return_value = mock_client + + # Execute function - should raise AgentCoreInvocationError + with pytest.raises(AgentCoreInvocationError) as exc_info: + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify the exception contains the error message + assert "amazon.nova-pro-v1:0" in str(exc_info.value) + assert "AgentCore streaming error" in str(exc_info.value) + + # Verify content chunk was sent before error + assert mock_send_ws.call_count == 1 + + def test_invocation_with_pdf_file(self): + """Test successful invocation with PDF file attachment.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws: + + # Mock successful response with file processing + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + {"text": "I've analyzed the PDF document you provided.", "type": "content"}, + {"text": "The document contains important information about...", "type": "content"}, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Test files with PDF (uppercase extension) + test_files = [ + { + "fileName": "document.PDF", + "fileType": "application/pdf", + "fileSize": 1024000, + "s3Key": "uploads/user123/document.PDF", + } + ] + + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text="Please analyze this document", + user_id=self.user_id, + message_id=self.message_id, + files=test_files, + ) + + mock_client.invoke_agent.assert_called_once() + call_args = mock_client.invoke_agent.call_args + assert call_args[1]["files"] == test_files + assert call_args[1]["input_text"] == "Please analyze this document" + + assert mock_send_ws.call_count == 3 # 2 content + 1 END token + + def test_invocation_with_multiple_files_mixed_case(self): + """Test successful invocation with multiple files having mixed case extensions.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws: + + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + {"text": "I've processed all the files you uploaded:", "type": "content"}, + {"text": "- The image shows a chart with quarterly data", "type": "content"}, + {"text": "- The document contains the detailed analysis", "type": "content"}, + {"text": "- The spreadsheet has the raw numbers", "type": "content"}, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + test_files = [ + { + "fileName": "chart.JPEG", + "fileType": "image/jpeg", + "fileSize": 512000, + "s3Key": "uploads/user123/chart.JPEG", + }, + { + "fileName": "report.pdf", + "fileType": "application/pdf", + "fileSize": 2048000, + "s3Key": "uploads/user123/report.pdf", + }, + { + "fileName": "data.XLSX", + "fileType": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "fileSize": 256000, + "s3Key": "uploads/user123/data.XLSX", + }, + ] + + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text="Compare the data across these files", + user_id=self.user_id, + message_id=self.message_id, + files=test_files, + ) + + # Verify AgentCore was called with all files + mock_client.invoke_agent.assert_called_once() + call_args = mock_client.invoke_agent.call_args + assert call_args[1]["files"] == test_files + assert len(call_args[1]["files"]) == 3 + + # Verify file extensions are preserved as-is + file_names = [f["fileName"] for f in call_args[1]["files"]] + assert "chart.JPEG" in file_names + assert "report.pdf" in file_names + assert "data.XLSX" in file_names + + # Verify WebSocket messages were sent + assert mock_send_ws.call_count == 5 # 4 content + 1 END token + + +class TestWebSocketCommunication: + """Test WebSocket message sending functionality.""" + + def test_send_websocket_message_success(self): + """Test successful WebSocket message sending.""" + with patch("handler.get_service_client") as mock_get_client: + mock_client = Mock() + mock_get_client.return_value = mock_client + + send_websocket_message( + connection_id="test-connection", + conversation_id="test-conversation", + message="Test message", + message_id="test-message", + ) + + # Verify client was called correctly + mock_client.post_to_connection.assert_called_once() + call_args = mock_client.post_to_connection.call_args + + assert call_args[1]["ConnectionId"] == "test-connection" + + # Verify message format + sent_data = json.loads(call_args[1]["Data"]) + assert sent_data["conversationId"] == "test-conversation" + assert sent_data["messageId"] == "test-message" + assert sent_data["data"] == "Test message" + + def test_send_websocket_message_error_handling(self): + """Test WebSocket message sending error handling.""" + with patch("handler.get_service_client") as mock_get_client, patch("handler.logger") as mock_logger: + + mock_client = Mock() + mock_client.post_to_connection.side_effect = Exception("WebSocket error") + mock_get_client.return_value = mock_client + + # Should not raise exception + with pytest.raises(Exception): + send_websocket_message( + connection_id="test-connection", + conversation_id="test-conversation", + message="Test message", + message_id="test-message", + ) + + def test_send_error_message_format(self): + """Test error message formatting and sending with fixed error message.""" + with patch("handler.WebsocketErrorHandler") as mock_error_handler_class: + mock_error_handler = Mock() + mock_error_handler_class.return_value = mock_error_handler + + send_error_message( + connection_id="test-connection", + conversation_id="test-conversation", + message_id="test-message", + ) + + # Verify WebsocketErrorHandler was instantiated correctly + mock_error_handler_class.assert_called_once() + call_kwargs = mock_error_handler_class.call_args[1] + assert call_kwargs["connection_id"] == "test-connection" + assert call_kwargs["conversation_id"] == "test-conversation" + assert call_kwargs["message_id"] == "test-message" + assert "trace_id" in call_kwargs + + # Verify post_token_to_connection was called with the fixed error message + mock_error_handler.post_token_to_connection.assert_called_once() + error_message = mock_error_handler.post_token_to_connection.call_args[0][0] + assert "AgentCore invocation service failed to respond" in error_message + assert "quote the following trace id:" in error_message + + def test_format_response_function(self): + """Test response formatting function.""" + # Test basic response + formatted = format_response("test-conversation", "test-message", data="test-data") + data = json.loads(formatted) + + assert data["conversationId"] == "test-conversation" + assert data["messageId"] == "test-message" + assert data["data"] == "test-data" + + # Test error response + formatted_error = format_response( + "test-conversation", "test-message", errorMessage="test-error", traceId="test-trace" + ) + error_data = json.loads(formatted_error) + + assert error_data["errorMessage"] == "test-error" + assert error_data["traceId"] == "test-trace" + + +class TestAgentCoreClientGlobal: + """Test global AgentCore client management.""" + + def test_get_agentcore_client_singleton(self): + """Test that get_agentcore_client returns singleton instance.""" + with patch("handler.AgentCoreClient") as mock_client_class: + mock_instance = Mock() + mock_client_class.return_value = mock_instance + + # Clear global client + import handler + + handler._agentcore_client = None + + # First call should create instance + client1 = get_agentcore_client() + assert client1 == mock_instance + mock_client_class.assert_called_once() + + # Second call should return same instance + client2 = get_agentcore_client() + assert client2 == mock_instance + assert client1 is client2 + # Should not create new instance + mock_client_class.assert_called_once() + + def test_get_agentcore_client_configuration_error(self): + """Test get_agentcore_client with configuration error.""" + with patch("handler.AgentCoreClient") as mock_client_class: + mock_client_class.side_effect = AgentCoreConfigurationError("Config error") + + # Clear global client + import handler + + handler._agentcore_client = None + + # Should propagate configuration error + with pytest.raises(AgentCoreConfigurationError): + get_agentcore_client() + + +class TestToolUsageChunkForwarding: + """Test tool usage chunk forwarding from agent to WebSocket.""" + + def setup_method(self): + """Set up test fixtures.""" + self.connection_id = "test-connection" + self.conversation_id = "test-conversation" + self.input_text = "Test input" + self.user_id = "test-user" + self.message_id = "test-message" + + def test_tool_usage_chunk_forwarding_started(self): + """Test forwarding of tool_use chunk with 'started' status.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws, patch("handler.send_tool_usage") as mock_send_tool: + + # Mock agent response with tool usage chunk + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + { + "type": "tool_use", + "toolUsage": { + "toolName": "test_tool", + "status": "started", + "startTime": "2025-01-08T12:00:00Z", + "mcpServerName": "test-mcp-server", + }, + }, + {"text": "Tool result", "type": "content"}, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify send_tool_usage was called with correct structure + mock_send_tool.assert_called_once() + call_args = mock_send_tool.call_args + + assert call_args[0][0] == self.connection_id + assert call_args[0][1] == self.conversation_id + assert call_args[0][3] == self.message_id + + tool_usage = call_args[0][2] + assert tool_usage["toolName"] == "test_tool" + assert tool_usage["status"] == "started" + assert tool_usage["startTime"] == "2025-01-08T12:00:00Z" + assert tool_usage["mcpServerName"] == "test-mcp-server" + + def test_tool_usage_chunk_forwarding_completed(self): + """Test forwarding of tool_use chunk with 'completed' status.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws, patch("handler.send_tool_usage") as mock_send_tool: + + # Mock agent response with completed tool usage + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + { + "type": "tool_use", + "toolUsage": { + "toolName": "test_tool", + "status": "completed", + "startTime": "2025-01-08T12:00:00Z", + "endTime": "2025-01-08T12:00:05Z", + "toolInput": {"param": "value"}, + "toolOutput": "Success", + }, + }, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify tool usage was sent + mock_send_tool.assert_called_once() + tool_usage = mock_send_tool.call_args[0][2] + + assert tool_usage["status"] == "completed" + assert tool_usage["endTime"] == "2025-01-08T12:00:05Z" + assert tool_usage["toolInput"] == {"param": "value"} + assert tool_usage["toolOutput"] == "Success" + + def test_tool_usage_chunk_forwarding_failed(self): + """Test forwarding of tool_use chunk with 'failed' status.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws, patch("handler.send_tool_usage") as mock_send_tool: + + # Mock agent response with failed tool usage + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + { + "type": "tool_use", + "toolUsage": { + "toolName": "test_tool", + "status": "failed", + "startTime": "2025-01-08T12:00:00Z", + "endTime": "2025-01-08T12:00:02Z", + "error": "Tool execution failed", + }, + }, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify tool usage was sent with error + mock_send_tool.assert_called_once() + tool_usage = mock_send_tool.call_args[0][2] + + assert tool_usage["status"] == "failed" + assert tool_usage["error"] == "Tool execution failed" + + def test_multiple_tool_usage_chunks(self): + """Test forwarding of multiple tool usage chunks in sequence.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws, patch("handler.send_tool_usage") as mock_send_tool: + + # Mock agent response with multiple tool usages + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + { + "type": "tool_use", + "toolUsage": { + "toolName": "tool_1", + "status": "started", + "startTime": "2025-01-08T12:00:00Z", + }, + }, + { + "type": "tool_use", + "toolUsage": { + "toolName": "tool_1", + "status": "completed", + "startTime": "2025-01-08T12:00:00Z", + "endTime": "2025-01-08T12:00:02Z", + }, + }, + { + "type": "tool_use", + "toolUsage": { + "toolName": "tool_2", + "status": "started", + "startTime": "2025-01-08T12:00:03Z", + }, + }, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify all tool usages were sent + assert mock_send_tool.call_count == 3 + + # Verify order and content + calls = mock_send_tool.call_args_list + assert calls[0][0][2]["toolName"] == "tool_1" + assert calls[0][0][2]["status"] == "started" + assert calls[1][0][2]["toolName"] == "tool_1" + assert calls[1][0][2]["status"] == "completed" + assert calls[2][0][2]["toolName"] == "tool_2" + assert calls[2][0][2]["status"] == "started" + + def test_tool_usage_with_mcp_server(self): + """Test tool usage chunk includes MCP server name.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws, patch("handler.send_tool_usage") as mock_send_tool: + + # Mock agent response with MCP tool + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + { + "type": "tool_use", + "toolUsage": { + "toolName": "mcp_file_read", + "status": "completed", + "startTime": "2025-01-08T12:00:00Z", + "endTime": "2025-01-08T12:00:01Z", + "mcpServerName": "filesystem", + }, + }, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify MCP server name is included + mock_send_tool.assert_called_once() + tool_usage = mock_send_tool.call_args[0][2] + assert tool_usage["mcpServerName"] == "filesystem" + + def test_tool_usage_without_mcp_server(self): + """Test tool usage chunk for built-in Strands tool (no MCP server).""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws, patch("handler.send_tool_usage") as mock_send_tool: + + # Mock agent response with built-in tool + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + { + "type": "tool_use", + "toolUsage": { + "toolName": "strands_builtin_tool", + "status": "completed", + "startTime": "2025-01-08T12:00:00Z", + "endTime": "2025-01-08T12:00:01Z", + }, + }, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify no MCP server name + mock_send_tool.assert_called_once() + tool_usage = mock_send_tool.call_args[0][2] + assert "mcpServerName" not in tool_usage + + def test_malformed_tool_usage_chunk_handling(self): + """Test handling of malformed tool usage chunks.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws, patch("handler.send_tool_usage") as mock_send_tool: + + # Mock agent response with malformed tool usage (missing toolUsage field) + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + {"type": "tool_use"}, # Missing toolUsage field + {"text": "Content", "type": "content"}, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function - should not crash + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify send_tool_usage was not called for malformed chunk + mock_send_tool.assert_not_called() + + # Verify content was still sent + mock_send_ws.assert_called() + + def test_send_tool_usage_websocket_message_format(self): + """Test that send_tool_usage formats WebSocket message correctly.""" + with patch("handler.get_service_client") as mock_get_client: + mock_client = Mock() + mock_get_client.return_value = mock_client + + tool_usage = { + "toolName": "test_tool", + "status": "completed", + "startTime": "2025-01-08T12:00:00Z", + "endTime": "2025-01-08T12:00:05Z", + } + + from handler import send_tool_usage + + send_tool_usage( + connection_id="test-connection", + conversation_id="test-conversation", + tool_usage=tool_usage, + message_id="test-message", + ) + + # Verify WebSocket message format + mock_client.post_to_connection.assert_called_once() + call_args = mock_client.post_to_connection.call_args + + assert call_args[1]["ConnectionId"] == "test-connection" + + # Parse and verify message structure + sent_data = json.loads(call_args[1]["Data"]) + assert sent_data["conversationId"] == "test-conversation" + assert sent_data["messageId"] == "test-message" + assert "toolUsage" in sent_data + assert sent_data["toolUsage"]["toolName"] == "test_tool" + assert sent_data["toolUsage"]["status"] == "completed" + + def test_send_tool_usage_error_handling(self): + """Test that send_tool_usage handles errors gracefully.""" + with patch("handler.get_service_client") as mock_get_client, patch("handler.logger") as mock_logger: + + mock_client = Mock() + mock_client.post_to_connection.side_effect = Exception("WebSocket error") + mock_get_client.return_value = mock_client + + tool_usage = { + "toolName": "test_tool", + "status": "started", + "startTime": "2025-01-08T12:00:00Z", + } + + from handler import send_tool_usage + + # Should not raise exception + send_tool_usage( + connection_id="test-connection", + conversation_id="test-conversation", + tool_usage=tool_usage, + message_id="test-message", + ) + + # Verify error was logged + assert any("Error sending tool usage" in str(call) for call in mock_logger.error.call_args_list) + + def test_tool_usage_does_not_interfere_with_keep_alive(self): + """Test that tool usage messages don't interfere with keep-alive.""" + with patch("handler.get_agentcore_client") as mock_get_client, patch( + "handler.send_websocket_message" + ) as mock_send_ws, patch("handler.send_tool_usage") as mock_send_tool, patch( + "handler.get_keep_alive_manager" + ) as mock_get_keep_alive: + + # Mock keep-alive manager + mock_keep_alive = Mock() + mock_get_keep_alive.return_value = mock_keep_alive + + # Mock agent response with tool usage + mock_client = Mock() + mock_client.invoke_agent.return_value = [ + { + "type": "tool_use", + "toolUsage": { + "toolName": "test_tool", + "status": "started", + "startTime": "2025-01-08T12:00:00Z", + }, + }, + {"text": "Content", "type": "content"}, + {"type": "completion"}, + ] + mock_get_client.return_value = mock_client + + # Execute function + invoke_agent_core( + connection_id=self.connection_id, + conversation_id=self.conversation_id, + input_text=self.input_text, + user_id=self.user_id, + message_id=self.message_id, + files=[], + ) + + # Verify keep-alive was started and stopped + mock_keep_alive.start_keep_alive.assert_called_once() + mock_keep_alive.stop_keep_alive.assert_called_once() + + # Verify activity was updated for each chunk + assert mock_keep_alive.update_activity.call_count >= 2 + + +class TestTraceIdHandling: + """Test trace ID handling in error messages.""" + + @patch("handler.WebsocketErrorHandler") + @patch("handler.os.environ.get") + def test_send_error_message_extracts_root_trace_id(self, mock_env_get, mock_error_handler): + """Test that send_error_message extracts just the root trace ID from X-Ray format.""" + from handler import send_error_message + + # Set up mock with full X-Ray trace ID format + full_trace_id = "Root=1-68f6b98e-7ae43e64d1ed2eb8ad2029c9;Parent=2a9e358718ca94a7;Sampled=0" + mock_env_get.return_value = full_trace_id + mock_handler_instance = MagicMock() + mock_error_handler.return_value = mock_handler_instance + + # Execute + send_error_message("conn-123", "conv-456", "msg-789") + + # Verify WebsocketErrorHandler was called with extracted root trace ID only + mock_error_handler.assert_called_once() + call_kwargs = mock_error_handler.call_args[1] + assert call_kwargs["trace_id"] == "1-68f6b98e-7ae43e64d1ed2eb8ad2029c9" + assert "Root=" not in call_kwargs["trace_id"] + assert "Parent=" not in call_kwargs["trace_id"] + + # Verify error message contains only the root trace ID + mock_handler_instance.post_token_to_connection.assert_called_once() + error_message = mock_handler_instance.post_token_to_connection.call_args[0][0] + assert "1-68f6b98e-7ae43e64d1ed2eb8ad2029c9" in error_message + assert "Root=" not in error_message + assert "Parent=" not in error_message + assert "Please contact your system administrator for support and quote the following trace id:" in error_message + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/source/lambda/agentcore-invocation/test/test_helper.py b/source/lambda/agentcore-invocation/test/test_helper.py new file mode 100644 index 00000000..7597fa4a --- /dev/null +++ b/source/lambda/agentcore-invocation/test/test_helper.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +import pytest +from datetime import datetime +from unittest.mock import Mock, patch, MagicMock +import boto3 +from moto import mock_aws + +from utils.helper import ( + get_session, + get_service_client, + get_metrics_client, + json_serializer, + _session, + _service_clients, + _metrics_namespaces, +) +from utils.constants import CloudWatchNamespaces + + +class TestGetSession: + """Test the get_session function.""" + + def setup_method(self): + """Reset global session before each test.""" + import utils.helper + + utils.helper._session = None + + def test_get_session_creates_new_session(self): + """Test that get_session creates a new session when none exists.""" + session = get_session() + + assert session is not None + assert isinstance(session, boto3.session.Session) + + def test_get_session_returns_cached_session(self): + """Test that get_session returns the same session on subsequent calls.""" + session1 = get_session() + session2 = get_session() + + assert session1 is session2 + + def test_get_session_with_existing_global_session(self): + """Test get_session when global session already exists.""" + import utils.helper + + mock_session = Mock(spec=boto3.session.Session) + utils.helper._session = mock_session + + session = get_session() + + assert session is mock_session + + +class TestGetServiceClient: + """Test the get_service_client function.""" + + def setup_method(self): + """Reset global clients cache before each test.""" + import utils.helper + + utils.helper._service_clients = {} + utils.helper._session = None + + @mock_aws + def test_get_service_client_creates_new_client(self): + """Test that get_service_client creates a new client.""" + client = get_service_client("cloudwatch") + + assert client is not None + assert hasattr(client, "put_metric_data") # CloudWatch client method + + @mock_aws + def test_get_service_client_returns_cached_client(self): + """Test that get_service_client returns cached client on subsequent calls.""" + client1 = get_service_client("cloudwatch") + client2 = get_service_client("cloudwatch") + + assert client1 is client2 + + @mock_aws + def test_get_service_client_with_kwargs(self): + """Test get_service_client with additional kwargs.""" + client1 = get_service_client("cloudwatch", region_name="us-east-1") + client2 = get_service_client("cloudwatch", region_name="us-west-2") + + # Should be different clients due to different kwargs + assert client1 is not client2 + + @mock_aws + def test_get_service_client_same_kwargs_returns_cached(self): + """Test that same service with same kwargs returns cached client.""" + client1 = get_service_client("cloudwatch", region_name="us-east-1") + client2 = get_service_client("cloudwatch", region_name="us-east-1") + + assert client1 is client2 + + @mock_aws + def test_get_service_client_different_services(self): + """Test that different services create different clients.""" + cloudwatch_client = get_service_client("cloudwatch") + s3_client = get_service_client("s3") + + assert cloudwatch_client is not s3_client + + def test_get_service_client_cache_key_generation(self): + """Test that cache keys are generated correctly.""" + import utils.helper + + with patch("utils.helper.get_session") as mock_get_session: + mock_session = Mock() + mock_client1 = Mock() + mock_client2 = Mock() + mock_session.client.side_effect = [mock_client1, mock_client2] + mock_get_session.return_value = mock_session + + # Call with no kwargs + client1 = get_service_client("s3") + + # Call with kwargs + client2 = get_service_client("s3", region_name="us-east-1") + + # Should create different cache entries + assert len(utils.helper._service_clients) == 2 + assert client1 is not client2 + + +class TestGetMetricsClient: + """Test the get_metrics_client function.""" + + def setup_method(self): + """Reset global metrics cache before each test.""" + import utils.helper + + utils.helper._metrics_namespaces = {} + + @patch("utils.helper.Metrics") + def test_get_metrics_client_creates_new_client(self, mock_metrics_class): + """Test that get_metrics_client creates a new Metrics client.""" + mock_metrics_instance = Mock() + mock_metrics_class.return_value = mock_metrics_instance + + client = get_metrics_client(CloudWatchNamespaces.AGENTCORE_INVOCATION) + + assert client is mock_metrics_instance + mock_metrics_class.assert_called_once_with( + namespace=CloudWatchNamespaces.AGENTCORE_INVOCATION.value, + service="GAABUseCase-None", # Since USE_CASE_UUID_ENV_VAR is not set + ) + + @patch("utils.helper.Metrics") + def test_get_metrics_client_returns_cached_client(self, mock_metrics_class): + """Test that get_metrics_client returns cached client on subsequent calls.""" + mock_metrics_instance = Mock() + mock_metrics_class.return_value = mock_metrics_instance + + client1 = get_metrics_client(CloudWatchNamespaces.AGENTCORE_INVOCATION) + client2 = get_metrics_client(CloudWatchNamespaces.AGENTCORE_INVOCATION) + + assert client1 is client2 + # Should only be called once due to caching + mock_metrics_class.assert_called_once() + + @patch("utils.helper.Metrics") + def test_get_metrics_client_different_namespaces(self, mock_metrics_class): + """Test that different namespaces create different clients.""" + mock_metrics_instance1 = Mock() + mock_metrics_instance2 = Mock() + mock_metrics_class.side_effect = [mock_metrics_instance1, mock_metrics_instance2] + + client1 = get_metrics_client(CloudWatchNamespaces.AGENTCORE_INVOCATION) + client2 = get_metrics_client(CloudWatchNamespaces.COLD_STARTS) + + assert client1 is not client2 + assert mock_metrics_class.call_count == 2 + + @patch("utils.helper.METRICS_SERVICE_NAME", "GAABUseCase-test-uuid-123") + @patch("utils.helper.Metrics") + def test_get_metrics_client_with_use_case_uuid(self, mock_metrics_class): + """Test get_metrics_client with USE_CASE_UUID environment variable set.""" + mock_metrics_instance = Mock() + mock_metrics_class.return_value = mock_metrics_instance + + client = get_metrics_client(CloudWatchNamespaces.AGENTCORE_INVOCATION) + + mock_metrics_class.assert_called_once_with( + namespace=CloudWatchNamespaces.AGENTCORE_INVOCATION.value, service="GAABUseCase-test-uuid-123" + ) + + +class TestJsonSerializer: + """Test the json_serializer function.""" + + def test_json_serializer_with_datetime(self): + """Test json_serializer with datetime object.""" + dt = datetime(2023, 12, 25, 10, 30, 45) + result = json_serializer(dt) + + assert result == "2023-12-25T10:30:45" + + def test_json_serializer_with_json_serializable_object(self): + """Test json_serializer with JSON serializable objects.""" + test_cases = [ + "string", + 123, + 45.67, + True, + False, + None, + [1, 2, 3], + {"key": "value"}, + ] + + for obj in test_cases: + result = json_serializer(obj) + assert result == obj + + def test_json_serializer_with_non_serializable_object(self): + """Test json_serializer with non-JSON serializable object.""" + + class CustomObject: + def __init__(self, value): + self.value = value + + def __str__(self): + return f"CustomObject({self.value})" + + obj = CustomObject("test") + result = json_serializer(obj) + + assert result == "CustomObject(test)" + + def test_json_serializer_with_complex_non_serializable_object(self): + """Test json_serializer with complex non-serializable object.""" + # Create an object that can't be JSON serialized + obj = set([1, 2, 3]) # Sets are not JSON serializable + result = json_serializer(obj) + + # Should return string representation + assert isinstance(result, str) + assert "1" in result and "2" in result and "3" in result + + def test_json_serializer_with_nested_datetime(self): + """Test json_serializer with nested structure containing datetime.""" + dt = datetime(2023, 12, 25, 10, 30, 45) + # This tests the case where datetime is at the top level + result = json_serializer(dt) + assert result == "2023-12-25T10:30:45" + + @patch("utils.helper.logger") + def test_json_serializer_logs_serialization_failure(self, mock_logger): + """Test that json_serializer logs when serialization fails.""" + + class NonSerializableObject: + def __str__(self): + return "NonSerializableObject" + + obj = NonSerializableObject() + result = json_serializer(obj) + + assert result == "NonSerializableObject" + mock_logger.info.assert_called_once() + assert "Serializing failed for object" in mock_logger.info.call_args[0][0] + + def test_json_serializer_with_empty_objects(self): + """Test json_serializer with empty objects.""" + test_cases = [ + "", + [], + {}, + 0, + ] + + for obj in test_cases: + result = json_serializer(obj) + assert result == obj + + +class TestHelperIntegration: + """Integration tests for helper functions.""" + + def setup_method(self): + """Reset all global caches before each test.""" + import utils.helper + + utils.helper._session = None + utils.helper._service_clients = {} + utils.helper._metrics_namespaces = {} + + @mock_aws + @patch("utils.helper.Metrics") + def test_helper_functions_work_together(self, mock_metrics_class): + """Test that helper functions work together correctly.""" + mock_metrics_instance = Mock() + mock_metrics_class.return_value = mock_metrics_instance + + # Get session + session = get_session() + assert session is not None + + # Get service client + client = get_service_client("cloudwatch") + assert client is not None + + # Get metrics client + metrics = get_metrics_client(CloudWatchNamespaces.AGENTCORE_INVOCATION) + assert metrics is mock_metrics_instance + + # Verify caching works + session2 = get_session() + client2 = get_service_client("cloudwatch") + metrics2 = get_metrics_client(CloudWatchNamespaces.AGENTCORE_INVOCATION) + + assert session is session2 + assert client is client2 + assert metrics is metrics2 + + def test_json_serializer_with_mixed_data_types(self): + """Test json_serializer with mixed data types in a complex structure.""" + dt = datetime(2023, 12, 25, 10, 30, 45) + + # Test individual serialization (since json_serializer handles single objects) + datetime_result = json_serializer(dt) + string_result = json_serializer("test") + number_result = json_serializer(42) + + assert datetime_result == "2023-12-25T10:30:45" + assert string_result == "test" + assert number_result == 42 + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/source/lambda/agentcore-invocation/test/test_keep_alive_manager.py b/source/lambda/agentcore-invocation/test/test_keep_alive_manager.py new file mode 100644 index 00000000..8847a433 --- /dev/null +++ b/source/lambda/agentcore-invocation/test/test_keep_alive_manager.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python3 +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import os +import time +import pytest +from unittest.mock import Mock, patch, call +import threading + +# Set up environment variables before importing +os.environ["WEBSOCKET_CALLBACK_URL"] = "wss://test.execute-api.us-east-1.amazonaws.com/test" +os.environ["AGENT_RUNTIME_ARN"] = "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime" +os.environ["_X_AMZN_TRACE_ID"] = "Root=1-12345678-123456789abcdef0;Parent=123456789abcdef0;Sampled=1" + +from utils.keep_alive_manager import KeepAliveManager, get_keep_alive_manager +from utils.constants import KEEP_ALIVE_TOKEN, PROCESSING_TOKEN + + +class TestKeepAliveManager: + """Test KeepAliveManager functionality for long-running task management.""" + + def setup_method(self): + """Set up test fixtures.""" + self.mock_send_callback = Mock() + self.manager = KeepAliveManager(self.mock_send_callback) + self.connection_id = "test-connection-123" + self.conversation_id = "test-conversation-456" + self.message_id = "test-message-789" + + def teardown_method(self): + """Clean up after each test.""" + if self.manager: + self.manager.cleanup_all() + + def test_keep_alive_manager_initialization(self): + """Test KeepAliveManager initialization.""" + assert self.manager.send_message_callback == self.mock_send_callback + assert len(self.manager.active_connections) == 0 + assert self.manager.get_active_connection_count() == 0 + + def test_start_keep_alive_monitoring(self): + """Test starting keep-alive monitoring for a connection.""" + self.manager.start_keep_alive(self.connection_id, self.conversation_id, self.message_id) + + # Verify connection was added + assert self.connection_id in self.manager.active_connections + assert self.manager.get_active_connection_count() == 1 + + # Verify connection info + connection_info = self.manager.active_connections[self.connection_id] + assert connection_info["conversation_id"] == self.conversation_id + assert connection_info["message_id"] == self.message_id + assert "start_time" in connection_info + assert "last_keep_alive" in connection_info + assert "last_processing_update" in connection_info + + def test_stop_keep_alive_monitoring(self): + """Test stopping keep-alive monitoring for a connection.""" + # Start monitoring first + self.manager.start_keep_alive(self.connection_id, self.conversation_id, self.message_id) + assert self.manager.get_active_connection_count() == 1 + + # Stop monitoring + self.manager.stop_keep_alive(self.connection_id) + assert self.manager.get_active_connection_count() == 0 + assert self.connection_id not in self.manager.active_connections + + def test_update_activity(self): + """Test updating activity timestamps for a connection.""" + # Start monitoring + self.manager.start_keep_alive(self.connection_id, self.conversation_id, self.message_id) + + # Get initial timestamps + initial_info = self.manager.active_connections[self.connection_id].copy() + + # Wait a bit and update activity + time.sleep(0.1) + self.manager.update_activity(self.connection_id) + + # Verify timestamps were updated + updated_info = self.manager.active_connections[self.connection_id] + assert updated_info["last_keep_alive"] > initial_info["last_keep_alive"] + assert updated_info["last_processing_update"] > initial_info["last_processing_update"] + + def test_get_connection_status(self): + """Test getting connection status information.""" + # Test non-existent connection + status = self.manager.get_connection_status("non-existent") + assert status is None + + # Start monitoring + self.manager.start_keep_alive(self.connection_id, self.conversation_id, self.message_id) + + # Get status + status = self.manager.get_connection_status(self.connection_id) + assert status is not None + assert status["conversation_id"] == self.conversation_id + assert status["message_id"] == self.message_id + assert "duration" in status + assert "time_since_last_keep_alive" in status + assert "time_since_last_processing_update" in status + + def test_multiple_connections(self): + """Test managing multiple connections simultaneously.""" + connections = [ + ("conn-1", "conv-1", "msg-1"), + ("conn-2", "conv-2", "msg-2"), + ("conn-3", "conv-3", "msg-3"), + ] + + # Start monitoring for all connections + for conn_id, conv_id, msg_id in connections: + self.manager.start_keep_alive(conn_id, conv_id, msg_id) + + assert self.manager.get_active_connection_count() == 3 + + # Stop monitoring for one connection + self.manager.stop_keep_alive("conn-2") + assert self.manager.get_active_connection_count() == 2 + assert "conn-1" in self.manager.active_connections + assert "conn-2" not in self.manager.active_connections + assert "conn-3" in self.manager.active_connections + + def test_cleanup_all_connections(self): + """Test cleaning up all active connections.""" + # Start monitoring for multiple connections + for i in range(3): + self.manager.start_keep_alive(f"conn-{i}", f"conv-{i}", f"msg-{i}") + + assert self.manager.get_active_connection_count() == 3 + + # Cleanup all + self.manager.cleanup_all() + assert self.manager.get_active_connection_count() == 0 + + def test_keep_alive_message_sending(self): + """Test that keep-alive message sending mechanism works.""" + # Test the message sending method directly + connection_info = { + "conversation_id": self.conversation_id, + "message_id": self.message_id, + "start_time": time.time(), + } + + # Test direct message sending + self.manager._send_keep_alive_message(self.connection_id, connection_info) + + # Verify keep-alive message was sent + assert self.mock_send_callback.call_count == 1 + call_args = self.mock_send_callback.call_args + assert call_args[0][0] == self.connection_id + assert call_args[0][1] == self.conversation_id + assert call_args[0][2] == KEEP_ALIVE_TOKEN + assert call_args[0][3] == self.message_id + + def test_processing_update_sending(self): + """Test that processing update message sending mechanism works.""" + # Test the message sending method directly + connection_info = { + "conversation_id": self.conversation_id, + "message_id": self.message_id, + "start_time": time.time(), + } + + # Test direct message sending + self.manager._send_processing_update(self.connection_id, connection_info) + + # Verify processing update message was sent + assert self.mock_send_callback.call_count == 1 + call_args = self.mock_send_callback.call_args + assert call_args[0][0] == self.connection_id + assert call_args[0][1] == self.conversation_id + assert call_args[0][2] == PROCESSING_TOKEN + assert call_args[0][3] == self.message_id + + def test_send_message_callback_error_handling(self): + """Test error handling when send message callback fails.""" + # Create manager with failing callback + failing_callback = Mock(side_effect=Exception("WebSocket error")) + manager = KeepAliveManager(failing_callback) + + try: + # Start monitoring - this should handle callback errors gracefully + manager.start_keep_alive(self.connection_id, self.conversation_id, self.message_id) + + # Wait a bit for potential error handling + time.sleep(0.1) + + # Manager should still be functional + assert manager.get_active_connection_count() >= 0 # May be 0 if connection was removed due to error + + finally: + manager.cleanup_all() + + @patch("time.sleep") # Mock sleep to speed up test + def test_maximum_duration_handling(self, mock_sleep): + """Test that connections are cleaned up after maximum duration.""" + # Use very short max duration for testing + with patch("utils.keep_alive_manager.MAX_STREAMING_DURATION_SECONDS", 0.1): + self.manager.start_keep_alive(self.connection_id, self.conversation_id, self.message_id) + + # Wait for max duration to be exceeded + time.sleep(0.2) + + # Connection should be automatically removed + # Note: This test may be flaky due to threading timing + # In a real scenario, the connection would be removed by the keep-alive worker + + def test_thread_safety(self): + """Test thread safety of KeepAliveManager operations.""" + + def start_stop_connections(): + for i in range(10): + conn_id = f"thread-conn-{threading.current_thread().ident}-{i}" + self.manager.start_keep_alive(conn_id, f"conv-{i}", f"msg-{i}") + time.sleep(0.01) + self.manager.stop_keep_alive(conn_id) + + # Start multiple threads + threads = [] + for _ in range(3): + thread = threading.Thread(target=start_stop_connections) + threads.append(thread) + thread.start() + + # Wait for all threads to complete + for thread in threads: + thread.join() + + # All connections should be cleaned up + assert self.manager.get_active_connection_count() == 0 + + +class TestKeepAliveManagerGlobal: + """Test global KeepAliveManager instance management.""" + + def test_get_keep_alive_manager_singleton(self): + """Test that get_keep_alive_manager returns singleton instance.""" + mock_callback = Mock() + + # Clear global instance + import utils.keep_alive_manager + + utils.keep_alive_manager._keep_alive_manager = None + + # First call should create instance + manager1 = get_keep_alive_manager(mock_callback) + assert manager1 is not None + + # Second call should return same instance + manager2 = get_keep_alive_manager(mock_callback) + assert manager1 is manager2 + + # Cleanup + manager1.cleanup_all() + + def test_keep_alive_manager_integration_with_handler(self): + """Test integration of KeepAliveManager with handler functions.""" + from handler import send_websocket_message + + # Clear global instance first + import utils.keep_alive_manager + + utils.keep_alive_manager._keep_alive_manager = None + + # Test that we can create a manager with the actual send function + manager = get_keep_alive_manager(send_websocket_message) + assert manager is not None + assert manager.send_message_callback == send_websocket_message + + # Cleanup + manager.cleanup_all() + + +class TestKeepAliveConstants: + """Test keep-alive related constants.""" + + def test_keep_alive_constants_exist(self): + """Test that all required keep-alive constants are defined.""" + from utils.constants import ( + KEEP_ALIVE_TOKEN, + PROCESSING_TOKEN, + KEEP_ALIVE_INTERVAL_SECONDS, + PROCESSING_UPDATE_INTERVAL_SECONDS, + MAX_STREAMING_DURATION_SECONDS, + ) + + assert KEEP_ALIVE_TOKEN == "##KEEP_ALIVE##" + assert PROCESSING_TOKEN == "##PROCESSING##" + assert isinstance(KEEP_ALIVE_INTERVAL_SECONDS, int) + assert isinstance(PROCESSING_UPDATE_INTERVAL_SECONDS, int) + assert isinstance(MAX_STREAMING_DURATION_SECONDS, int) + assert KEEP_ALIVE_INTERVAL_SECONDS > 0 + assert PROCESSING_UPDATE_INTERVAL_SECONDS > 0 + assert MAX_STREAMING_DURATION_SECONDS > 0 + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/source/lambda/agentcore-invocation/utils/__init__.py b/source/lambda/agentcore-invocation/utils/__init__.py new file mode 100644 index 00000000..7d20a24e --- /dev/null +++ b/source/lambda/agentcore-invocation/utils/__init__.py @@ -0,0 +1,9 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from .constants import * +from .event_processor import * +from .helper import * +from .agentcore_client import * +from .keep_alive_manager import * +from .websocket_error_handler import * diff --git a/source/lambda/agentcore-invocation/utils/agentcore_client.py b/source/lambda/agentcore-invocation/utils/agentcore_client.py new file mode 100644 index 00000000..20b6439b --- /dev/null +++ b/source/lambda/agentcore-invocation/utils/agentcore_client.py @@ -0,0 +1,478 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +"""AgentCore client for invoking runtime services.""" + +import json +import os +import time +from typing import Any, Dict, Iterator, List, Optional + +import boto3 +from aws_lambda_powertools import Logger, Tracer +from aws_lambda_powertools.metrics import MetricUnit +from botocore.config import Config +from botocore.exceptions import BotoCoreError, ClientError +from utils.constants import AGENT_RUNTIME_ARN_ENV_VAR, FILES_KEY, CloudWatchNamespaces, CloudWatchMetrics +from utils.helper import get_metrics_client + +logger = Logger(utc=True) +tracer = Tracer() +metrics = get_metrics_client(CloudWatchNamespaces.AWS_BEDROCK) + + +class AgentCoreClientError(Exception): + """Base exception class for AgentCore client errors""" + + +class AgentCoreInvocationError(AgentCoreClientError): + """Exception raised when AgentCore invocation fails""" + + +class AgentCoreConfigurationError(AgentCoreClientError): + """Exception raised when AgentCore configuration is invalid""" + + +class AgentCoreClient: + """ + Client for invoking AgentCore Runtime using boto3. + + This client handles initialization, invocation, and response processing + for the deployed AgentCore Runtime container. + """ + + def __init__(self): + """ + Initialize the AgentCore client with runtime ARN from environment variables. + + Raises: + AgentCoreConfigurationError: If required environment variables are missing + """ + self.agent_runtime_arn = os.environ.get(AGENT_RUNTIME_ARN_ENV_VAR) + + if not self.agent_runtime_arn: + error_msg = f"Missing required environment variable: {AGENT_RUNTIME_ARN_ENV_VAR}" + logger.error(error_msg) + raise AgentCoreConfigurationError(error_msg) + + try: + config = Config( + read_timeout=300, # 5 minutes for reading response + connect_timeout=10, # 10 seconds for initial connection + retries={"max_attempts": 3, "mode": "standard"}, + ) + self.client = boto3.client("bedrock-agentcore", config=config) + logger.info( + f"AgentCore client initialized with runtime ARN: {self.agent_runtime_arn} " + f"(read_timeout=300s, connect_timeout=10s)" + ) + except Exception as e: + error_msg = f"Failed to initialize bedrock-agentcore client: {str(e)}" + logger.error(error_msg) + raise AgentCoreConfigurationError(error_msg) from e + + @tracer.capture_method + def invoke_agent( + self, + input_text: str, + conversation_id: str, + user_id: str, + message_id: Optional[str] = None, + files: Optional[List[Dict[str, Any]]] = None, + ) -> Iterator[Dict[str, Any]]: + """ + Invoke the AgentCore Runtime and return streaming response. + + Args: + input_text: The user input text to send to the agent + conversation_id: The conversation ID for context + user_id: The user ID for personalization and access control + message_id: The message ID for this interaction + files: List of file references (optional) + + Returns: + Iterator[Dict[str, Any]]: Streaming chunks from AgentCore + + Raises: + AgentCoreInvocationError: If the invocation fails + """ + payload_dict = { + "conversationId": conversation_id, + "messageId": message_id or f"msg-{int(time.time() * 1000)}", + "input": input_text, + "userId": user_id, + } + + if files: + payload_dict[FILES_KEY] = files + + logger.debug(f"Payload extracted from event: {payload_dict}") + + try: + payload_bytes = json.dumps(payload_dict).encode("utf-8") + + response = self.client.invoke_agent_runtime( + agentRuntimeArn=self.agent_runtime_arn, + payload=payload_bytes, + contentType="application/json", + accept="application/json", + runtimeUserId=user_id, + runtimeSessionId=f"{conversation_id}_{user_id}", + ) + + logger.info(f"AgentCore invocation successful for conversation {conversation_id}") + + return self._process_response(response, conversation_id) + + except (ClientError, BotoCoreError) as e: + tracer_id = os.getenv("_X_AMZN_TRACE_ID") + error_msg = f"AgentCore boto3 error during invocation: {str(e)}" + logger.error(error_msg, xray_trace_id=tracer_id) + raise AgentCoreInvocationError(error_msg) from e + except Exception as e: + tracer_id = os.getenv("_X_AMZN_TRACE_ID") + error_msg = f"Unexpected error during AgentCore invocation: {str(e)}" + logger.error(error_msg, xray_trace_id=tracer_id) + raise AgentCoreInvocationError(error_msg) from e + + def _process_response(self, response: Dict[str, Any], conversation_id: str) -> Iterator[Dict[str, Any]]: + """ + Process AgentCore Runtime response and yield chunks. + + Supports both streaming (StreamingBody) and complete (dict/string) responses, + converting them into a uniform chunked iterator interface for consistent handling. + + Args: + response: Response from AgentCore Runtime (streaming or complete) + conversation_id: Conversation ID for logging context + + Yields: + Dict: Response chunks with 'type' and optional 'text' fields + """ + start_time = time.time() + logger.info(f"Starting response streaming for conversation {conversation_id}") + + try: + response_content = response.get("response") + + if not response_content: + logger.warning(f"No response content to stream for conversation {conversation_id}") + yield {"type": "completion"} + return + + if hasattr(response_content, "read"): + yield from self._process_streaming_response(response_content, conversation_id) + else: + yield from self._process_complete_response(response_content, conversation_id) + + yield {"type": "completion"} + + elapsed_time = time.time() - start_time + logger.info(f"Response streaming completed for conversation {conversation_id} in {elapsed_time:.2f}s") + + except Exception as e: + logger.error(f"Error during response streaming for conversation {conversation_id}: {str(e)}") + yield {"type": "error", "error": str(e)} + yield {"type": "completion"} + + def _process_streaming_response(self, response_content: Any, conversation_id: str) -> Iterator[Dict[str, Any]]: + """ + Process streaming response from AgentCore Runtime. + + Args: + response_content: StreamingBody response content + conversation_id: Conversation ID for logging context + + Yields: + Dict: Response chunks with 'type' and optional 'text' fields + """ + logger.info(f"Processing real StreamingBody for conversation {conversation_id}") + + try: + try: + yield from self._process_chunked_stream(response_content, conversation_id) + except TypeError: + yield from self._process_full_stream(response_content, conversation_id) + except Exception as e: + logger.error(f"Error reading StreamingBody for conversation {conversation_id}: {str(e)}") + yield {"type": "error", "error": str(e)} + + def _process_chunked_stream(self, response_content: Any, conversation_id: str) -> Iterator[Dict[str, Any]]: + """ + Process streaming response in chunks. + + Args: + response_content: StreamingBody response content + conversation_id: Conversation ID for logging context + + Yields: + Dict: Response chunks with 'type' and optional 'text' fields + """ + chunk_size = 1024 # Read 1KB at a time + buffer = "" + + while True: + chunk_bytes = response_content.read(chunk_size) + if not chunk_bytes: + break + + chunk_text = chunk_bytes.decode("utf-8") + buffer += chunk_text + + lines = buffer.split("\n") + buffer = lines[-1] + + for line in lines[:-1]: + yield from self._process_stream_line(line, conversation_id) + + if buffer.strip(): + yield from self._process_stream_line(buffer, conversation_id) + + def _process_stream_line(self, line: str, conversation_id: str) -> Iterator[Dict[str, Any]]: + """ + Process a single line from the stream. + + Args: + line: Line content to process + conversation_id: Conversation ID for logging context + + Yields: + Dict: Processed chunk if valid JSON + """ + if not line.strip(): + return + + try: + line_content = line.strip() + if line_content.startswith("data: "): + line_content = line_content[6:] + + chunk_data = json.loads(line_content) + processed_chunk = self._process_agentcore_chunk(chunk_data, conversation_id) + if processed_chunk: + yield processed_chunk + except json.JSONDecodeError: + logger.debug(f"Skipping non-JSON line: {line.strip()[:50]}") + + def _process_full_stream(self, response_content: Any, conversation_id: str) -> Iterator[Dict[str, Any]]: + """ + Process streaming response by reading all at once. + + Args: + response_content: StreamingBody response content + conversation_id: Conversation ID for logging context + + Yields: + Dict: Response chunks with 'type' and optional 'text' fields + """ + logger.info("StreamingBody doesn't support chunked reading, reading all at once") + response_text = response_content.read().decode("utf-8") + + try: + response_data = json.loads(response_text) + processed_chunk = self._process_agentcore_chunk(response_data, conversation_id) + if processed_chunk: + yield processed_chunk + except json.JSONDecodeError: + if response_text: + yield {"text": response_text, "type": "content"} + + def _process_complete_response(self, response_content: Any, conversation_id: str) -> Iterator[Dict[str, Any]]: + """ + Process complete (non-streaming) response. + + Args: + response_content: Complete response content + conversation_id: Conversation ID for logging context + + Yields: + Dict: Response chunks with 'type' and optional 'text' fields + """ + logger.info(f"Processing complete response for conversation {conversation_id}") + + if isinstance(response_content, dict): + yield from self._process_dict_response(response_content, conversation_id) + elif isinstance(response_content, str): + yield {"text": response_content, "type": "content"} + else: + yield {"text": str(response_content), "type": "content"} + + def _process_dict_response( + self, response_content: Dict[str, Any], conversation_id: str + ) -> Iterator[Dict[str, Any]]: + """ + Process dictionary response content. + + Args: + response_content: Dictionary response content + conversation_id: Conversation ID for logging context + + Yields: + Dict: Response chunks with 'type' and optional 'text' fields + """ + if response_content.get("type") == "error" or "error" in response_content: + processed_chunk = self._process_agentcore_chunk(response_content, conversation_id) + if processed_chunk: + yield processed_chunk + yield {"type": "completion"} + return + + for field in ["result", "text", "content", "message", "output"]: + if field in response_content and response_content[field]: + yield {"text": str(response_content[field]), "type": "content"} + yield {"type": "completion"} + return + + processed_chunk = self._process_agentcore_chunk(response_content, conversation_id) + if processed_chunk: + yield processed_chunk + yield {"type": "completion"} + + def _process_agentcore_chunk(self, chunk: Dict[str, Any], conversation_id: str) -> Optional[Dict[str, Any]]: + """ + Process an AgentCore Runtime chunk from the streaming response. + + Args: + chunk: Raw chunk from the AgentCore Runtime stream + conversation_id: Conversation ID for logging context + + Returns: + Processed chunk ready for WebSocket forwarding, or None if chunk should be skipped + """ + try: + if isinstance(chunk, dict): + return self._process_dict_chunk(chunk, conversation_id) + elif isinstance(chunk, str): + return {"text": chunk, "type": "content"} + else: + logger.debug(f"Unexpected AgentCore chunk type {type(chunk)} for conversation {conversation_id}") + return None + + except Exception as e: # pylint: disable=broad-except + logger.error( + "Error processing AgentCore chunk for conversation %s: %s", + conversation_id, + str(e), + ) + return None + + def _handle_error_chunk(self, chunk: Dict[str, Any], conversation_id: str) -> Dict[str, Any]: + """Handle error chunk processing.""" + error_msg = chunk.get("error", chunk.get("message", "Unknown error")) + logger.error(f"AgentCore error event received for conversation {conversation_id}: {error_msg}") + return {"type": "error", "error": error_msg, "message": chunk.get("message", error_msg)} + + def _handle_thinking_chunk(self, chunk: Dict[str, Any], conversation_id: str) -> Dict[str, Any]: + """Handle thinking chunk processing.""" + logger.debug(f"AgentCore thinking event received for conversation {conversation_id}: {chunk['thinking']}") + return chunk + + def _handle_tool_use_chunk(self, chunk: Dict[str, Any], conversation_id: str) -> Dict[str, Any]: + """Handle tool usage chunk processing.""" + logger.debug(f"AgentCore tool usage event received for conversation {conversation_id}: {chunk['toolUsage']}") + return chunk + + def _report_usage_metrics(self, usage_metadata: Dict[str, Any]) -> None: + """Report token usage metrics to CloudWatch.""" + logger.info(usage_metadata) + input_tokens = usage_metadata.get("inputTokens", 0) + output_tokens = usage_metadata.get("outputTokens", 0) + total_tokens = usage_metadata.get("totalTokens", 0) + + token_metrics = { + CloudWatchMetrics.LLM_INPUT_TOKEN_COUNT.value: input_tokens, + CloudWatchMetrics.LLM_OUTPUT_TOKEN_COUNT.value: output_tokens, + CloudWatchMetrics.LLM_TOTAL_TOKEN_COUNT.value: total_tokens, + } + + for metric_name, token_count in token_metrics.items(): + if token_count: + metrics.add_metric(name=metric_name, unit=MetricUnit.Count, value=int(token_count)) + metrics.flush_metrics() + + def _handle_completion_chunk(self, chunk: Dict[str, Any], conversation_id: str) -> Dict[str, Any]: + """Handle completion chunk processing.""" + logger.debug(f"AgentCore completion event received for conversation {conversation_id}") + + if "usage" in chunk: + self._report_usage_metrics(chunk["usage"]) + else: + logger.info("Bedrock usage metrics are not provided. Not reporting the metrics") + + # Preserve all fields from the completion chunk (including usage metadata) + completion_chunk = {"type": "completion"} + for key, value in chunk.items(): + if key != "type" and value is not None: + completion_chunk[key] = value + return completion_chunk + + def _process_dict_chunk(self, chunk: Dict[str, Any], conversation_id: str) -> Optional[Dict[str, Any]]: + """ + Process a dictionary chunk from AgentCore. + + Args: + chunk: Dictionary chunk from AgentCore + conversation_id: Conversation ID for logging context + + Returns: + Processed chunk or None + """ + # Check for error type FIRST before extracting content + if chunk.get("type") == "error" or "error" in chunk: + return self._handle_error_chunk(chunk, conversation_id) + + content_result = self._extract_content_from_chunk(chunk) + if content_result: + return content_result + + delta_result = self._extract_delta_from_chunk(chunk) + if delta_result: + return delta_result + + if chunk.get("type") == "thinking" and "thinking" in chunk: + return self._handle_thinking_chunk(chunk, conversation_id) + + if chunk.get("type") == "tool_use" and "toolUsage" in chunk: + return self._handle_tool_use_chunk(chunk, conversation_id) + + if chunk.get("type") == "completion" or chunk.get("event") == "done": + return self._handle_completion_chunk(chunk, conversation_id) + + logger.debug(f"Unrecognized AgentCore chunk format for conversation {conversation_id}: {list(chunk.keys())}") + return None + + def _extract_content_from_chunk(self, chunk: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """ + Extract content from standard chunk fields. + + Args: + chunk: Dictionary chunk from AgentCore + + Returns: + Content chunk or None + """ + for field in ["result", "response", "text", "content", "message"]: + if field in chunk and chunk[field]: + return {"text": str(chunk[field]), "type": "content"} + return None + + def _extract_delta_from_chunk(self, chunk: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """ + Extract delta updates from chunk. + + Args: + chunk: Dictionary chunk from AgentCore + + Returns: + Delta content chunk or None + """ + if "delta" not in chunk: + return None + + delta = chunk["delta"] + if isinstance(delta, dict) and "text" in delta: + return {"text": str(delta["text"]), "type": "content"} + elif isinstance(delta, str): + return {"text": delta, "type": "content"} + + return None diff --git a/source/lambda/agentcore-invocation/utils/constants.py b/source/lambda/agentcore-invocation/utils/constants.py new file mode 100644 index 00000000..8b98964f --- /dev/null +++ b/source/lambda/agentcore-invocation/utils/constants.py @@ -0,0 +1,58 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import os +from enum import Enum + +USE_CASE_UUID_ENV_VAR = "USE_CASE_UUID" +WEBSOCKET_CALLBACK_URL_ENV_VAR = "WEBSOCKET_CALLBACK_URL" +TRACE_ID_ENV_VAR = "_X_AMZN_TRACE_ID" +AGENT_RUNTIME_ARN_ENV_VAR = "AGENT_RUNTIME_ARN" + +USE_CASE_UUID_SHORT = os.getenv(USE_CASE_UUID_ENV_VAR).split('-')[0] +METRICS_SERVICE_NAME = f"GAABUseCase-{USE_CASE_UUID_SHORT}" + +REQUEST_CONTEXT_KEY = "requestContext" +CONNECTION_ID_KEY = "connectionId" +MESSAGE_KEY = "message" +AUTH_TOKEN_KEY = "authToken" +CONVERSATION_ID_KEY = "conversationId" +INPUT_TEXT_KEY = "inputText" +USER_ID_KEY = "userId" +MESSAGE_ID_KEY = "messageId" +FILES_KEY = "files" + +END_CONVERSATION_TOKEN = "##END_CONVERSATION##" +KEEP_ALIVE_TOKEN = "##KEEP_ALIVE##" +PROCESSING_TOKEN = "##PROCESSING##" + +LAMBDA_REMAINING_TIME_THRESHOLD_MS = 20000 +KEEP_ALIVE_INTERVAL_SECONDS = 30 +PROCESSING_UPDATE_INTERVAL_SECONDS = 10 +MAX_STREAMING_DURATION_SECONDS = 300 + +AGENTCORE_REQUIRED_ENV_VARS = [ + USE_CASE_UUID_ENV_VAR, + WEBSOCKET_CALLBACK_URL_ENV_VAR, + TRACE_ID_ENV_VAR, + AGENT_RUNTIME_ARN_ENV_VAR, +] + + +class CloudWatchNamespaces(str, Enum): + """Supported CloudWatch Namespaces""" + + API_GATEWAY = "AWS/ApiGateway" + AWS_COGNITO = "AWS/Cognito" + AWS_BEDROCK_AGENT = "AWS/Bedrock/Agent" + COLD_STARTS = "Solution/ColdStarts" + AGENTCORE_INVOCATION = "Solution/AgentCoreInvocation" + AWS_BEDROCK = "AWS/Bedrock" + + +class CloudWatchMetrics(str, Enum): + """Supported Cloudwatch Metrics""" + + LLM_INPUT_TOKEN_COUNT = "InputTokenCount" + LLM_OUTPUT_TOKEN_COUNT = "OutputTokenCount" + LLM_TOTAL_TOKEN_COUNT = "TotalTokenCount" diff --git a/source/lambda/agentcore-invocation/utils/event_processor.py b/source/lambda/agentcore-invocation/utils/event_processor.py new file mode 100644 index 00000000..05b3c222 --- /dev/null +++ b/source/lambda/agentcore-invocation/utils/event_processor.py @@ -0,0 +1,172 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +from typing import Any, Dict, List, Optional +from uuid import uuid4 + +from aws_lambda_powertools import Logger, Tracer +from utils.constants import ( + CONNECTION_ID_KEY, + CONVERSATION_ID_KEY, + FILES_KEY, + INPUT_TEXT_KEY, + MESSAGE_ID_KEY, + MESSAGE_KEY, + REQUEST_CONTEXT_KEY, + USER_ID_KEY, +) + +logger = Logger(utc=True) +tracer = Tracer() + + +class EventProcessorError(Exception): + """Base exception class for EventProcessor errors""" + + pass + + +class InvalidEventError(EventProcessorError): + """Exception raised when the event is invalid""" + + pass + + +class MissingDataError(EventProcessorError): + """Exception raised when required data is missing""" + + pass + + +class EventProcessor: + """This class is used to process a sqs event record whose body contains the requestContext and message + from the websocket route invocation + """ + + def __init__(self, event: Dict): + self.event = event + try: + self.event_body = json.loads(event.get("body", "{}")) + except json.JSONDecodeError as e: + logger.error(f"Failed to parse event body: {e}") + raise InvalidEventError("Invalid JSON in event body") from e + + if not isinstance(self.event_body, dict): + raise InvalidEventError("Event body must be a JSON object") + + def get_connection_id(self) -> Optional[str]: + """ + Retrieve the connection ID from the event. + + Returns: + Optional[str]: The connection ID, or None if not found in the event. + + Raises: + InvalidEventError: If the connection ID is not found in the event. + """ + try: + return self.event_body[REQUEST_CONTEXT_KEY][CONNECTION_ID_KEY] + except KeyError as e: + logger.error("Connection ID not found in event") + raise InvalidEventError("Connection ID not found in event") from e + + def get_message(self) -> Dict: + """ + Retrieve the message from the event body. + + Returns: + Dict: The message dictionary. + + Raises: + MissingDataError: If the message is not found in the event body. + """ + message = self.event_body.get(MESSAGE_KEY) + if not message: + logger.error("Message not found in event body") + raise MissingDataError("Message is required but not found in event body") + return message + + def get_input_text(self) -> str: + """ + Retrieve the input text from the message. + + Returns: + str: The input text, or an empty string if not found in the message. + """ + input_text = self.get_message().get(INPUT_TEXT_KEY, "") + return input_text if input_text is not None else "" + + def get_conversation_id(self) -> str: + """ + Retrieve the conversation ID from the message, or generate a new one if not found. + + Returns: + str: The conversation ID. + """ + conversation_id = self.get_message().get(CONVERSATION_ID_KEY, "") + if not conversation_id or (isinstance(conversation_id, str) and conversation_id.strip() == ""): + return str(uuid4()) + return conversation_id + + def get_user_id(self) -> Optional[str]: + """ + Retrieve the user ID from the event. + + Returns: + str: The user ID + """ + user_id = self.event_body.get(REQUEST_CONTEXT_KEY, {}).get("authorizer", {}).get("UserId") + if user_id is None: + logger.error("User ID not found in event") + raise InvalidEventError("User ID not found in event") + else: + return user_id + + def get_files(self) -> List[Dict[str, Any]]: + """ + Retrieve the files from the message. + + Returns: + List[Dict[str, Any]]: The files list. + """ + return self.get_message().get(FILES_KEY, []) + + def get_message_id(self) -> str: + """ + Retrieve the message ID from the WebSocket message payload. + + Returns: + str: The message ID from the event payload, or generates a new UUID if not found. + """ + message_id = self.get_message().get(MESSAGE_ID_KEY) + if not message_id: + return str(uuid4()) + return message_id + + @tracer.capture_method + def process(self) -> Dict: + """ + Process the event and return relevant information. + + Returns: + Dict: A dictionary containing the connection ID, conversation ID, + input text, user ID, and message ID + + Raises: + EventProcessorError: If any error occurs during event processing. + """ + try: + result = { + CONNECTION_ID_KEY: self.get_connection_id(), + CONVERSATION_ID_KEY: self.get_conversation_id(), + INPUT_TEXT_KEY: self.get_input_text(), + USER_ID_KEY: self.get_user_id(), + MESSAGE_ID_KEY: self.get_message_id(), + FILES_KEY: self.get_files(), + } + + return result + except EventProcessorError as e: + logger.error(f"Error processing event: {e}") + raise diff --git a/source/lambda/agentcore-invocation/utils/helper.py b/source/lambda/agentcore-invocation/utils/helper.py new file mode 100644 index 00000000..0f376b40 --- /dev/null +++ b/source/lambda/agentcore-invocation/utils/helper.py @@ -0,0 +1,115 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +import boto3 +from datetime import datetime + +from aws_lambda_powertools import Logger, Metrics, Tracer + +from utils.constants import METRICS_SERVICE_NAME, CloudWatchNamespaces + +logger = Logger(utc=True) +tracer = Tracer() +_metrics_namespaces = dict() +_service_clients = dict() +_session = None + + +@tracer.capture_method +def get_session(): + """ + Get or create a boto3 session. + + Returns: + boto3.session.Session: The boto3 session + """ + global _session + if not _session: + _session = boto3.session.Session() + return _session + + +@tracer.capture_method +def get_service_client(service_name, **kwargs): + """ + Get or create a boto3 service client. + + This function manages a cache of service clients for different AWS services. + If a client for the requested service already exists, it returns the cached client. + Otherwise, it creates a new client, caches it, and then returns it. + + Args: + service_name (str): The name of the AWS service + **kwargs: Additional arguments to pass to the client constructor + + Returns: + boto3 client: The service client for the specified service + """ + global _service_clients + session = get_session() + + cache_key = f"{service_name}_{hash(frozenset(kwargs.items()) if kwargs else frozenset())}" + + if cache_key not in _service_clients: + logger.debug(f"Cache miss for {service_name}. Creating a new one and cache it") + _service_clients[cache_key] = session.client(service_name, **kwargs) + + return _service_clients[cache_key] + + +@tracer.capture_method +def get_metrics_client(namespace: CloudWatchNamespaces) -> Metrics: + """ + Retrieves or creates a Metrics client for the specified CloudWatch namespace. + + This function manages a cache of Metrics clients for different CloudWatch namespaces. + If a client for the requested namespace already exists, it returns the cached client. + Otherwise, it creates a new client, caches it, and then returns it. + + Args: + namespace (CloudWatchNamespaces): The CloudWatch namespace for which to get or create a Metrics client. + + Returns: + Metrics: A Metrics client object for the specified namespace. + + Note: + This function uses a global dictionary to cache Metrics clients. + It is decorated with @tracer.capture_method for performance monitoring. + + Example: + >>> client = get_metrics_client(CloudWatchNamespaces.AGENTCORE_INVOCATION) + >>> # Use the client to publish metrics + >>> client.put_metric(name="InvocationCount", unit=MetricUnit.Count, value=1) + """ + + global _metrics_namespaces + + if namespace not in _metrics_namespaces: + logger.debug(f"Cache miss for {namespace}. Creating a new cache entry.") + _metrics_namespaces[namespace] = Metrics(namespace=namespace.value, service=METRICS_SERVICE_NAME) + + return _metrics_namespaces[namespace] + + +def json_serializer(obj): + """ + Custom JSON serializer that handles datetime objects and other non-JSON serializable objects. + + Args: + obj: The object to serialize. + + Returns: + str or obj: ISO format string if obj is a datetime, original object if JSON serializable, + or string representation for non-serializable objects. + """ + if isinstance(obj, datetime): + return obj.isoformat() + try: + json.dumps(obj) + return obj + except Exception as ex: + logger.info( + f"Serializing failed for object: {obj}. Exception: {ex}. Converting the object into string for JSON dumps..." + ) + return str(obj) diff --git a/source/lambda/agentcore-invocation/utils/keep_alive_manager.py b/source/lambda/agentcore-invocation/utils/keep_alive_manager.py new file mode 100644 index 00000000..710acc3d --- /dev/null +++ b/source/lambda/agentcore-invocation/utils/keep_alive_manager.py @@ -0,0 +1,343 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import time +import threading +from typing import Callable, Optional +from aws_lambda_powertools import Logger + +from utils.constants import ( + KEEP_ALIVE_INTERVAL_SECONDS, + PROCESSING_UPDATE_INTERVAL_SECONDS, + MAX_STREAMING_DURATION_SECONDS, + KEEP_ALIVE_TOKEN, + PROCESSING_TOKEN, +) + +logger = Logger(utc=True) + + +class KeepAliveManager: + """ + Manages keep-alive and processing update messages for long-running AgentCore operations. + + This class handles: + 1. Keep-alive pings to maintain WebSocket connections + 2. Processing updates to inform users that work is ongoing + 3. Connection health monitoring during streaming + 4. Automatic cleanup when operations complete or timeout + """ + + def __init__(self, send_message_callback: Callable[[str, str, str, str], None]): + """ + Initialize the KeepAliveManager. + + Args: + send_message_callback: Function to send messages via WebSocket + Signature: (connection_id, conversation_id, message, message_id) -> None + """ + self.send_message_callback = send_message_callback + self.active_connections = {} # connection_id -> connection_info + self.keep_alive_thread = None + self.processing_thread = None + self.shutdown_event = threading.Event() + self.lock = threading.Lock() + + def start_keep_alive(self, connection_id: str, conversation_id: str, message_id: str) -> None: + """ + Start keep-alive monitoring for a connection during long-running operations. + + Args: + connection_id: WebSocket connection ID + conversation_id: Conversation ID + message_id: Message ID for response formatting + """ + with self.lock: + # Store connection info + self.active_connections[connection_id] = { + "conversation_id": conversation_id, + "message_id": message_id, + "start_time": time.time(), + "last_keep_alive": time.time(), + "last_processing_update": time.time(), + } + + logger.info(f"Started keep-alive monitoring for connection {connection_id}") + + # Start background threads if not already running + if self.keep_alive_thread is None or not self.keep_alive_thread.is_alive(): + self.shutdown_event.clear() + self.keep_alive_thread = threading.Thread(target=self._keep_alive_worker, daemon=True) + self.keep_alive_thread.start() + + if self.processing_thread is None or not self.processing_thread.is_alive(): + self.processing_thread = threading.Thread(target=self._processing_update_worker, daemon=True) + self.processing_thread.start() + + def stop_keep_alive(self, connection_id: str) -> None: + """ + Stop keep-alive monitoring for a specific connection. + + Args: + connection_id: WebSocket connection ID to stop monitoring + """ + with self.lock: + if connection_id in self.active_connections: + connection_info = self.active_connections.pop(connection_id) + duration = time.time() - connection_info["start_time"] + logger.info(f"Stopped keep-alive monitoring for connection {connection_id} after {duration:.2f}s") + + # If no more active connections, signal shutdown + if not self.active_connections: + self.shutdown_event.set() + + def update_activity(self, connection_id: str) -> None: + """ + Update the last activity time for a connection (called when content is sent). + + Args: + connection_id: WebSocket connection ID + """ + with self.lock: + if connection_id in self.active_connections: + self.active_connections[connection_id]["last_keep_alive"] = time.time() + self.active_connections[connection_id]["last_processing_update"] = time.time() + + def _keep_alive_worker(self) -> None: + """Background worker that sends keep-alive messages.""" + logger.info("Keep-alive worker started") + + while not self.shutdown_event.is_set(): + try: + current_time = time.time() + connections_to_process = self._get_connections_snapshot() + + for connection_id, connection_info in connections_to_process: + self._process_keep_alive_for_connection(connection_id, connection_info, current_time) + + self.shutdown_event.wait(5) # Check every 5 seconds + + except Exception as e: + logger.error(f"Error in keep-alive worker: {str(e)}") + self.shutdown_event.wait(5) + + logger.info("Keep-alive worker stopped") + + def _get_connections_snapshot(self) -> list: + """ + Get a snapshot of active connections to avoid holding lock too long. + + Returns: + List of (connection_id, connection_info) tuples + """ + with self.lock: + return list(self.active_connections.items()) + + def _process_keep_alive_for_connection( + self, connection_id: str, connection_info: dict, current_time: float + ) -> None: + """ + Process keep-alive for a single connection. + + Args: + connection_id: WebSocket connection ID + connection_info: Connection information dictionary + current_time: Current timestamp + """ + try: + if self._should_stop_connection(connection_id, connection_info, current_time): + return + + if self._should_send_keep_alive(connection_info, current_time): + self._send_keep_alive_message(connection_id, connection_info) + self._update_keep_alive_time(connection_id, current_time) + + except Exception as e: + logger.error(f"Error sending keep-alive for connection {connection_id}: {str(e)}") + self.stop_keep_alive(connection_id) + + def _should_stop_connection(self, connection_id: str, connection_info: dict, current_time: float) -> bool: + """ + Check if connection should be stopped due to timeout. + + Args: + connection_id: WebSocket connection ID + connection_info: Connection information dictionary + current_time: Current timestamp + + Returns: + True if connection should be stopped + """ + if current_time - connection_info["start_time"] > MAX_STREAMING_DURATION_SECONDS: + logger.warning(f"Connection {connection_id} exceeded maximum streaming duration, stopping keep-alive") + self.stop_keep_alive(connection_id) + return True + return False + + def _should_send_keep_alive(self, connection_info: dict, current_time: float) -> bool: + """ + Check if keep-alive message should be sent. + + Args: + connection_info: Connection information dictionary + current_time: Current timestamp + + Returns: + True if keep-alive should be sent + """ + return current_time - connection_info["last_keep_alive"] >= KEEP_ALIVE_INTERVAL_SECONDS + + def _update_keep_alive_time(self, connection_id: str, current_time: float) -> None: + """ + Update the last keep-alive time for a connection. + + Args: + connection_id: WebSocket connection ID + current_time: Current timestamp + """ + with self.lock: + if connection_id in self.active_connections: + self.active_connections[connection_id]["last_keep_alive"] = current_time + + def _processing_update_worker(self) -> None: + """Background worker that sends processing update messages.""" + logger.info("Processing update worker started") + + while not self.shutdown_event.is_set(): + try: + current_time = time.time() + connections_to_process = [] + + with self.lock: + connections_to_process = list(self.active_connections.items()) + + for connection_id, connection_info in connections_to_process: + try: + if ( + current_time - connection_info["last_processing_update"] + >= PROCESSING_UPDATE_INTERVAL_SECONDS + ): + self._send_processing_update(connection_id, connection_info) + + # Update last processing update time + with self.lock: + if connection_id in self.active_connections: + self.active_connections[connection_id]["last_processing_update"] = current_time + + except Exception as e: + logger.error(f"Error sending processing update for connection {connection_id}: {str(e)}") + # Remove problematic connection + self.stop_keep_alive(connection_id) + + # Sleep for a short interval before next check + self.shutdown_event.wait(3) # Check every 3 seconds + + except Exception as e: + logger.error(f"Error in processing update worker: {str(e)}") + self.shutdown_event.wait(3) + + logger.info("Processing update worker stopped") + + def _send_keep_alive_message(self, connection_id: str, connection_info: dict) -> None: + """ + Send a keep-alive message to maintain WebSocket connection. + + Args: + connection_id: WebSocket connection ID + connection_info: Connection information dictionary + """ + try: + self.send_message_callback( + connection_id, connection_info["conversation_id"], KEEP_ALIVE_TOKEN, connection_info["message_id"] + ) + logger.debug(f"Sent keep-alive message to connection {connection_id}") + except Exception as e: + logger.error(f"Failed to send keep-alive message to connection {connection_id}: {str(e)}") + raise + + def _send_processing_update(self, connection_id: str, connection_info: dict) -> None: + """ + Send a processing update to inform the user that work is ongoing. + + Args: + connection_id: WebSocket connection ID + connection_info: Connection information dictionary + """ + try: + duration = time.time() - connection_info["start_time"] + self.send_message_callback( + connection_id, connection_info["conversation_id"], PROCESSING_TOKEN, connection_info["message_id"] + ) + logger.debug(f"Sent processing update to connection {connection_id} (duration: {duration:.1f}s)") + except Exception as e: + logger.error(f"Failed to send processing update to connection {connection_id}: {str(e)}") + raise + + def get_connection_status(self, connection_id: str) -> Optional[dict]: + """ + Get status information for a connection. + + Args: + connection_id: WebSocket connection ID + + Returns: + Dict with connection status or None if not found + """ + with self.lock: + if connection_id in self.active_connections: + connection_info = self.active_connections[connection_id].copy() + current_time = time.time() + connection_info["duration"] = current_time - connection_info["start_time"] + connection_info["time_since_last_keep_alive"] = current_time - connection_info["last_keep_alive"] + connection_info["time_since_last_processing_update"] = ( + current_time - connection_info["last_processing_update"] + ) + return connection_info + return None + + def cleanup_all(self) -> None: + """Clean up all active connections and stop background threads.""" + logger.info("Cleaning up all keep-alive connections") + + with self.lock: + connection_count = len(self.active_connections) + self.active_connections.clear() + + self.shutdown_event.set() + + # Wait for threads to finish + if self.keep_alive_thread and self.keep_alive_thread.is_alive(): + self.keep_alive_thread.join(timeout=5) + + if self.processing_thread and self.processing_thread.is_alive(): + self.processing_thread.join(timeout=5) + + logger.info(f"Cleaned up {connection_count} keep-alive connections") + + def get_active_connection_count(self) -> int: + """Get the number of active connections being monitored.""" + with self.lock: + return len(self.active_connections) + + +# Global keep-alive manager instance +_keep_alive_manager = None + + +def get_keep_alive_manager(send_message_callback: Callable[[str, str, str, str], None]) -> KeepAliveManager: + """ + Get or create the global KeepAliveManager instance. + + Args: + send_message_callback: Function to send messages via WebSocket + + Returns: + KeepAliveManager: The global keep-alive manager instance + """ + global _keep_alive_manager + + if _keep_alive_manager is None: + _keep_alive_manager = KeepAliveManager(send_message_callback) + logger.info("Initialized global KeepAliveManager") + + return _keep_alive_manager diff --git a/source/lambda/agentcore-invocation/utils/websocket_error_handler.py b/source/lambda/agentcore-invocation/utils/websocket_error_handler.py new file mode 100644 index 00000000..06a22b0e --- /dev/null +++ b/source/lambda/agentcore-invocation/utils/websocket_error_handler.py @@ -0,0 +1,106 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +import os +from typing import Optional + +from aws_lambda_powertools import Logger + +from utils.constants import ( + CONVERSATION_ID_KEY, + END_CONVERSATION_TOKEN, + MESSAGE_ID_KEY, + TRACE_ID_ENV_VAR, + WEBSOCKET_CALLBACK_URL_ENV_VAR, +) +from utils.helper import get_service_client + +logger = Logger(utc=True) + + +class WebsocketErrorHandler: + """ + WebsocketErrorHandler is used to send error responses to a websocket client for a provided connection ID. + + Attributes: + connection_url (str): The connection URL for the websocket client. + connection_id (str): The connection ID for the websocket client, retrieved from the event object + trace_id (Optional[str]): The x-ray trace ID to track the request in x-ray. + client (botocore.client): client that establishes the connection to the websocket API + message_id (Optional[str]): The message ID to include in the response. For erroring messages, unless provided, this will be set as None. + + Methods: + post_token_to_connection(payload): Sends a payload to the client that is connected to a websocket. + format_response(payload): Formats the payload in a format that the websocket accepts + """ + + def __init__( + self, connection_id: str, conversation_id: str, trace_id: Optional[str], message_id: Optional[str] = None + ) -> None: + self._connection_url = os.environ.get(WEBSOCKET_CALLBACK_URL_ENV_VAR) + self._connection_id = connection_id + self._trace_id = trace_id + self._client = get_service_client("apigatewaymanagementapi", endpoint_url=self.connection_url) + self._conversation_id = conversation_id + self._message_id = message_id + + @property + def connection_url(self) -> str: + return self._connection_url + + @property + def trace_id(self) -> str: + return self._trace_id + + @property + def client(self) -> str: + return self._client + + @property + def connection_id(self) -> str: + return self._connection_id + + @property + def message_id(self) -> Optional[str]: + return self._message_id + + @property + def conversation_id(self) -> Optional[str]: + return self._conversation_id + + def post_token_to_connection(self, payload) -> None: + """ + Sends an error payload to the client that is connected to a websocket. Also sends an END_CONVERSATION_TOKEN once the + message payload ends + + Args: + payload (str): Token to send to the client. + + Raises: + Exception: if there is an error posting the payload to the connection + """ + try: + self.client.post_to_connection( + ConnectionId=self.connection_id, Data=self.format_response(errorMessage=payload, traceId=self.trace_id) + ) + self.client.post_to_connection( + ConnectionId=self.connection_id, Data=self.format_response(data=END_CONVERSATION_TOKEN) + ) + except Exception as ex: + logger.error( + f"Error sending token to connection {self.connection_id}: {ex}", + xray_trace_id=os.environ.get(TRACE_ID_ENV_VAR), + ) + raise ex + + def format_response(self, **kwargs) -> str: + """ + Formats the payload in a format that the websocket accepts + + Args: + kwargs: The keyword arguments which will be converted to a json string + """ + response_dict = {CONVERSATION_ID_KEY: self.conversation_id, MESSAGE_ID_KEY: self.message_id} + response_dict.update(kwargs) + return json.dumps(response_dict) diff --git a/source/lambda/chat/poetry.lock b/source/lambda/chat/poetry.lock index 05c6f104..f5aeeb2a 100644 --- a/source/lambda/chat/poetry.lock +++ b/source/lambda/chat/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -78,27 +78,27 @@ wrapt = "*" [[package]] name = "boto3" -version = "1.40.15" +version = "1.40.53" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "boto3-1.40.15-py3-none-any.whl", hash = "sha256:52b8aa78c9906c4e49dcec6817c041df33c9825073bf66e7df8fc00afbe47b4b"}, - {file = "boto3-1.40.15.tar.gz", hash = "sha256:271b379ce5ad35ca82f1009e917528a182eed0e2de197ccffb0c51acadec5c79"}, + {file = "boto3-1.40.53-py3-none-any.whl", hash = "sha256:65ded2738de259bd9030feb4772ec7b53d5b661befa88ce836117c3df8265309"}, + {file = "boto3-1.40.53.tar.gz", hash = "sha256:3f8cf56034cfde20dd0abca01349f64ab65734d90c3fbf7357e8a84cb64a62ee"}, ] [package.dependencies] -botocore = ">=1.40.15,<1.41.0" +botocore = ">=1.40.53,<1.41.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.13.0,<0.14.0" +s3transfer = ">=0.14.0,<0.15.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-layer" -version = "3.0.7" +version = "4.0.0" description = "Layer for AWS Boto3 python SDK" optional = false python-versions = "^3.13" @@ -107,8 +107,8 @@ files = [] develop = true [package.dependencies] -boto3 = "1.40.15" -botocore = "1.40.15" +boto3 = "1.40.53" +botocore = "1.40.53" urllib3 = "2.5.0" [package.source] @@ -117,14 +117,14 @@ url = "../layers/aws_boto3" [[package]] name = "botocore" -version = "1.40.15" +version = "1.40.53" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "botocore-1.40.15-py3-none-any.whl", hash = "sha256:b364e039d2b67e509cfb089cb39b295251e48a60cc68fd591defbe10b44d83f9"}, - {file = "botocore-1.40.15.tar.gz", hash = "sha256:4960800e4c5a7b43db22550979c22f5a324cbaf75ef494bbb2cf400ef1e6aca7"}, + {file = "botocore-1.40.53-py3-none-any.whl", hash = "sha256:840322b0af4be7a6e2effddb4eb388053c25af0618f627f37d8b03cc1edbc928"}, + {file = "botocore-1.40.53.tar.gz", hash = "sha256:4ebb9e6648c4896d3f0cdda5ff30b5de9a83aeb591be89a16f98cc5ee3cd371c"}, ] [package.dependencies] @@ -493,7 +493,7 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "custom-boto3-init" -version = "3.0.7" +version = "4.0.0" description = "Initialize boto config for AWS Python SDK with custom configuration" optional = false python-versions = "^3.13" @@ -817,28 +817,28 @@ tools = ["beautifulsoup4 (>=4.13.4)", "bedrock-agentcore (>=0.1.0) ; python_vers [[package]] name = "langchain-core" -version = "0.3.74" +version = "0.3.80" description = "Building applications with LLMs through composability" optional = false -python-versions = ">=3.9" +python-versions = "<4.0.0,>=3.9.0" groups = ["test"] files = [ - {file = "langchain_core-0.3.74-py3-none-any.whl", hash = "sha256:088338b5bc2f6a66892f9afc777992c24ee3188f41cbc603d09181e34a228ce7"}, - {file = "langchain_core-0.3.74.tar.gz", hash = "sha256:ff604441aeade942fbcc0a3860a592daba7671345230c2078ba2eb5f82b6ba76"}, + {file = "langchain_core-0.3.80-py3-none-any.whl", hash = "sha256:2141e3838d100d17dce2359f561ec0df52c526bae0de6d4f469f8026c5747456"}, + {file = "langchain_core-0.3.80.tar.gz", hash = "sha256:29636b82513ab49e834764d023c4d18554d3d719a185d37b019d0a8ae948c6bb"}, ] [package.dependencies] -jsonpatch = ">=1.33,<2.0" -langsmith = ">=0.3.45" -packaging = ">=23.2" -pydantic = ">=2.7.4" -PyYAML = ">=5.3" +jsonpatch = ">=1.33.0,<2.0.0" +langsmith = ">=0.3.45,<1.0.0" +packaging = ">=23.2.0,<26.0.0" +pydantic = ">=2.7.4,<3.0.0" +PyYAML = ">=5.3.0,<7.0.0" tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10.0.0" -typing-extensions = ">=4.7" +typing-extensions = ">=4.7.0,<5.0.0" [[package]] name = "langchain-layer" -version = "3.0.7" +version = "4.0.0" description = "Layer for LangChain libraries" optional = false python-versions = "^3.13" @@ -849,7 +849,7 @@ develop = true [package.dependencies] langchain = "0.3.27" langchain-aws = "0.2.31" -langchain-core = "0.3.74" +langchain-core = "0.3.80" numpy = "2.2.2" pydantic = "2.11.0" requests = "2.32.4" @@ -1595,14 +1595,14 @@ tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asy [[package]] name = "s3transfer" -version = "0.13.1" +version = "0.14.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724"}, - {file = "s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf"}, + {file = "s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456"}, + {file = "s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125"}, ] [package.dependencies] diff --git a/source/lambda/chat/pyproject.toml b/source/lambda/chat/pyproject.toml index ec4350ff..902c315e 100644 --- a/source/lambda/chat/pyproject.toml +++ b/source/lambda/chat/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llm-chat-lambda" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Lambda implementation for chat feature" packages = [ diff --git a/source/lambda/custom-authorizer/package-lock.json b/source/lambda/custom-authorizer/package-lock.json index 9c65cdb2..d1db1c08 100644 --- a/source/lambda/custom-authorizer/package-lock.json +++ b/source/lambda/custom-authorizer/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/custom-authorizer", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@amzn/custom-authorizer", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "dependencies": { "aws-jwt-verify": "^4.0.1", @@ -20,7 +20,7 @@ "aws-sdk-client-mock": "^4.1.0", "eslint": "^9.16.0", "jest": "^29.7.0", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" @@ -780,10 +780,11 @@ } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -3658,9 +3659,9 @@ "dev": true }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -4231,9 +4232,9 @@ } }, "node_modules/prettier": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", - "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, "bin": { "prettier": "bin/prettier.cjs" @@ -5588,9 +5589,9 @@ } }, "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "requires": { "argparse": "^1.0.7", @@ -7739,9 +7740,9 @@ "dev": true }, "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "requires": { "argparse": "^2.0.1" @@ -8183,9 +8184,9 @@ "dev": true }, "prettier": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", - "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true }, "pretty-format": { diff --git a/source/lambda/custom-authorizer/package.json b/source/lambda/custom-authorizer/package.json index a7c54aae..b137548a 100644 --- a/source/lambda/custom-authorizer/package.json +++ b/source/lambda/custom-authorizer/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/custom-authorizer", - "version": "3.0.7", + "version": "4.0.0", "description": "This lambda function is used as a custom REQUEST authorizer for APIs", "main": "rest-handler.ts", "scripts": { @@ -13,7 +13,7 @@ "code-linter-js": "./node_modules/eslint/bin/eslint.js lambda --ext .js", "code-linter-ts": "./node_modules/eslint/bin/eslint.js bin lib --ext .ts", "code-linter": "npm run code-linter-ts && npm run code-linter-js", - "code-formatter": "./node_modules/prettier/bin-prettier.js --config .prettierrc.yml '**/*.ts' '**/*.js' --write" + "code-formatter": "prettier --config ../../../.prettierrc.yml --ignore-path ../../../.prettierignore --write '**/*.{js,ts,json,css,md}'" }, "author": { "name": "Amazon Web Services", @@ -32,7 +32,7 @@ "aws-sdk-client-mock": "^4.1.0", "eslint": "^9.16.0", "jest": "^29.7.0", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" diff --git a/source/lambda/custom-authorizer/rest-authorizer.ts b/source/lambda/custom-authorizer/rest-authorizer.ts index d3c1fefe..8bdcc0e4 100644 --- a/source/lambda/custom-authorizer/rest-authorizer.ts +++ b/source/lambda/custom-authorizer/rest-authorizer.ts @@ -3,6 +3,7 @@ import { AuthResponse, APIGatewayRequestAuthorizerEvent } from 'aws-lambda'; import { CognitoJwtVerifier } from 'aws-jwt-verify'; +import { AWSClientManager } from 'aws-sdk-lib'; import { denyAllPolicy, getPolicyDocument } from './utils/get-policy'; import { matchArnWithValidation } from './utils/match-policy'; import { @@ -11,7 +12,7 @@ import { } from '@aws-sdk/client-cognito-identity-provider'; import { jwtDecode } from 'jwt-decode'; -const cognitoClient = new CognitoIdentityProviderClient({}); +const cognitoClient = AWSClientManager.getServiceClient('cognito'); /** * Cognito JWT verifier to validate incoming APIGateway websocket authorization request. diff --git a/source/lambda/custom-authorizer/test/authorizer.test.ts b/source/lambda/custom-authorizer/test/authorizer.test.ts index 62b60305..7bdc5695 100644 --- a/source/lambda/custom-authorizer/test/authorizer.test.ts +++ b/source/lambda/custom-authorizer/test/authorizer.test.ts @@ -1,6 +1,10 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + import { APIGatewayRequestAuthorizerEvent } from 'aws-lambda'; import { mockValidWebsocketRequestEvent, diff --git a/source/lambda/custom-authorizer/tsconfig.json b/source/lambda/custom-authorizer/tsconfig.json index bee9787e..6773764c 100644 --- a/source/lambda/custom-authorizer/tsconfig.json +++ b/source/lambda/custom-authorizer/tsconfig.json @@ -29,6 +29,9 @@ "moduleResolution": "Node", "rootDir": ".", "paths": { + "aws-sdk-lib": [ + "../layers/aws-sdk-lib/dist" + ], "aws-node-user-agent-config": [ "../layers/aws-node-user-agent-config/dist" ], diff --git a/source/lambda/custom-authorizer/utils/get-policy.ts b/source/lambda/custom-authorizer/utils/get-policy.ts index e5f5ede4..7d59f2a8 100644 --- a/source/lambda/custom-authorizer/utils/get-policy.ts +++ b/source/lambda/custom-authorizer/utils/get-policy.ts @@ -5,7 +5,7 @@ import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; import { BatchGetCommand, BatchGetCommandOutput, DynamoDBDocumentClient } from '@aws-sdk/lib-dynamodb'; import { CognitoAccessTokenPayload } from 'aws-jwt-verify/jwt-model'; import { AuthResponse } from 'aws-lambda'; -import { customAwsConfig } from 'aws-node-user-agent-config'; +import { AWSClientManager } from 'aws-sdk-lib'; /** * Function that generates a IAM policy that denies all requests. @@ -79,7 +79,7 @@ export const getPolicyDocument = async (idToken: CognitoAccessTokenPayload): Pro * @returns */ export const batchQueryDynamoDB = async (tableName: string, groups: string[]): Promise => { - const ddbClient = new DynamoDBClient(customAwsConfig()); + const ddbClient = AWSClientManager.getServiceClient('dynamodb'); const ddbDocClient = DynamoDBDocumentClient.from(ddbClient); const result = await ddbDocClient.send( diff --git a/source/lambda/custom-resource/lambda_func.py b/source/lambda/custom-resource/lambda_func.py index ead1e4ed..50ca86c8 100644 --- a/source/lambda/custom-resource/lambda_func.py +++ b/source/lambda/custom-resource/lambda_func.py @@ -10,20 +10,30 @@ from custom_config import DEFAULT_APP_NAME from operations import ( admin_policy, - anonymous_metrics, + agentcore_oauth_client, + agentcore_outbound_permissions, + send_metrics, copy_model_info_to_ddb, copy_web_ui, cw_log_retention, - update_llm_config, + deploy_agent_core, + deploy_agent_core_memory, + deploy_mcp_gateway, + deploy_mcp_runtime, gen_domain_prefix, + gen_ecr_repo_prefix, gen_uuid, get_arns_for_inference_profile, get_compatible_azs, + lambda_version_generator, + multimodal_bucket_notifications, operation_types, redeploy_api, + update_llm_config, update_s3_policy, use_case_policy, webconfig, + sleep ) from operations.operation_types import FAILED, RESOURCE, RESOURCE_PROPERTIES @@ -35,7 +45,8 @@ # A dictionary for all custom resource operations invoked from CloudFormation operations_dictionary = { operation_types.GEN_UUID: gen_uuid.execute, - operation_types.ANONYMOUS_METRIC: anonymous_metrics.execute, + operation_types.METRIC: send_metrics.execute, + operation_types.ANONYMOUS_METRIC: send_metrics.execute, # Support deletion of existing resources with this type. operation_types.WEBCONFIG: webconfig.execute, operation_types.COPY_WEB_UI: copy_web_ui.execute, operation_types.UPDATE_BUCKET_POLICY: update_s3_policy.execute, @@ -44,10 +55,20 @@ operation_types.COPY_MODEL_INFO: copy_model_info_to_ddb.execute, operation_types.GET_COMPATIBLE_AZS: get_compatible_azs.execute, operation_types.GEN_DOMAIN_PREFIX: gen_domain_prefix.execute, + operation_types.GEN_ECR_REPO_PREFIX: gen_ecr_repo_prefix.execute, operation_types.CW_LOG_RETENTION: cw_log_retention.execute, operation_types.UPDATE_LLM_CONFIG: update_llm_config.execute, operation_types.GET_MODEL_RESOURCE_ARNS: get_arns_for_inference_profile.execute, operation_types.REDEPLOY_API: redeploy_api.execute, + operation_types.DEPLOY_AGENT_CORE: deploy_agent_core.execute, + operation_types.DEPLOY_AGENT_CORE_MEMORY: deploy_agent_core_memory.execute, + operation_types.DEPLOY_MCP_GATEWAY: deploy_mcp_gateway.execute, + operation_types.DEPLOY_MCP_RUNTIME: deploy_mcp_runtime.execute, + operation_types.LAMBDA_VERSION_GENERATOR: lambda_version_generator.execute, + operation_types.AGENTCORE_OAUTH_CLIENT: agentcore_oauth_client.execute, + operation_types.AGENTCORE_OUTBOUND_PERMISSIONS: agentcore_outbound_permissions.execute, + operation_types.MULTIMODAL_BUCKET_NOTIFICATIONS: multimodal_bucket_notifications.execute, + operation_types.SLEEP: sleep.execute, } diff --git a/source/lambda/custom-resource/lambda_ops_metrics.py b/source/lambda/custom-resource/lambda_ops_metrics.py index bd6a6276..fd484d04 100644 --- a/source/lambda/custom-resource/lambda_ops_metrics.py +++ b/source/lambda/custom-resource/lambda_ops_metrics.py @@ -7,6 +7,7 @@ from custom_config import DEFAULT_APP_NAME from utils.constants import PUBLISH_METRICS_PERIOD_IN_SECONDS, USE_CASE_UUID_ENV_VAR from utils.data import BuilderMetrics +from utils.lambda_context_parser import get_invocation_account_id from utils.metrics import push_builder_metrics, verify_env_setup from utils.metrics_payload import get_metrics_payload @@ -19,7 +20,7 @@ @metrics.log_metrics(capture_cold_start_metric=True) # type: ignore @tracer.capture_lambda_handler @logger.inject_lambda_context -def handler(*_): +def handler(event, context): try: verify_env_setup() metric_data = get_metrics_payload(PUBLISH_METRICS_PERIOD_IN_SECONDS) @@ -28,9 +29,10 @@ def handler(*_): logger.info("Skipping metrics publishing — all metric values are empty.") return + account_id = get_invocation_account_id(context) builder_metrics = BuilderMetrics( - USE_CASE_UUID, os.environ["SOLUTION_ID"], os.environ["SOLUTION_VERSION"], metric_data + USE_CASE_UUID, os.environ["SOLUTION_ID"], os.environ["SOLUTION_VERSION"], metric_data, account_id ) push_builder_metrics(builder_metrics) except Exception as ex: - logger.error(f"Error occurred when sending cloudwatch anonymous metrics, Error is {ex}") + logger.error(f"Error occurred when sending cloudwatch metrics, Error is {ex}") diff --git a/source/lambda/custom-resource/operations/agentcore_oauth_client.py b/source/lambda/custom-resource/operations/agentcore_oauth_client.py new file mode 100644 index 00000000..7064c60d --- /dev/null +++ b/source/lambda/custom-resource/operations/agentcore_oauth_client.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import uuid + +from aws_lambda_powertools import Logger, Tracer +from helper import get_service_client +from cfn_response import send_response +from operations import operation_types +from operations.operation_types import FAILED, PHYSICAL_RESOURCE_ID, RESOURCE, RESOURCE_PROPERTIES, SUCCESS + +logger = Logger(utc=True) +tracer = Tracer() + +# Required keys in the incoming event object +CLIENT_ID = "CLIENT_ID" +CLIENT_SECRET = "CLIENT_SECRET" +DISCOVERY_URL = "DISCOVERY_URL" +PROVIDER_NAME = "PROVIDER_NAME" +AWS_REGION = "AWS_REGION" + +# other constants +OPERATION_TYPE = operation_types.AGENTCORE_OAUTH_CLIENT + + +class AgentCoreIdentityError(Exception): + """Raised when AgentCore Identity API calls fail""" + pass + + +@tracer.capture_method +def verify_env_setup(event): + """This method verifies if all the necessary properties are correctly set in the event object as received by the lambda function's handler + + Args: + event (LambdaEvent): An event received by the lambda function that is passed by AWS services when invoking the function's handler + + Raises: + ValueError: If any of the properties in the custom resource properties are not set correctly or are not available + """ + if event[RESOURCE_PROPERTIES][RESOURCE] != OPERATION_TYPE: + err_msg = f"Operation type not available or did not match from the request. Expecting operation type to be {OPERATION_TYPE}" + logger.error(err_msg) + raise ValueError(err_msg) + + required_fields = [CLIENT_ID, CLIENT_SECRET, DISCOVERY_URL, PROVIDER_NAME] + for field in required_fields: + if event[RESOURCE_PROPERTIES].get(field, None) in ["", None]: + err_msg = f"{field} has not been passed. Hence operation cannot be performed" + logger.error(err_msg) + raise ValueError(err_msg) + + +@tracer.capture_method +def create(event, context): + """This method creates an OAuth2 credential provider using the AgentCore Identity client. + + Args: + event (LambdaEvent): An event object received by the lambda function that is passed by AWS services when invoking the function's handler + context (LambdaContext): A context object received by the lambda function that is passed by AWS services when invoking the function's handler + + Returns: + dict: Response containing the provider ID + + Raises: + AgentCoreIdentityError: If the API call to create OAuth client fails + """ + client_id = event[RESOURCE_PROPERTIES][CLIENT_ID] + client_secret = event[RESOURCE_PROPERTIES][CLIENT_SECRET] + discovery_url = event[RESOURCE_PROPERTIES][DISCOVERY_URL] + provider_name = event[RESOURCE_PROPERTIES][PROVIDER_NAME] + + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + bedrock_agentcore_client.create_oauth2_credential_provider( + name=provider_name, + credentialProviderVendor="CustomOauth2", + oauth2ProviderConfigInput={ + "customOauth2ProviderConfig": { + "oauthDiscovery": { + "discoveryUrl": discovery_url + }, + "clientId": client_id, + "clientSecret": client_secret, + } + } + ) + + logger.info(f"Successfully created OAuth2 provider {provider_name}") + + except Exception as error: + logger.error(f"Error occurred when creating OAuth2 provider, error is {error}") + raise AgentCoreIdentityError(f"Failed to create OAuth2 provider: {error}") from error + + +@tracer.capture_method +def delete(event, context): + """This method deletes the OAuth2 credential provider using bedrock-agentcore-control client. + + Args: + event (LambdaEvent): An event object received by the lambda function that is passed by AWS services when invoking the function's handler + context (LambdaContext): A context object received by the lambda function that is passed by AWS services when invoking the function's handler + + Raises: + AgentCoreIdentityError: If the API call to delete OAuth client fails + """ + provider_name = event[RESOURCE_PROPERTIES][PROVIDER_NAME] + + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + bedrock_agentcore_client.delete_oauth2_credential_provider(name=provider_name) + logger.info(f"Successfully deleted OAuth2 provider {provider_name}") + + except Exception as error: + logger.error(f"Error occurred when deleting OAuth2 provider, error is {error}") + raise AgentCoreIdentityError(f"Failed to delete OAuth2 provider: {error}") from error + + +@tracer.capture_method +def execute(event, context): + """This sub-module implements creation and deletion of AgentCore OAuth2 credential providers. + + Args: + event (LambdaEvent): An event object received by the lambda function that is passed by AWS services when invoking the function's handler + context (LambdaContext): A context object received by the lambda function that is passed by AWS services when invoking the function's handler + + Raises: + Exception: if there are any errors in creating or deleting OAuth client. During the handling of this exception it also sends a 'FAILED' status to + the AWS Cloudformation service. + """ + physical_resource_id = event.get(PHYSICAL_RESOURCE_ID, uuid.uuid4().hex[:8]) + + try: + verify_env_setup(event) + + if event["RequestType"] == "Create": + create(event, context) + send_response(event, context, SUCCESS, {}, physical_resource_id) + elif event["RequestType"] == "Delete": + delete(event, context) + send_response(event, context, SUCCESS, {}, physical_resource_id) + else: + logger.info(f"Operation type {event['RequestType']} is a no-op operation.") + send_response(event, context, SUCCESS, {}, physical_resource_id) + + except Exception as ex: + logger.error(f"Error occurred when managing OAuth2 provider. Error is {ex}") + send_response(event, context, FAILED, {}, physical_resource_id=physical_resource_id, reason=str(ex)) diff --git a/source/lambda/custom-resource/operations/agentcore_outbound_permissions.py b/source/lambda/custom-resource/operations/agentcore_outbound_permissions.py new file mode 100644 index 00000000..72f41231 --- /dev/null +++ b/source/lambda/custom-resource/operations/agentcore_outbound_permissions.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from typing import List +import uuid + +from aws_lambda_powertools import Logger, Tracer +from cfn_response import send_response +from operations import operation_types +from utils.auth_manager import AuthManager +from operations.operation_types import FAILED, PHYSICAL_RESOURCE_ID, RESOURCE, RESOURCE_PROPERTIES, SUCCESS + +from utils.lambda_context_parser import get_invocation_account_id + +from helper import get_service_resource +from utils.constants import ( + USE_CASE_CONFIG_RECORD_CONFIG_ATTRIBUTE_NAME, + USE_CASE_CONFIG_RECORD_KEY, + USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME, + USE_CASE_CONFIG_TABLE_NAME, +) +from utils.data import MCPServerData + +logger = Logger(utc=True) +tracer = Tracer() + +# Required keys in the incoming event object +USE_CASE_ID = "USE_CASE_ID" +USE_CASE_CLIENT_ID = "USE_CASE_CLIENT_ID" + +# Config keys +AGENT_BUILDER_PARAMS = "AgentBuilderParams" +MCP_SERVERS = "MCPServers" +WORKFLOW_PARAMS = "WorkflowParams" +AGENTS_AS_TOOLS_PARAMS = "AgentsAsToolsParams" +AGENTS = "Agents" + +# other constants +OPERATION_TYPE = operation_types.AGENTCORE_OUTBOUND_PERMISSIONS + + +@tracer.capture_method +def verify_env_setup(event): + """Verifies if all necessary properties are correctly set in the event object. + + Args: + event (LambdaEvent): Event received by the lambda function handler + + Raises: + ValueError: If any required properties are missing or invalid + """ + if event[RESOURCE_PROPERTIES][RESOURCE] != OPERATION_TYPE: + err_msg = f"Operation type not available or did not match from the request. Expecting operation type to be {OPERATION_TYPE}" + logger.error(err_msg) + raise ValueError(err_msg) + + required_fields = [USE_CASE_ID, USE_CASE_CLIENT_ID, USE_CASE_CONFIG_TABLE_NAME, USE_CASE_CONFIG_RECORD_KEY] + for field in required_fields: + if event[RESOURCE_PROPERTIES].get(field, None) in ["", None]: + err_msg = f"{field} has not been passed. Hence operation cannot be performed" + logger.error(err_msg) + raise ValueError(err_msg) + + +def _extract_properties(event): + """Extract common properties from event.""" + return ( + event[RESOURCE_PROPERTIES][USE_CASE_ID], + event[RESOURCE_PROPERTIES][USE_CASE_CLIENT_ID], + event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_TABLE_NAME], + event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY], + ) + + +def _manage_permissions(auth_manager, mcp_servers: List[MCPServerData], operation): + """Manage permissions for a list of MCP server IDs. + + Args: + auth_manager: AuthManager instance + use_case_id: Use case identifier + mcp_server_ids: List of MCP server IDs + operation: 'add' or 'remove' + + Returns: + List of processed MCP server IDs + """ + processed = [] + for mcp_server in mcp_servers: + if operation == "add": + auth_manager.add_permission(mcp_server) + elif operation == "remove": + auth_manager.remove_permission(mcp_server) + processed.append(mcp_server.agentcore_id) + return processed + + +@tracer.capture_method +def create(event, context): + """Creates outbound permissions for AgentCore MCP servers. + + Args: + event (LambdaEvent): Event object from lambda handler + context (LambdaContext): Context object from lambda handler + + Returns: + list: List of added MCP server IDs + """ + use_case_id, client_id, use_case_config_table_name, use_case_config_record_key = _extract_properties(event) + + auth_manager = AuthManager(client_id, use_case_id) + mcp_servers = get_mcp_servers(use_case_config_table_name, use_case_config_record_key, context) + added_permissions = _manage_permissions(auth_manager, mcp_servers, "add") + + return added_permissions + + +@tracer.capture_method +def update(event, context): + """Updates outbound permissions for AgentCore MCP servers. + + Args: + event (LambdaEvent): Event object from lambda handler + context (LambdaContext): Context object from lambda handler + + Returns: + tuple: (added_permissions, removed_permissions) + """ + use_case_id, client_id, use_case_config_table_name, use_case_config_record_key = _extract_properties(event) + + # Get current MCP servers from config + current_mcp_servers = get_mcp_servers(use_case_config_table_name, use_case_config_record_key, context) + + # Get old MCP servers from old config + old_properties = event.get("OldResourceProperties", {}) + if not old_properties: + raise ValueError("OldResourceProperties not found in update event") + + old_config_key = old_properties.get(USE_CASE_CONFIG_RECORD_KEY) + old_config_table = old_properties.get(USE_CASE_CONFIG_TABLE_NAME) + + if not old_config_key or not old_config_table: + raise ValueError("Old config key or table name not found in OldResourceProperties") + + old_mcp_servers = get_mcp_servers(old_config_table, old_config_key, context) + + # Calculate differences by comparing agentcore_ids + current_ids = {server.agentcore_id for server in current_mcp_servers} + old_ids = {server.agentcore_id for server in old_mcp_servers} + + new_servers = [server for server in current_mcp_servers if server.agentcore_id not in old_ids] + removed_servers = [server for server in old_mcp_servers if server.agentcore_id not in current_ids] + + auth_manager = AuthManager(client_id, use_case_id) + + # Add new permissions + added_permissions = _manage_permissions(auth_manager, new_servers, "add") + + # Remove old permissions + removed_permissions = _manage_permissions(auth_manager, removed_servers, "remove") + + return added_permissions, removed_permissions + + +@tracer.capture_method +def delete(event, context): + """Deletes outbound permissions for AgentCore MCP servers. + + Args: + event (LambdaEvent): Event object from lambda handler + context (LambdaContext): Context object from lambda handler + + Returns: + list: List of removed MCP server IDs + """ + use_case_id, client_id, use_case_config_table_name, use_case_config_record_key = _extract_properties(event) + + auth_manager = AuthManager(client_id, use_case_id) + mcp_servers = get_mcp_servers(use_case_config_table_name, use_case_config_record_key, context) + removed_permissions = _manage_permissions(auth_manager, mcp_servers, "remove") + + return removed_permissions + + +@tracer.capture_method +def execute(event, context): + """Manages AgentCore outbound permissions based on CloudFormation request type. + + Args: + event (LambdaEvent): Event object from lambda handler + context (LambdaContext): Context object from lambda handler + """ + physical_resource_id = event.get(PHYSICAL_RESOURCE_ID, uuid.uuid4().hex[:8]) + try: + verify_env_setup(event) + + added, removed = [], [] + + if event["RequestType"] == "Create": + added = create(event, context) + elif event["RequestType"] == "Update": + added, removed = update(event, context) + elif event["RequestType"] == "Delete": + removed = delete(event, context) + else: + logger.info(f"Operation type {event['RequestType']} is a no-op operation.") + send_response(event, context, FAILED, {}, physical_resource_id) + return + + response = {"Added": added, "Removed": removed} + send_response(event, context, SUCCESS, response, physical_resource_id) + + except Exception as ex: + logger.error(f"Error occurred when managing outbound permissions. Error is {ex}") + send_response(event, context, FAILED, {}, physical_resource_id=physical_resource_id, reason=str(ex)) + + +@tracer.capture_method +def get_usecase_config(table_name: str, key: str) -> dict: + ddb_resource = get_service_resource("dynamodb") + config_table = ddb_resource.Table(table_name) + usecase_config = ( + config_table.get_item( + Key={USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME: key}, + ) + .get("Item", {}) + .get(USE_CASE_CONFIG_RECORD_CONFIG_ATTRIBUTE_NAME) + ) + + if usecase_config is None: + raise ValueError(f"No record found in the table {table_name} for the key {key}") + + return usecase_config + + +@tracer.capture_method +def get_mcp_servers(table_name: str, key: str, context) -> list[MCPServerData]: + config = get_usecase_config(table_name, key) + account_id = get_invocation_account_id(context) + + all_mcp_servers = [] + + # Check for AgentBuilder use case - MCP servers directly in AgentBuilderParams + agent_builder_mcp_configs = config.get(AGENT_BUILDER_PARAMS, {}).get(MCP_SERVERS, []) + if agent_builder_mcp_configs: + for mcp_config in agent_builder_mcp_configs: + all_mcp_servers.append( + MCPServerData( + type=mcp_config.get("Type"), + url=mcp_config.get("Url"), + use_case_id=mcp_config.get("UseCaseId"), + use_case_name=mcp_config.get("UseCaseName"), + account_id=account_id, + ) + ) + + # Check for Workflow use case - MCP servers nested in agents + workflow_params = config.get("WorkflowParams", {}) + agents_as_tools = workflow_params.get("AgentsAsToolsParams", {}) + agents = agents_as_tools.get("Agents", []) + + for agent in agents: + agent_mcp_configs = agent.get(AGENT_BUILDER_PARAMS, {}).get(MCP_SERVERS, []) + if agent_mcp_configs: + for mcp_config in agent_mcp_configs: + all_mcp_servers.append( + MCPServerData( + type=mcp_config.get("Type"), + url=mcp_config.get("Url"), + use_case_id=mcp_config.get("UseCaseId"), + use_case_name=mcp_config.get("UseCaseName"), + account_id=account_id, + ) + ) + + return all_mcp_servers diff --git a/source/lambda/custom-resource/operations/deploy_agent_core.py b/source/lambda/custom-resource/operations/deploy_agent_core.py new file mode 100644 index 00000000..46ae9b9d --- /dev/null +++ b/source/lambda/custom-resource/operations/deploy_agent_core.py @@ -0,0 +1,662 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import os +import re +from typing import Optional + +from aws_lambda_powertools import Logger +from botocore.exceptions import ClientError +from cfn_response import send_response +from helper import get_service_client +from operations.operation_types import FAILED, RESOURCE_PROPERTIES, SUCCESS +from operations.shared import retry_with_backoff +from utils.agent_core_utils import ( + format_error_message, + handle_client_error, + initialize_bedrock_client, + validate_event_properties, +) +from utils.constants import AGENTCORE_RUNTIME_IDLE_TIMEOUT_SECONDS + + +logger = Logger(utc=True) + + +def _ensure_ecr_image_exists(image_uri: str): + """ + Ensure ECR image exists for AgentCore SDK validation. + Triggers pull-through cache if image doesn't exist. + + Raises: + ValueError: If image URI format is invalid + ClientError: If ECR operations fail and image cannot be pulled + """ + operation_context = {"image_uri": image_uri} + + # Parse ECR URI format: account.dkr.ecr.region.amazonaws.com/repo:tag + match = re.match(r'(\d+)\.dkr\.ecr\.([^.]+)\.amazonaws\.com/(.+?)(?::(.+))?$', image_uri) + if not match: + error_msg = f"Invalid ECR URI format: {image_uri}" + logger.error(error_msg) + raise ValueError(error_msg) + + registry_id, region, repository_name, tag = match.groups() + operation_context.update({"registry_id": registry_id, "region": region, "repository_name": repository_name}) + + logger.info(f"Ensuring ECR image exists: {image_uri}") + ecr_client = get_service_client("ecr", region_name=region) + + # Default to 'latest' tag if none specified (ECR standard behavior) + image_tag = tag or "latest" + logger.info(f"Using image tag: {image_tag}") + ecr_params = { + "registryId": registry_id, + "repositoryName": repository_name, + "imageIds": [{'imageTag': image_tag}] + } + + try: + # Check if image already exists + ecr_client.describe_images(**ecr_params) + logger.info(f"ECR image {image_uri} already exists") + except ClientError as e: + if e.response["Error"]["Code"] in ["RepositoryNotFoundException", "ImageNotFoundException"]: + logger.info(f"ECR image {image_uri} does not exist, triggering pull-through cache") + try: + # Trigger pull-through cache + ecr_client.batch_get_image(**ecr_params) + except ClientError as e: + handle_client_error(e, "batch_get_image", operation_context) + + # Wait for image to be available (up to ~2.5 minutes) + retry_with_backoff(ecr_client.describe_images, max_attempts=10, base_delay=2, **ecr_params) + logger.info(f"Successfully pulled image via pull-through cache: {image_uri}") + else: + handle_client_error(e, "describe_images", operation_context) + raise + + +def _extract_resource_properties(resource_properties): + """Extract and validate required resource properties.""" + try: + multimodal_data_metadata_table = resource_properties.get("MultimodalDataMetadataTable", "") + multimodal_data_bucket = resource_properties.get("MultimodalDataBucket", "") + + # Validate that both multimodal parameters are provided together or neither is provided + has_metadata_table = bool(multimodal_data_metadata_table) + has_bucket = bool(multimodal_data_bucket) + + if has_metadata_table != has_bucket: + error_msg = "Both MultimodalDataBucket and MultimodalDataMetadataTable must be provided together or neither should be provided" + logger.error(error_msg) + raise ValueError(error_msg) + + return { + "agent_runtime_name": resource_properties["AgentRuntimeName"], + "agent_image_uri": resource_properties["AgentImageUri"], + "execution_role_arn": resource_properties["ExecutionRoleArn"], + "use_case_config_key": resource_properties["UseCaseConfigRecordKey"], + "use_case_config_table_name": resource_properties["UseCaseConfigTableName"], + "use_case_uuid": resource_properties["UseCaseUUID"], + "memory_id": resource_properties.get("MemoryId"), + "multimodal_data_metadata_table": multimodal_data_metadata_table, + "multimodal_data_bucket": multimodal_data_bucket, + "memory_strategy_id": resource_properties.get("MemoryStrategyId") + } + except KeyError as e: + missing_param = str(e).strip("'") + error_msg = f"Missing required parameter: {missing_param}" + logger.error(error_msg) + raise ValueError(error_msg) + + +def _handle_create_request(props, operation_context): + """Handle CloudFormation Create request.""" + logger.info(f"Creating AgentCore Runtime '{props['agent_runtime_name']}' with image '{props['agent_image_uri']}'") + logger.info(f"Using execution role: {props['execution_role_arn']}") + + # Ensure ECR image exists before AgentCore validation + _ensure_ecr_image_exists(props["agent_image_uri"]) + + # Memory ID should be provided from the separate memory deployment + memory_id = props.get("memory_id") + if not memory_id: + logger.warning("No memory ID provided, runtime will be created without memory configuration") + + memory_strategy_id = props.get("memory_strategy_id") + + agent_runtime_arn, agent_runtime_id = retry_with_backoff( + create_agent_runtime, + runtime_name=props["agent_runtime_name"], + image_uri=props["agent_image_uri"], + execution_role_arn=props["execution_role_arn"], + config_table_name=props["use_case_config_table_name"], + use_case_config_key=props["use_case_config_key"], + use_case_uuid=props["use_case_uuid"], + memory_id=memory_id, + multimodal_data_metadata_table=props["multimodal_data_metadata_table"], + multimodal_data_bucket=props["multimodal_data_bucket"], + memory_strategy_id=memory_strategy_id, + max_attempts=9, + base_delay=2 + ) + + operation_context["agent_runtime_id"] = agent_runtime_id + operation_context["agent_memory_id"] = memory_id + return agent_runtime_arn + + +def _handle_update_request(props, operation_context): + """Handle CloudFormation Update request.""" + logger.info(f"Updating AgentCore Runtime '{props['agent_runtime_name']}' with image '{props['agent_image_uri']}'") + logger.info(f"Using execution role: {props['execution_role_arn']}") + + # Ensure ECR image exists before AgentCore validation + _ensure_ecr_image_exists(props["agent_image_uri"]) + + # Memory ID should be provided from the separate memory deployment + memory_id = props.get("memory_id") + if not memory_id: + logger.warning("No memory ID provided, runtime will be updated without memory configuration") + + memory_strategy_id = props.get("memory_strategy_id") + + agent_runtime_arn, agent_runtime_id = retry_with_backoff( + update_agent_runtime, + runtime_name=props["agent_runtime_name"], + image_uri=props["agent_image_uri"], + execution_role_arn=props["execution_role_arn"], + config_table_name=props["use_case_config_table_name"], + use_case_config_key=props["use_case_config_key"], + use_case_uuid=props["use_case_uuid"], + memory_id=memory_id, + multimodal_data_metadata_table=props["multimodal_data_metadata_table"], + multimodal_data_bucket=props["multimodal_data_bucket"], + memory_strategy_id=memory_strategy_id, + max_attempts=9, + base_delay=2 + ) + operation_context["agent_runtime_id"] = agent_runtime_id + operation_context["agent_memory_id"] = memory_id + return agent_runtime_arn + + +def _handle_delete_request(props): + """Handle CloudFormation Delete request.""" + logger.info(f"Deleting AgentCore Runtime '{props['agent_runtime_name']}'") + retry_with_backoff(delete_agent_runtime, runtime_name=props["agent_runtime_name"]) + return "" + + +def execute(event, context): + """ + Deploy AgentCore Runtime using the bedrock-agentcore service. + + Args: + event: CloudFormation custom resource event + context: Lambda context object + """ + physical_resource_id = None + operation_context = {} + + try: + resource_properties = event.get(RESOURCE_PROPERTIES, {}) + request_type = event.get("RequestType", "Unknown") + use_case_config_key = resource_properties.get("UseCaseConfigRecordKey", "unknown") + + operation_context = { + "request_type": request_type, + "use_case_config_key": use_case_config_key, + } + + logger.info(f"AgentCore deployment operation - Request Type: {request_type}") + logger.info(f"Resource Properties: {resource_properties}") + + validate_event_properties(event) + props = _extract_resource_properties(resource_properties) + operation_context.update( + { + "agent_runtime_name": props["agent_runtime_name"], + "agent_image_uri": props["agent_image_uri"], + "execution_role_arn": props["execution_role_arn"], + "use_case_uuid": props["use_case_uuid"], + "multimodal_data_metadata_table": props["multimodal_data_metadata_table"], + "multimodal_data_bucket": props["multimodal_data_bucket"], + } + ) + + initialize_bedrock_client() + + if request_type == "Create": + agent_runtime_arn = _handle_create_request(props, operation_context) + # Physical resource ID should be consistent throughout updates to ensure CloudFormation doesn't send a delete + physical_resource_id = operation_context.get("agent_runtime_id") + elif request_type == "Update": + agent_runtime_arn = _handle_update_request(props, operation_context) + physical_resource_id = operation_context.get("agent_runtime_id") + elif request_type == "Delete": + agent_runtime_arn = _handle_delete_request(props) + else: + error_msg = f"Unknown CloudFormation request type: {request_type}. Expected Create, Update, or Delete." + logger.error(error_msg) + raise ValueError(error_msg) + + response_data = { + "AgentRuntimeArn": agent_runtime_arn, + "AgentRuntimeName": props["agent_runtime_name"], + "AgentRuntimeId": operation_context.get("agent_runtime_id", ""), + "AgentMemoryId": operation_context.get("agent_memory_id", ""), + } + + logger.info(f"Returning success response with data: {response_data}") + send_response(event, context, SUCCESS, response_data, physical_resource_id) + + except ValueError as ve: + error_msg = f"Configuration Error: {str(ve)}" + logger.error(error_msg, extra={"operation_context": operation_context}) + physical_resource_id = ( + physical_resource_id or f"agent-runtime-{operation_context.get('use_case_config_key', 'unknown')}" + ) + send_response(event, context, FAILED, {}, physical_resource_id, reason=error_msg) + + except ClientError as ce: + try: + handle_client_error(ce, "deploy_agent_core_main", operation_context) + except ClientError: + pass + + error_code = ce.response["Error"]["Code"] + error_message = ce.response["Error"]["Message"] + detailed_message = format_error_message( + "deploy AgentCore runtime", error_code, error_message, operation_context + ) + + physical_resource_id = ( + physical_resource_id or f"agent-runtime-{operation_context.get('use_case_config_key', 'unknown')}" + ) + send_response(event, context, FAILED, {}, physical_resource_id, reason=detailed_message) + + except Exception as ex: + error_msg = f"Unexpected error in deploy_agent_core operation: {str(ex)}" + logger.error(error_msg, extra={"operation_context": operation_context, "exception_type": type(ex).__name__}) + physical_resource_id = ( + physical_resource_id or f"agent-runtime-{operation_context.get('use_case_config_key', 'unknown')}" + ) + send_response(event, context, FAILED, {}, physical_resource_id, reason=f"Unexpected Error: {str(ex)}") + + +def _build_runtime_environment_variables( + config_table_name: str, + use_case_config_key: str, + use_case_uuid: str, + memory_id: Optional[str] = None, + multimodal_data_metadata_table: Optional[str] = None, + multimodal_data_bucket: Optional[str] = None, + memory_strategy_id: Optional[str] = None, + additional_env_vars: Optional[dict] = None, +): + """Build environment variables for runtime configuration.""" + # Extract short ID from UUID (first segment is 8 chars) + use_case_short_id = use_case_uuid.split("-")[0] + m2m_identity_name = f"gaab-oauth-provider-{use_case_short_id}" + + environment_variables = { + "USE_CASE_TABLE_NAME": config_table_name, + "USE_CASE_CONFIG_KEY": use_case_config_key, + "USE_CASE_UUID": use_case_uuid, + "AWS_REGION": os.getenv("AWS_REGION"), + "M2M_IDENTITY_NAME": m2m_identity_name, + "AWS_SDK_USER_AGENT": os.getenv("AWS_SDK_USER_AGENT", "{}"), # pass along into runtime to attach to SDK client + } + + if memory_id: + environment_variables["MEMORY_ID"] = memory_id + + if memory_strategy_id: + environment_variables["MEMORY_STRATEGY_ID"] = memory_strategy_id + + # Add multimodal data environment variables if provided + if multimodal_data_metadata_table: + environment_variables["MULTIMODAL_METADATA_TABLE_NAME"] = multimodal_data_metadata_table + + if multimodal_data_bucket: + environment_variables["MULTIMODAL_DATA_BUCKET"] = multimodal_data_bucket + + # additional environment variables (for MCP runtime) + if additional_env_vars: + environment_variables.update(additional_env_vars) + + return environment_variables + + +def _build_runtime_request(runtime_name: str, image_uri: str, execution_role_arn: str, environment_variables: dict): + """Build the runtime creation request.""" + return { + "agentRuntimeName": runtime_name, + "agentRuntimeArtifact": {"containerConfiguration": {"containerUri": image_uri}}, + "roleArn": execution_role_arn, + "networkConfiguration": {"networkMode": "PUBLIC"}, + "protocolConfiguration": {"serverProtocol": "HTTP"}, + "environmentVariables": environment_variables, + "lifecycleConfiguration": {"idleRuntimeSessionTimeout": AGENTCORE_RUNTIME_IDLE_TIMEOUT_SECONDS}, + } + + +def _validate_runtime_response(response): + """Validate runtime creation response and extract ARN and ID.""" + runtime_arn = response.get("agentRuntimeArn") + runtime_id = response.get("agentRuntimeId") + + if not runtime_arn: + raise ValueError("Runtime creation succeeded but no ARN returned") + if not runtime_id: + raise ValueError("Runtime creation succeeded but no ID returned") + + return runtime_arn, runtime_id + + +def create_agent_runtime( + runtime_name: str, + image_uri: str, + execution_role_arn: str, + config_table_name: str, + use_case_config_key: str, + use_case_uuid: str, + memory_id: str = None, + multimodal_data_metadata_table: str = None, + multimodal_data_bucket: str = None, + memory_strategy_id: str = None, + additional_env_vars: dict = None, +): + """ + Create a new AgentCore Runtime using bedrock-agentcore CreateAgentRuntime API. + + Args: + runtime_name: Unique name for the AgentCore Runtime + image_uri: ECR image URI for the agent container + execution_role_arn: IAM role ARN for runtime execution + config_table_name: DynamoDB table name for configuration storage + use_case_config_key: Use case identifier + use_case_uuid: Use case UUID for generating M2M identity name + memory_id: Memory configuration ID (required for both short and long term memory) + multimodal_data_metadata_table: Multimodal data metadata table name + multimodal_data_bucket: Multimodal data bucket name + additional_env_vars: Additional environment variables to include (for MCP runtime) + + Returns: + tuple: (AgentCore Runtime ARN, Runtime ID) + + Raises: + ClientError: If bedrock-agentcore API call fails + ValueError: If required parameters are missing + """ + operation_context = { + "runtime_name": runtime_name, + "image_uri": image_uri, + "execution_role_arn": execution_role_arn, + "use_case_config_key": use_case_config_key, + "use_case_uuid": use_case_uuid, + "memory_id": memory_id, + "multimodal_data_metadata_table": multimodal_data_metadata_table, + "multimodal_data_bucket": multimodal_data_bucket, + } + + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + + environment_variables = _build_runtime_environment_variables( + config_table_name, + use_case_config_key, + use_case_uuid, + memory_id, + multimodal_data_metadata_table, + multimodal_data_bucket, + memory_strategy_id, + additional_env_vars, + ) + logger.info(f"Creating AgentCore Runtime '{runtime_name}' with environment variables: {environment_variables}") + + runtime_request = _build_runtime_request(runtime_name, image_uri, execution_role_arn, environment_variables) + logger.info(f"Creating runtime with request: {runtime_request}") + response = bedrock_agentcore_client.create_agent_runtime(**runtime_request) + runtime_arn, runtime_id = _validate_runtime_response(response) + + logger.info(f"Successfully created AgentCore Runtime with ARN: {runtime_arn}, ID: {runtime_id}") + return runtime_arn, runtime_id + + except ClientError as e: + handle_client_error(e, "create_agent_runtime", operation_context) + raise + except ValueError as e: + logger.error(f"Parameter validation failed for runtime creation: {str(e)}") + raise + except Exception as e: + logger.error(f"Unexpected error creating AgentCore Runtime: {str(e)}") + raise + + +def _find_runtime_id_by_name(bedrock_agentcore_client, runtime_name: str): + """Find runtime ID by runtime name.""" + # Paginate response + list_response = bedrock_agentcore_client.list_agent_runtimes() + while True: + for runtime in list_response.get("agentRuntimes", []): + if runtime.get("agentRuntimeName") == runtime_name: + runtime_id = runtime.get("agentRuntimeId") + logger.info(f"Found runtime ID '{runtime_id}' for runtime name '{runtime_name}'") + return runtime_id + + if list_response.get("nextToken"): + list_response = bedrock_agentcore_client.list_agent_runtimes(nextToken=list_response.get("nextToken")) + else: + break + + raise ValueError(f"AgentCore Runtime '{runtime_name}' not found for update") + + +def _get_runtime_description(bedrock_agentcore_client, runtime_id: str, runtime_name: str): + """Get runtime description with error handling.""" + try: + describe_response = bedrock_agentcore_client.get_agent_runtime(agentRuntimeId=runtime_id) + logger.info(f"Current runtime configuration: {describe_response}") + return describe_response + except ClientError as e: + if e.response["Error"]["Code"] == "ResourceNotFoundException": + raise ValueError(f"AgentCore Runtime '{runtime_name}' not found for update") + raise + + +def _build_update_request( + runtime_id: str, image_uri: str, execution_role_arn: str, describe_response: dict, environment_variables: dict +): + """Build the runtime update request.""" + return { + "agentRuntimeId": runtime_id, + "agentRuntimeArtifact": {"containerConfiguration": {"containerUri": image_uri}}, + "roleArn": execution_role_arn, + "networkConfiguration": describe_response.get("networkConfiguration", {}), + "environmentVariables": environment_variables, + } + + +def _log_configuration_changes(describe_response: dict, image_uri: str, execution_role_arn: str): + """Log configuration changes being made.""" + current_image_uri = ( + describe_response.get("agentRuntimeArtifact", {}).get("containerConfiguration", {}).get("containerUri") + ) + current_role_arn = describe_response.get("roleArn") + + if image_uri != current_image_uri: + logger.info(f"Updating image URI from '{current_image_uri}' to '{image_uri}'") + if execution_role_arn != current_role_arn: + logger.info(f"Updating execution role from '{current_role_arn}' to '{execution_role_arn}'") + + +def update_agent_runtime( + runtime_name: str, + image_uri: str, + execution_role_arn: str, + config_table_name: str, + use_case_config_key: str, + use_case_uuid: str, + memory_id: str, + multimodal_data_metadata_table: str = None, + multimodal_data_bucket: str = None, + memory_strategy_id: str = None, +): + """ + Update an existing AgentCore Runtime using bedrock-agentcore UpdateAgentRuntime API. + + Args: + runtime_name: Unique name for the AgentCore Runtime + image_uri: ECR image URI for the agent container + execution_role_arn: IAM role ARN for runtime execution + config_table_name: DynamoDB table name for configuration storage + use_case_config_key: Use case identifier + use_case_uuid: Use case UUID for generating M2M identity name + memory_id: AgentCore memory instance ID + multimodal_data_metadata_table: Multimodal data metadata table name + multimodal_data_bucket: Multimodal data bucket name + + Returns: + str: Updated AgentCore Runtime ARN + + Raises: + ClientError: If bedrock-agentcore API call fails + ValueError: If required parameters are missing or runtime doesn't exist + """ + operation_context = { + "runtime_name": runtime_name, + "image_uri": image_uri, + "execution_role_arn": execution_role_arn, + "use_case_config_key": use_case_config_key, + "use_case_uuid": use_case_uuid, + "memory_id": memory_id, + "multimodal_data_metadata_table": multimodal_data_metadata_table, + "multimodal_data_bucket": multimodal_data_bucket, + } + + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + + try: + runtime_id = _find_runtime_id_by_name(bedrock_agentcore_client, runtime_name) + except ClientError as e: + handle_client_error(e, "list_agent_runtimes", operation_context) + raise + + logger.info(f"Describing existing AgentCore Runtime '{runtime_name}' (ID: {runtime_id}) before update") + describe_response = _get_runtime_description(bedrock_agentcore_client, runtime_id, runtime_name) + + environment_variables = _build_runtime_environment_variables( + config_table_name, + use_case_config_key, + use_case_uuid, + memory_id, + multimodal_data_metadata_table, + multimodal_data_bucket, + memory_strategy_id + ) + + logger.info(f"Updating AgentCore Runtime '{runtime_name}' with environment variables: {environment_variables}") + + update_request = _build_update_request( + runtime_id, image_uri, execution_role_arn, describe_response, environment_variables + ) + _log_configuration_changes(describe_response, image_uri, execution_role_arn) + + logger.info(f"Updating environment variables: {environment_variables}") + logger.info(f"Updating runtime with request: {update_request}") + response = bedrock_agentcore_client.update_agent_runtime(**update_request) + + runtime_arn = response.get("agentRuntimeArn") + if not runtime_arn: + raise ValueError("Runtime update succeeded but no ARN returned") + + logger.info(f"Successfully updated AgentCore Runtime with ARN: {runtime_arn}") + return runtime_arn, runtime_id + + except ClientError as e: + handle_client_error(e, "update_agent_runtime", operation_context) + raise + except ValueError as e: + logger.error(f"Parameter validation failed for runtime update: {str(e)}") + raise + except Exception as e: + logger.error(f"Unexpected error updating AgentCore Runtime: {str(e)}") + raise + + +def _find_runtime_for_deletion(bedrock_agentcore_client, runtime_name: str): + """Find runtime for deletion.""" + list_response = bedrock_agentcore_client.list_agent_runtimes() + + for runtime in list_response.get("agentRuntimes", []): + if runtime.get("agentRuntimeName") == runtime_name: + runtime_id = runtime.get("agentRuntimeId") + logger.info(f"Found runtime ID '{runtime_id}' for runtime name '{runtime_name}'") + return runtime_id + + logger.warning(f"AgentCore Runtime '{runtime_name}' not found in list, may already be deleted") + return None + + +def _delete_runtime_resource(bedrock_agentcore_client, runtime_id: str, runtime_name: str, operation_context: dict): + """Delete the runtime resource.""" + try: + logger.info(f"Deleting AgentCore Runtime '{runtime_name}' with ID '{runtime_id}'") + bedrock_agentcore_client.delete_agent_runtime(agentRuntimeId=runtime_id) + logger.info(f"Successfully initiated deletion of AgentCore Runtime '{runtime_id}'") + except ClientError as e: + if e.response["Error"]["Code"] == "ResourceNotFoundException": + logger.warning(f"AgentCore Runtime '{runtime_id}' not found, may already be deleted") + else: + handle_client_error(e, "delete_agent_runtime", operation_context) + raise + + +def delete_agent_runtime(runtime_name: str): + """ + Delete an AgentCore Runtime using bedrock-agentcore API. + + Args: + runtime_name: Unique name for the AgentCore Runtime to delete + + Raises: + ClientError: If bedrock-agentcore API call fails + Exception: For unexpected errors during deletion + """ + operation_context = {"runtime_name": runtime_name} + + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + logger.info(f"Starting deletion of AgentCore Runtime '{runtime_name}'") + + try: + runtime_id = _find_runtime_for_deletion(bedrock_agentcore_client, runtime_name) + + if not runtime_id: + return # Runtime not found, already deleted + + except ClientError as e: + if e.response["Error"]["Code"] == "ResourceNotFoundException": + logger.warning(f"AgentCore Runtime '{runtime_name}' not found, may already be deleted") + return + else: + handle_client_error(e, "get_agent_runtime", operation_context) + except Exception as e: + logger.warning(f"Unexpected error describing runtime before deletion: {str(e)}") + + if runtime_id: + _delete_runtime_resource(bedrock_agentcore_client, runtime_id, runtime_name, operation_context) + + logger.info(f"Completed cleanup operations for AgentCore Runtime '{runtime_name}'") + + except ClientError as e: + handle_client_error(e, "delete_agent_runtime", operation_context) + raise + except Exception as e: + logger.error(f"Unexpected error in delete_agent_runtime: {str(e)}") + raise diff --git a/source/lambda/custom-resource/operations/deploy_agent_core_memory.py b/source/lambda/custom-resource/operations/deploy_agent_core_memory.py new file mode 100644 index 00000000..efca3989 --- /dev/null +++ b/source/lambda/custom-resource/operations/deploy_agent_core_memory.py @@ -0,0 +1,293 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from aws_lambda_powertools import Logger +from botocore.exceptions import ClientError +from cfn_response import send_response +from operations.operation_types import FAILED, RESOURCE_PROPERTIES, SUCCESS +from operations.shared import retry_with_backoff +from utils.agent_core_utils import format_error_message, handle_client_error, validate_event_properties, initialize_bedrock_client +from helper import get_service_client + +logger = Logger(utc=True) + + +def _extract_resource_properties(resource_properties): + """Extract and validate required resource properties.""" + try: + return { + "agent_runtime_name": resource_properties["AgentRuntimeName"], + "enable_long_term_memory": resource_properties.get("EnableLongTermMemory", "No"), + } + except KeyError as e: + missing_param = str(e).strip("'") + error_msg = f"Missing required parameter: {missing_param}" + logger.error(error_msg) + raise ValueError(error_msg) + + +def execute(event, context): + """ + Deploy AgentCore Memory using the bedrock-agentcore service. + + Args: + event: CloudFormation custom resource event + context: Lambda context object + """ + physical_resource_id = None + operation_context = {} + + try: + resource_properties = event.get(RESOURCE_PROPERTIES, {}) + request_type = event.get("RequestType", "Unknown") + agent_runtime_name = resource_properties.get("AgentRuntimeName", "unknown") + + # For update/delete operations, get memory ID from physical resource ID + existing_memory_id = event.get("PhysicalResourceId") if request_type in ["Update", "Delete"] else None + physical_resource_id = existing_memory_id or f"agent-memory-{agent_runtime_name}" + + operation_context = { + "request_type": request_type, + "agent_runtime_name": agent_runtime_name, + "physical_resource_id": physical_resource_id, + "existing_memory_id": existing_memory_id, + } + + logger.info(f"AgentCore Memory deployment operation - Request Type: {request_type}") + logger.info(f"Resource Properties: {resource_properties}") + + validate_event_properties(event) + props = _extract_resource_properties(resource_properties) + operation_context.update(props) + + initialize_bedrock_client() + memory_strategy_id = None + if request_type == "Create": + memory_id, memory_strategy_id = _handle_create_request(props) + physical_resource_id = memory_id + elif request_type == "Update": + memory_id, memory_strategy_id = _handle_update_request(props, existing_memory_id) + physical_resource_id = memory_id + elif request_type == "Delete": + memory_id = _handle_delete_request(existing_memory_id) + else: + error_msg = f"Unknown CloudFormation request type: {request_type}. Expected Create, Update, or Delete." + logger.error(error_msg) + raise ValueError(error_msg) + + response_data = { + "MemoryId": memory_id, + "MemoryStrategyId": memory_strategy_id if memory_strategy_id else "" + } + + logger.info(f"Returning success response with data: {response_data}") + send_response(event, context, SUCCESS, response_data, physical_resource_id) + + except ValueError as ve: + error_msg = f"Configuration Error: {str(ve)}" + logger.error(error_msg, extra={"operation_context": operation_context}) + physical_resource_id = physical_resource_id or f"agent-memory-{operation_context.get('agent_runtime_name', 'unknown')}" + send_response(event, context, FAILED, {}, physical_resource_id, reason=error_msg) + + except ClientError as ce: + error_code = ce.response["Error"]["Code"] + error_message = ce.response["Error"]["Message"] + detailed_message = f"Failed to deploy AgentCore memory: {error_code} - {error_message}" + + physical_resource_id = physical_resource_id or f"agent-memory-{operation_context.get('agent_runtime_name', 'unknown')}" + send_response(event, context, FAILED, {}, physical_resource_id, reason=detailed_message) + + except Exception as ex: + error_msg = f"Unexpected error in deploy_agent_core_memory operation: {str(ex)}" + logger.error(error_msg, extra={"operation_context": operation_context, "exception_type": type(ex).__name__}) + physical_resource_id = physical_resource_id or f"agent-memory-{operation_context.get('agent_runtime_name', 'unknown')}" + send_response(event, context, FAILED, {}, physical_resource_id, reason=f"Unexpected Error: {str(ex)}") + + +def _handle_create_request(props): + """Handle CloudFormation Create request.""" + logger.info(f"Creating AgentCore Memory '{props['agent_runtime_name']}'") + logger.info(f"Memory settings - Enable: {props['enable_long_term_memory']}") + + memory_id, memory_strategy_id = retry_with_backoff( + create_memory_configuration, props["enable_long_term_memory"], props["agent_runtime_name"] + ) + + return memory_id, memory_strategy_id + + +def _handle_update_request(props, memory_id): + """Handle CloudFormation Update request.""" + logger.info(f"Updating AgentCore Memory '{memory_id}'") + logger.info(f"Memory settings - Enable: {props['enable_long_term_memory']}") + + memory_strategy_id = retry_with_backoff( + update_memory_configuration, memory_id, props["enable_long_term_memory"] + ) + + return memory_id, memory_strategy_id + + +def _handle_delete_request(memory_id): + """Handle CloudFormation Delete request.""" + if memory_id and not memory_id.startswith("agent-memory-"): + logger.info(f"Deleting AgentCore Memory '{memory_id}'") + retry_with_backoff(delete_memory_configuration, memory_id) + else: + logger.info("No valid memory ID found for deletion") + + return "" + + +def create_memory_configuration(enable_long_term_memory: str, memory_name: str): + """ + Create memory configuration for AgentCore using bedrock-agentcore create_memory API. + + Args: + enable_long_term_memory: "Yes" or "No" to enable long-term memory + memory_name: Name for the memory configuration + + Returns: + str: Memory ID + + Raises: + ClientError: If bedrock-agentcore API call fails + ValueError: If parameters are invalid + """ + + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + + memory_request = { + "name": memory_name, + "eventExpiryDuration": 90, + } + + if enable_long_term_memory == "Yes": + logger.info("Creating memory with semanticMemoryStrategy") + memory_request["memoryStrategies"] = [ + {"semanticMemoryStrategy": {"name": f"{memory_name}_semantic"}} + ] + else: + logger.info("Creating memory without memory strategies") + + logger.info(f"Creating memory: {memory_request}") + + response = bedrock_agentcore_client.create_memory(**memory_request) + + memory_id = response.get("memory").get("id") + memory_strategy_id = next((strategy.get("strategyId") for strategy in response.get("memory", {}).get("strategies", []) if strategy.get("type") == "SEMANTIC"), None) + if not memory_id: + raise ValueError("Memory creation succeeded but no ID returned") + + logger.info(f"Memory creation initiated with ID: {memory_id}, waiting for completion...") + + try: + waiter = bedrock_agentcore_client.get_waiter("memory_created") + waiter.wait( + memoryId=memory_id, + WaiterConfig={ + "Delay": 5, + "MaxAttempts": 60, + }, + ) + logger.info(f"Successfully created and verified memory with ID: {memory_id}") + except Exception as waiter_error: + logger.warning(f"Memory waiter failed, but memory creation may have succeeded: {str(waiter_error)}") + + return memory_id, memory_strategy_id + + except ClientError as e: + logger.error(f"Failed to create memory: {e.response['Error']['Code']} - {e.response['Error']['Message']}") + raise + except ValueError as e: + logger.error(f"Parameter validation failed for memory creation: {str(e)}") + raise + except Exception as e: + logger.error(f"Unexpected error creating memory configuration: {str(e)}") + raise + + +def update_memory_configuration(memory_id: str, enable_long_term_memory: str): + """ + Update existing memory configuration using bedrock-agentcore update_memory API. + + Args: + memory_id: ID of the existing memory configuration to update + enable_long_term_memory: "Yes" or "No" to enable long-term memory + + Raises: + ClientError: If bedrock-agentcore API call fails + ValueError: If parameters are invalid + """ + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + + # Get current memory configuration + describe_response = bedrock_agentcore_client.get_memory(memoryId=memory_id) + current_strategies = describe_response.get("memory", {}).get("strategies", []) + + memory_update_request = {"memoryId": memory_id, "memoryStrategies": {}} + should_update = False + memory_strategy_id = None + if enable_long_term_memory == "Yes": + # Add semantic memory strategy if not exists + memory_strategy_id = next((strategy.get("strategyId") for strategy in current_strategies if strategy.get("type") == "SEMANTIC"), None) + + if not memory_strategy_id: + memory_update_request["memoryStrategies"]["addMemoryStrategies"] = [ + {"semanticMemoryStrategy": {"name": "semantic_memory"}} + ] + should_update = True + else: + # Remove semantic memory strategies + semantic_strategy_ids = [ + {"memoryStrategyId": strategy.get("strategyId")} + for strategy in current_strategies + if strategy.get("type") == "SEMANTIC" and strategy.get("strategyId") + ] + if semantic_strategy_ids: + memory_update_request["memoryStrategies"]["deleteMemoryStrategies"] = semantic_strategy_ids + should_update = True + + if should_update: + response = bedrock_agentcore_client.update_memory(**memory_update_request) + memory_strategy_id = next((strategy.get("strategyId") for strategy in response.get("memory", {}).get("strategies", []) if strategy.get("type") == "SEMANTIC"), None) + logger.info(f"Successfully updated memory configuration: {memory_id}") + return memory_strategy_id + + except ClientError as e: + logger.error(f"Failed to update memory: {e.response['Error']['Code']} - {e.response['Error']['Message']}") + raise + except Exception as e: + logger.error(f"Unexpected error updating memory configuration: {str(e)}") + raise + + +def delete_memory_configuration(memory_id: str): + """ + Delete memory configuration using bedrock-agentcore delete_memory API. + + Args: + memory_id: ID of the memory configuration to delete + + Raises: + ClientError: If bedrock-agentcore API call fails + """ + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + response = bedrock_agentcore_client.delete_memory(memoryId=memory_id) + logger.info(response) + logger.info(f"Successfully initiated deletion of memory configuration: {memory_id}") + + except ClientError as e: + if e.response["Error"]["Code"] == "ResourceNotFoundException": + logger.warning(f"Memory configuration '{memory_id}' not found, may already be deleted") + else: + logger.error(f"Failed to delete memory: {e.response['Error']['Code']} - {e.response['Error']['Message']}") + raise + except Exception as e: + logger.error(f"Unexpected error deleting memory configuration: {str(e)}") + raise + diff --git a/source/lambda/custom-resource/operations/deploy_mcp_gateway.py b/source/lambda/custom-resource/operations/deploy_mcp_gateway.py new file mode 100644 index 00000000..f0a47222 --- /dev/null +++ b/source/lambda/custom-resource/operations/deploy_mcp_gateway.py @@ -0,0 +1,81 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import os +from aws_lambda_powertools import Logger, Tracer +from cfn_response import send_response +from utils.mcp_config_manager import MCPConfigManager + +from utils.mcp_factory import MCPGatewayFactory +from utils.lambda_target_creator import LambdaTargetCreator +from utils.smithy_target_creator import SmithyTargetCreator +from utils.openapi_target_creator import OpenAPITargetCreator +from operations.operation_types import SUCCESS, FAILED + +MCPGatewayFactory.register_target_creator("lambda", LambdaTargetCreator) +MCPGatewayFactory.register_target_creator("smithyModel", SmithyTargetCreator) +MCPGatewayFactory.register_target_creator("openApiSchema", OpenAPITargetCreator) + +logger = Logger() +tracer = Tracer() + +from utils.gateway_mcp import GatewayMCP + + +def validate_required_props(request_type, properties): + # Only validate required properties for Create and Update operations + if request_type in ["Create", "Update"]: + required_properties = { + "USE_CASE_CONFIG_RECORD_KEY": properties.get("USE_CASE_CONFIG_RECORD_KEY"), + "GATEWAY_ROLE_ARN": properties.get("GATEWAY_ROLE_ARN"), + "USE_CASE_CONFIG_TABLE_NAME": properties.get("USE_CASE_CONFIG_TABLE_NAME"), + "MCPAgentCoreName": properties.get("MCPAgentCoreName"), + } + for prop_name, prop_value in required_properties.items(): + if not prop_value: + raise ValueError(f"{prop_name} is required in ResourceProperties") + + +@tracer.capture_method +def execute(event, context): + try: + properties = event.get("ResourceProperties", {}) + request_type = event.get("RequestType") + # Setting to unknown: + # If gatewayId is not created, There is no need to delete gateway and targets, and delete can be skipped + physical_resource_id = event.get("PhysicalResourceId", 'unknown') + + validate_required_props(request_type, properties) + + config_manager = MCPConfigManager(properties["USE_CASE_CONFIG_TABLE_NAME"]) + validated_config = config_manager.get_mcp_gateway_config(properties["USE_CASE_CONFIG_RECORD_KEY"]) if request_type != "Delete" else {} + + gateway_mcp = GatewayMCP( + config=validated_config, + cognito_user_pool_id=properties.get("COGNITO_USER_POOL_ID"), + gateway_role_arn=properties["GATEWAY_ROLE_ARN"], + gateway_name=properties["MCPAgentCoreName"], + schema_bucket_name=properties["S3_BUCKET_NAME"], + gateway_id=physical_resource_id + ) + + if request_type not in ["Create", "Update", "Delete"]: + raise ValueError(f"Unsupported request type: {request_type}") + + # calling create, update or delete based on request type + getattr(gateway_mcp, request_type.lower())() + + response_data = gateway_mcp.to_dict() + + if request_type != "Delete": + config_manager.update_gateway_config( + properties.get("USE_CASE_CONFIG_RECORD_KEY"), + response_data, + ) + response_data["message"] = f"MCP gateway {request_type} completed" + + send_response(event, context, SUCCESS, response_data, gateway_mcp.gateway_id) + + except Exception as e: + error_msg = f"Error: {str(e)}" + send_response(event, context, FAILED, {}, physical_resource_id=physical_resource_id, reason=error_msg) diff --git a/source/lambda/custom-resource/operations/deploy_mcp_runtime.py b/source/lambda/custom-resource/operations/deploy_mcp_runtime.py new file mode 100644 index 00000000..18c130ef --- /dev/null +++ b/source/lambda/custom-resource/operations/deploy_mcp_runtime.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from aws_lambda_powertools import Logger, Tracer +from cfn_response import send_response +from utils.mcp_config_manager import MCPConfigManager +from operations.operation_types import SUCCESS, FAILED +from utils.runtime_mcp import RuntimeMCP + +logger = Logger() +tracer = Tracer() + + +def validate_required_props(request_type, properties): + """Validate required properties for Create and Update operations.""" + if request_type in ["Create", "Update"]: + required_properties = { + "USE_CASE_CONFIG_RECORD_KEY": properties.get("USE_CASE_CONFIG_RECORD_KEY"), + "EXECUTION_ROLE_ARN": properties.get("EXECUTION_ROLE_ARN"), + "USE_CASE_CONFIG_TABLE_NAME": properties.get("USE_CASE_CONFIG_TABLE_NAME"), + "MCPAgentCoreName": properties.get("MCPAgentCoreName"), + } + for prop_name, prop_value in required_properties.items(): + if not prop_value: + raise ValueError(f"{prop_name} is required in ResourceProperties") + + +@tracer.capture_method +def execute(event, context): + try: + properties = event.get("ResourceProperties", {}) + request_type = event.get("RequestType") + physical_resource_id = event.get("PhysicalResourceId", "unknown") + validate_required_props(request_type, properties) + + config_manager = MCPConfigManager(properties["USE_CASE_CONFIG_TABLE_NAME"]) + validated_config = ( + config_manager.get_mcp_runtime_config(properties["USE_CASE_CONFIG_RECORD_KEY"]) + if request_type != "Delete" + else {} + ) + + runtime_mcp = RuntimeMCP( + config=validated_config, + cognito_user_pool_id=properties.get("COGNITO_USER_POOL_ID"), + runtime_name=properties["MCPAgentCoreName"], + execution_role_arn=properties["EXECUTION_ROLE_ARN"], + table_name=properties["USE_CASE_CONFIG_TABLE_NAME"], + config_key=properties["USE_CASE_CONFIG_RECORD_KEY"], + runtime_id=physical_resource_id, + ) + + if request_type not in ["Create", "Update", "Delete"]: + raise ValueError(f"Unsupported request type: {request_type}") + + # Call create, update or delete based on request type + getattr(runtime_mcp, request_type.lower())() + + response_data = runtime_mcp.to_dict() + + if request_type != "Delete": + config_manager.update_runtime_config( + properties.get("USE_CASE_CONFIG_RECORD_KEY"), + response_data, + ) + response_data["message"] = f"MCP runtime {request_type} completed" + + send_response(event, context, SUCCESS, response_data, runtime_mcp.runtime_id) + + except Exception as e: + error_msg = f"Error: {str(e)}" + send_response(event, context, FAILED, {}, physical_resource_id=physical_resource_id, reason=error_msg) diff --git a/source/lambda/custom-resource/operations/gen_ecr_repo_prefix.py b/source/lambda/custom-resource/operations/gen_ecr_repo_prefix.py new file mode 100644 index 00000000..56ebfc6a --- /dev/null +++ b/source/lambda/custom-resource/operations/gen_ecr_repo_prefix.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +import re + +from aws_lambda_powertools import Logger, Tracer +from cfn_response import send_response +from operations import operation_types +from operations.operation_types import FAILED, RESOURCE, RESOURCE_PROPERTIES, SUCCESS + +logger = Logger(utc=True) +tracer = Tracer() + + +@tracer.capture_method +def verify_env_setup(event): + if event[RESOURCE_PROPERTIES][RESOURCE] != operation_types.GEN_ECR_REPO_PREFIX: + err_msg = f"Operation type not available or did not match from the request. Expecting operation type to be {operation_types.GEN_ECR_REPO_PREFIX}" + logger.error(f"{err_msg}. Here are the resource properties received {json.dumps(event[RESOURCE_PROPERTIES])}") + raise ValueError(err_msg) + + # Either StackName (for deployment platform) or UseCaseShortId (for standalone) must be provided + if "StackName" not in event[RESOURCE_PROPERTIES] and "UseCaseShortId" not in event[RESOURCE_PROPERTIES]: + err_msg = "Missing required property: either StackName or UseCaseShortId must be provided" + logger.error(f"{err_msg}. Here are the resource properties received {json.dumps(event[RESOURCE_PROPERTIES])}") + raise ValueError(err_msg) + + +@tracer.capture_method +def sanitize_and_truncate_prefix(stack_name: str, max_length: int = 30) -> str: + """ + Sanitize and truncate stack name to create a valid ECR repository prefix. + + ECR repository prefix requirements: + - Max 30 characters + - Lowercase alphanumeric characters, hyphens, underscores, and dots only + - Cannot start or end with special characters + + Args: + stack_name: The CloudFormation stack name + max_length: Maximum allowed length (default 30 for ECR) + + Returns: + Sanitized and truncated repository prefix + """ + # Convert to lowercase and replace invalid characters with hyphens + sanitized = re.sub(r"[^a-z0-9._-]", "-", stack_name.lower()) + + # Remove leading/trailing special characters + sanitized = re.sub(r"^[._-]+", "", sanitized) + sanitized = re.sub(r"[._-]+$", "", sanitized) + + # Ensure it doesn't start with a dot or hyphen (ECR requirement) + sanitized = re.sub(r"^[.-]", "", sanitized) + + # Truncate to max length + if len(sanitized) > max_length: + sanitized = sanitized[:max_length] + # Ensure it doesn't end with a special character after truncation + sanitized = re.sub(r"[._-]+$", "", sanitized) + + # If empty after sanitization, use a default + if not sanitized: + sanitized = "gaab-default" + + logger.info(f"Sanitized stack name '{stack_name}' to ECR prefix '{sanitized}'") + return sanitized + + +@tracer.capture_method +def generate_prefix_from_inputs(event_properties: dict) -> str: + """ + Generate ECR repository prefix based on provided inputs. + + Args: + event_properties: Resource properties from CloudFormation event + + Returns: + Generated ECR repository prefix + """ + if "UseCaseShortId" in event_properties: + # For standalone deployments: gaab-agents-{uuid} + use_case_id = event_properties["UseCaseShortId"] + prefix = f"gaab-agents-{use_case_id}" + logger.info(f"Generated UUID-based prefix: {prefix}") + return sanitize_and_truncate_prefix(prefix) + elif "StackName" in event_properties: + # For deployment platform: use stack name directly + stack_name = event_properties["StackName"] + logger.info(f"Generated stack name-based prefix from: {stack_name}") + return sanitize_and_truncate_prefix(stack_name) + else: + raise ValueError("Neither StackName nor UseCaseShortId provided") + + +@tracer.capture_method +def execute(event, context): + """ + Generate ECR repository prefix from either stack name or use case short ID. + + This operation creates a valid ECR repository prefix that meets AWS ECR naming requirements. + Supports two modes: + 1. Stack name-based (for deployment platform): Uses CloudFormation stack name + 2. UUID-based (for standalone use cases): Uses gaab-agents-{uuid} format + + For 'Create' events, generates the prefix from the provided inputs. + For 'Update' and 'Delete' events, returns empty response with 'SUCCESS' status since + the ECR repository prefix doesn't need to change during stack updates. + + Args: + event (LambdaEvent): An event object received by the lambda function + context (LambdaContext): A context object received by the lambda function + + Raises: + Exception: if there are errors in prefix generation. During the handling of this + exception it also sends a 'FAILED' status to the AWS CloudFormation service. + """ + try: + verify_env_setup(event) + + if event["RequestType"] == "Create": + ecr_repo_prefix = generate_prefix_from_inputs(event[RESOURCE_PROPERTIES]) + + logger.info(f"Generated ECR repository prefix: {ecr_repo_prefix}") + send_response(event, context, SUCCESS, {"EcrRepoPrefix": ecr_repo_prefix}) + + elif event["RequestType"] == "Update" or event["RequestType"] == "Delete": + logger.info(f"{event['RequestType']} operation is a no-op for ECR repository prefix generation") + send_response(event, context, SUCCESS, {}) + + except Exception as ex: + logger.error(f"Error occurred when generating ECR repository prefix. Error is {ex}") + send_response(event, context, FAILED, {}, reason=str(ex)) diff --git a/source/lambda/custom-resource/operations/get_arns_for_inference_profile.py b/source/lambda/custom-resource/operations/get_arns_for_inference_profile.py index 2121df2a..2009cc4a 100644 --- a/source/lambda/custom-resource/operations/get_arns_for_inference_profile.py +++ b/source/lambda/custom-resource/operations/get_arns_for_inference_profile.py @@ -39,24 +39,32 @@ def verify_env_setup(event): @tracer.capture_method -def get_model_arns(inference_profile_identifier): - """This method retrieves the model ARNs from a Bedrock inference profile""" +def get_model_arns(inference_profile_identifiers): + """This method retrieves the model ARNs from a list of Bedrock inference profiles""" bedrock_client = get_service_client("bedrock") + arn_set = set() # Use set for deduplication try: - response = bedrock_client.get_inference_profile(inferenceProfileIdentifier=inference_profile_identifier) - arns = [model["modelArn"] for model in response.get("models", [])] - if "inferenceProfileArn" in response: - arns.append(response["inferenceProfileArn"]) - return ",".join(arns) + for inference_profile_identifier in inference_profile_identifiers: + response = bedrock_client.get_inference_profile(inferenceProfileIdentifier=inference_profile_identifier) + + # Add model ARNs to the set + for model in response.get("models", []): + arn_set.add(model["modelArn"]) + + # Add inference profile ARN if present + if "inferenceProfileArn" in response: + arn_set.add(response["inferenceProfileArn"]) + + return ",".join(arn_set) except Exception as error: - logger.error(f"Error in retrieving model ARNs from inference profile. The error is {error}") + logger.error(f"Error in retrieving model ARNs from inference profiles. The error is {error}") raise error @tracer.capture_method def get_inference_identifier_from_ddb(table_name, record_key): - """This method retrieves the inference profile id from dynamodb table""" + """This method retrieves the inference profile id(s) from dynamodb table""" ddb_client = get_service_client("dynamodb") try: response = ddb_client.get_item(TableName=table_name, Key={LLM_CONFIG_RECORD_FIELD_NAME: {"S": record_key}}) @@ -65,7 +73,37 @@ def get_inference_identifier_from_ddb(table_name, record_key): deserialized_response = {k: deserializer.deserialize(v) for k, v in response.get("Item", {}).items()} if not deserialized_response: return None - return deserialized_response["config"]["LlmParams"]["BedrockLlmParams"]["InferenceProfileId"] + + config = deserialized_response["config"] + inference_profile_ids = set() # Use set for deduplication + + # Always check the top-level LlmParams for inference profile ID + top_level_llm_params = config.get("LlmParams", {}) + top_level_bedrock_params = top_level_llm_params.get("BedrockLlmParams", {}) + top_level_inference_profile_id = top_level_bedrock_params.get("InferenceProfileId") + + if top_level_inference_profile_id: + inference_profile_ids.add(top_level_inference_profile_id) + + # Additional check: if this is a Workflow with agents-as-tools orchestration, also check sub-agents + if ( + config.get("UseCaseType") == "Workflow" + and config.get("WorkflowParams", {}).get("OrchestrationPattern") == "agents-as-tools" + ): + # Get agents from WorkflowParams.AgentsAsToolsParams.Agents + agents = config.get("WorkflowParams", {}).get("AgentsAsToolsParams", {}).get("Agents", []) + + for agent in agents: + # Check if agent has LlmParams.BedrockLlmParams.InferenceProfileId + llm_params = agent.get("LlmParams", {}) + bedrock_params = llm_params.get("BedrockLlmParams", {}) + inference_profile_id = bedrock_params.get("InferenceProfileId") + + if inference_profile_id: + inference_profile_ids.add(inference_profile_id) + + return list(inference_profile_ids) if inference_profile_ids else None + except Exception as error: logger.error(f"Error in retrieving inference profile identifier from DDB. The error is {error}") raise error @@ -90,11 +128,11 @@ def execute(event, context): physical_resource_id = event.get(PHYSICAL_RESOURCE_ID, None) if event["RequestType"] == "Create" or event["RequestType"] == "Update": - inference_profile_identifier = get_inference_identifier_from_ddb( + inference_profile_identifiers = get_inference_identifier_from_ddb( event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_TABLE_NAME], event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY], ) - if not inference_profile_identifier: # ddb does not contain inference profile information + if not inference_profile_identifiers: # ddb does not contain inference profile information send_response( event, context, @@ -105,7 +143,7 @@ def execute(event, context): ) return - arns = get_model_arns(inference_profile_identifier) + arns = get_model_arns(inference_profile_identifiers) if not arns: # no arns were returned for the provided inference profile id send_response( event, diff --git a/source/lambda/custom-resource/operations/lambda_version_generator.py b/source/lambda/custom-resource/operations/lambda_version_generator.py new file mode 100644 index 00000000..3e354217 --- /dev/null +++ b/source/lambda/custom-resource/operations/lambda_version_generator.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from aws_lambda_powertools import Logger +from cfn_response import send_response +from helper import get_service_client +from operations.operation_types import FAILED, RESOURCE, RESOURCE_PROPERTIES, SUCCESS + +def verify_env_setup(event): + """ + Verifies that all required environment variables and resource properties are set + """ + if event[RESOURCE_PROPERTIES][RESOURCE] != "LAMBDA_VERSION_GENERATOR": + raise ValueError("Operation type not supported") + + if not event[RESOURCE_PROPERTIES].get("FunctionName"): + raise ValueError("FunctionName has not been passed") + + +def execute(event, context): + try: + request_type = event["RequestType"] + properties = event[RESOURCE_PROPERTIES] + function_name = properties["FunctionName"] + + lambda_client = get_service_client("lambda") + + if request_type in ["Create", "Update"]: + # Always create a new version - let the caller decide when to invoke this + response = lambda_client.publish_version( + FunctionName=function_name, + Description="Lambda Version" + ) + + response_data = { + "VersionArn": response["FunctionArn"], + "VersionNumber": response["Version"] + } + + elif request_type == "Delete": + # No action on delete - versions are retained + response_data = {} + + send_response(event, context, SUCCESS, response_data) + + except Exception as ex: + send_response(event, context, FAILED, {}, reason=str(ex)) + raise ex diff --git a/source/lambda/custom-resource/operations/multimodal_bucket_notifications.py b/source/lambda/custom-resource/operations/multimodal_bucket_notifications.py new file mode 100644 index 00000000..caed91f0 --- /dev/null +++ b/source/lambda/custom-resource/operations/multimodal_bucket_notifications.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json + +import boto3 +from aws_lambda_powertools import Logger +from cfn_response import send_response +from helper import get_service_client +from operations.operation_types import FAILED, RESOURCE_PROPERTIES, SUCCESS +from utils.constants import MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR + +logger = Logger(utc=True) + + +def execute(event, context): + """ + Custom resource operation to enables EventBridge notifications on the S3 bucket by + calling s3:PutBucketNotification. Without this configuration, the S3 bucket will not be able + to send any events to EventBridge which is required to trigger Update Metadata Lambda. + + Args: + event: CloudFormation custom resource event + context: Lambda context object + """ + try: + request_type = event.get("RequestType") + resource_properties = event.get(RESOURCE_PROPERTIES, {}) + + bucket_name = resource_properties.get(MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR) + + if not bucket_name: + raise ValueError(f"{MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR} is required in ResourceProperties") + + logger.info(f"Processing {request_type} for bucket: {bucket_name}") + + s3_client = get_service_client("s3") + + if request_type in ["Create", "Update"]: + logger.info(f"Enabling EventBridge notifications for bucket: {bucket_name}") + s3_client.put_bucket_notification_configuration( + Bucket=bucket_name, NotificationConfiguration={"EventBridgeConfiguration": {}} + ) + + elif request_type == "Delete": + logger.info(f"Disabling EventBridge notifications for bucket: {bucket_name}") + try: + s3_client.put_bucket_notification_configuration(Bucket=bucket_name, NotificationConfiguration={}) + except Exception as e: + # Don't fail deletion if bucket doesn't exist or is already cleaned up + logger.warning(f"Failed to clean up bucket notifications: {str(e)}") + + response_data = {"BucketName": bucket_name, "EventBridgeEnabled": True} + + send_response(event, context, SUCCESS, response_data) + logger.info(f"Successfully processed {request_type} for bucket notifications") + + except Exception as e: + logger.error(f"Error configuring bucket notifications: {str(e)}") + send_response(event, context, FAILED, {}, reason=str(e)) + raise e diff --git a/source/lambda/custom-resource/operations/operation_types.py b/source/lambda/custom-resource/operations/operation_types.py index 51abdacd..d78c5670 100644 --- a/source/lambda/custom-resource/operations/operation_types.py +++ b/source/lambda/custom-resource/operations/operation_types.py @@ -1,14 +1,13 @@ #!/usr/bin/env python # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 - - # list of operation names as constants COPY_TEMPLATE = "COPY_TEMPLATE" COPY_SAMPLE_DOCUMENTS = "COPY_SAMPLE_DOCUMENTS" CW_LOG_RETENTION = "CW_LOG_RETENTION" UPDATE_LLM_CONFIG = "UPDATE_LLM_CONFIG" GEN_UUID = "GEN_UUID" +METRIC = "METRIC" ANONYMOUS_METRIC = "ANONYMOUS_METRIC" COPY_MODEL_INFO = "COPY_MODEL_INFO" WEBCONFIG = "WEBCONFIG" @@ -21,6 +20,16 @@ GEN_DOMAIN_PREFIX = "GEN_DOMAIN_PREFIX" GET_MODEL_RESOURCE_ARNS = "GET_MODEL_RESOURCE_ARNS" REDEPLOY_API = "REDEPLOY_API" +DEPLOY_AGENT_CORE = "DEPLOY_AGENT_CORE" +DEPLOY_AGENT_CORE_MEMORY = "DEPLOY_AGENT_CORE_MEMORY" +DEPLOY_MCP_GATEWAY = "DEPLOY_MCP_GATEWAY" +DEPLOY_MCP_RUNTIME = "DEPLOY_MCP_RUNTIME" +LAMBDA_VERSION_GENERATOR = "LAMBDA_VERSION_GENERATOR" +GEN_ECR_REPO_PREFIX = "GEN_ECR_REPO_PREFIX" +AGENTCORE_OAUTH_CLIENT = "AGENTCORE_OAUTH_CLIENT" +AGENTCORE_OUTBOUND_PERMISSIONS = "AGENTCORE_OUTBOUND_PERMISSIONS" +MULTIMODAL_BUCKET_NOTIFICATIONS = "MULTIMODAL_BUCKET_NOTIFICATIONS" +SLEEP = "SLEEP" # additional constants RESOURCE_PROPERTIES = "ResourceProperties" diff --git a/source/lambda/custom-resource/operations/anonymous_metrics.py b/source/lambda/custom-resource/operations/send_metrics.py similarity index 70% rename from source/lambda/custom-resource/operations/anonymous_metrics.py rename to source/lambda/custom-resource/operations/send_metrics.py index 298333b6..3ee9c470 100644 --- a/source/lambda/custom-resource/operations/anonymous_metrics.py +++ b/source/lambda/custom-resource/operations/send_metrics.py @@ -37,6 +37,7 @@ from utils.data import BuilderMetrics from utils.metrics_schema import MetricsSchema from utils.metrics import push_builder_metrics +from utils.lambda_context_parser import get_invocation_account_id SOLUTION_ID = "SolutionId" VERSION = "Version" @@ -59,8 +60,9 @@ def verify_env_setup(event): ValueError: If any of the keys are not found under event['ResourceProperties], the method throws a ValueError exception """ - if event[RESOURCE_PROPERTIES].get(RESOURCE, None) != operation_types.ANONYMOUS_METRIC: - err_msg = f"Operation type not available or did not match from the request. Expecting operation type to be {operation_types.ANONYMOUS_METRIC}" + resource_type = event[RESOURCE_PROPERTIES].get(RESOURCE, None) + if resource_type not in [operation_types.METRIC, operation_types.ANONYMOUS_METRIC]: + err_msg = f"Operation type not available or did not match from the request. Expecting operation type to be {operation_types.METRIC} or {operation_types.ANONYMOUS_METRIC}" logger.error(f"{err_msg}. Here are the resource properties received {json.dumps(event[RESOURCE_PROPERTIES])}") raise ValueError(err_msg) @@ -133,6 +135,11 @@ def update_metrics_data(metrics_data, config: dict): metrics_data[DEPLOY_UI] = config.get(DEPLOY_UI, None) metrics_data[USE_CASE_TYPE] = config[USE_CASE_TYPE] + # Collecting Provisioned Concurrency value + provisioned_concurrency_value = config.get("ProvisionedConcurrencyValue") + if provisioned_concurrency_value is not None and provisioned_concurrency_value > 0: + metrics_data["ProvisionedConcurrencyValue"] = provisioned_concurrency_value + bedrock_llm_params = metrics_data[LLM_PARAMS].get("BedrockLlmParams", {}) bedrock_llm_params[GUARDRAIL_ENABLED] = bool( bedrock_llm_params @@ -143,20 +150,84 @@ def update_metrics_data(metrics_data, config: dict): # Collecting whether Provisioned Model is provided bedrock_llm_params[PROVISIONED_MODEL_ENABLED] = bool(bedrock_llm_params and bedrock_llm_params.get("ModelArn")) + # Collecting whether Multimodal is enabled + multimodal_params = metrics_data[LLM_PARAMS].get("MultimodalParams", {}) + metrics_data[LLM_PARAMS]["MultimodalEnabled"] = bool( + multimodal_params and multimodal_params.get("MultimodalEnabled") + ) + cognito_params = metrics_data[AUTH_PARAMS].get("CognitoParams", {}) # Updating Auth Params, to make sure we know if user is providing their own user pool or not metrics_data[AUTH_PARAMS][CLIENT_OWNED_USER_POOL] = bool( cognito_params and cognito_params.get("ExistingUserPoolId") ) + + mcp_params = config.get("MCPParams", {}) + if mcp_params.get("GatewayParams"): + target_params = mcp_params["GatewayParams"].get("TargetParams", []) + filtered_targets = [ + { + "TargetType": target.get("TargetType"), + **({ + "OutboundAuthProviderType": target["OutboundAuthParams"]["OutboundAuthProviderType"] + } if target.get("OutboundAuthParams", {}).get("OutboundAuthProviderType") else {}) + } + for target in target_params + ] + + metrics_data["MCPParams"] = { + "MCPType": "Gateway", + "GatewayParams": { + "TargetCount": len(filtered_targets), + "TargetParams": filtered_targets, + }, + } + elif mcp_params.get("RuntimeParams"): + metrics_data["MCPParams"] = {"MCPType": "Runtime", "RuntimeParams": {}} + + agent_builder_params = config.get("AgentBuilderParams", {}) + if agent_builder_params: + memory_config = agent_builder_params.get("MemoryConfig", {}) + tools = agent_builder_params.get("Tools", []) + mcp_servers = agent_builder_params.get("MCPServers", []) + + tool_ids = [tool["ToolId"] for tool in tools if tool.get("ToolId")] + mcp_server_info = [{"Type": server["Type"]} for server in mcp_servers if server.get("Type")] + + metrics_data["AgentBuilderParams"] = { + "MemoryConfig": {"LongTermEnabled": memory_config.get("LongTermEnabled")}, + "BuiltInToolsCount": len(tool_ids), + "BuiltInTools": tool_ids, + "MCPServersCount": len(mcp_server_info), + "MCPServers": mcp_server_info, + } + + workflow_params = config.get("WorkflowParams", {}) + if workflow_params: + memory_config = workflow_params.get("MemoryConfig", {}) + orchestration_pattern = workflow_params.get("OrchestrationPattern") + + filtered_agents = [] + if orchestration_pattern == "agents-as-tools": + agents = workflow_params.get("AgentsAsToolsParams", {}).get("Agents", []) + filtered_agents = [{"Type": agent["UseCaseType"]} for agent in agents if agent.get("UseCaseType")] + + metrics_data["WorkflowParams"] = { + "OrchestrationPattern": orchestration_pattern, + "MemoryConfig": {"LongTermEnabled": memory_config.get("LongTermEnabled")}, + "AgentsCount": len(filtered_agents), + "Agents": filtered_agents, + } + return metrics_data @tracer.capture_method def execute(event, context): - """This method implementation is to support sending anonymous metric to aws solution builder endpoint. On 'Create', 'Update', and 'Delete' + """This method implementation is to support sending metric to aws solution builder endpoint. On 'Create', 'Update', and 'Delete' events this implementation will send configuration details about the deployed stack without any customer specific information. The - 'Resource' property for this implementation is 'ANONYMOUS_METRIC'. Additionally, all data to be pushed as operational metrics should be + 'Resource' property for this implementation is 'METRIC'. Additionally, all data to be pushed as operational metrics should be set directly under 'Properties' in the Custom Resource creation. 'SolutionId' and 'Version' are mandatory resource attributes For 'Create' events, this implementation will add a UUID to the metrics payload using the uuid.uuid4() call to provide a unique identified @@ -202,14 +273,16 @@ def execute(event, context): # it would be excluded from our metrics_payload data metrics_data = MetricsSchema(metrics_data).model_dump(remove_empty=True) + account_id = get_invocation_account_id(context) builder_metrics = BuilderMetrics( event[RESOURCE_PROPERTIES][UUID], event[RESOURCE_PROPERTIES][SOLUTION_ID], event[RESOURCE_PROPERTIES][VERSION], metrics_data, + account_id, ) push_builder_metrics(builder_metrics) send_response(event, context, SUCCESS, {}, physical_resource_id) except Exception as ex: - logger.error(f"Error occurred when sending anonymous metric, Error is {ex}") + logger.error(f"Error occurred when sending metric, Error is {ex}") send_response(event, context, FAILED, {}, physical_resource_id=physical_resource_id, reason=str(ex)) diff --git a/source/lambda/custom-resource/operations/shared.py b/source/lambda/custom-resource/operations/shared.py index 20f413a0..136227e5 100644 --- a/source/lambda/custom-resource/operations/shared.py +++ b/source/lambda/custom-resource/operations/shared.py @@ -3,14 +3,27 @@ # SPDX-License-Identifier: Apache-2.0 import io +import time import zipfile import botocore from aws_lambda_powertools import Logger, Tracer +from botocore.exceptions import ClientError logger = Logger(utc=True) tracer = Tracer() +MAX_RETRIES = 5 +RETRY_DELAY_BASE = 3 +TRANSIENT_ERROR_CODES = [ + "ThrottlingException", + "ServiceUnavailableException", + "InternalServerException", + "RequestTimeoutException", + "TooManyRequestsException", + "ImageNotFoundException", + "RepositoryNotFoundException", +] @tracer.capture_method def get_zip_archive(s3_resource, source_bucket_name, source_prefix): @@ -48,3 +61,86 @@ def get_zip_archive(s3_resource, source_bucket_name, source_prefix): raise error return zip_archive + + +def _calculate_retry_delay(error_code, attempt, base_delay=None): + """Calculate delay for retry based on error type and attempt number.""" + retry_delay_base = base_delay if base_delay is not None else RETRY_DELAY_BASE + if error_code == "AccessDeniedException": + # Use longer delays for IAM propagation (15, 45, 30, 30, 30 seconds capped at 30) + return min(30, 5 * (retry_delay_base**attempt)) + # Cap delay at 30 seconds to prevent extremely long waits + return min(30, retry_delay_base**attempt) + + +def _log_retry_warning(error_code, error_message, attempt, delay, max_retries=None): + """Log appropriate warning message based on error type.""" + max_retry_count = max_retries if max_retries is not None else MAX_RETRIES + if error_code == "AccessDeniedException": + logger.warning( + f"IAM AccessDeniedException on attempt {attempt + 1}/{max_retry_count + 1}: {error_message}. " + f"This is likely due to IAM policy propagation delay. Retrying in {delay} seconds..." + ) + else: + logger.warning( + f"Transient error {error_code} on attempt {attempt + 1}/{max_retry_count + 1}: {error_message}. " + f"Retrying in {delay} seconds..." + ) + + +def _is_retryable_error(error_code, attempt, max_retries=None): + """Check if error is retryable based on error code and attempt count.""" + max_retry_count = max_retries if max_retries is not None else MAX_RETRIES + return error_code in TRANSIENT_ERROR_CODES and attempt < max_retry_count + + +def _handle_client_error(error, attempt, max_retries=None, base_delay=None): + """Handle ClientError with retry logic.""" + error_code = error.response["Error"]["Code"] + error_message = error.response["Error"]["Message"] + max_retry_count = max_retries if max_retries is not None else MAX_RETRIES + + if _is_retryable_error(error_code, attempt, max_retries): + delay = _calculate_retry_delay(error_code, attempt, base_delay) + _log_retry_warning(error_code, error_message, attempt, delay, max_retries) + time.sleep(delay) + return error # Return exception to track as last_exception + + if attempt == max_retry_count: + logger.error(f"Max retries ({max_retry_count}) reached for transient error: {error_code}") + raise error + + +@tracer.capture_method +def retry_with_backoff(func, *args, max_attempts=None, base_delay=None, **kwargs): + """ + Execute a function with exponential backoff retry logic for transient failures. + + Args: + func: Function to execute + *args: Positional arguments for the function + max_attempts: Maximum number of retry attempts (default: MAX_RETRIES) + base_delay: Base delay in seconds for exponential backoff (default: RETRY_DELAY_BASE) + **kwargs: Keyword arguments for the function + + Returns: + Function result on success + + Raises: + ClientError: If all retries are exhausted or non-transient error occurs + Exception: For unexpected errors + """ + max_retries = max_attempts - 1 if max_attempts is not None else MAX_RETRIES + last_exception = None + + for attempt in range(max_retries + 1): + try: + return func(*args, **kwargs) + except ClientError as e: + last_exception = _handle_client_error(e, attempt, max_retries, base_delay) + except Exception as e: + logger.error(f"Non-retryable error in retry_with_backoff: {str(e)}") + raise e + + if last_exception: + raise last_exception diff --git a/source/lambda/custom-resource/operations/sleep.py b/source/lambda/custom-resource/operations/sleep.py new file mode 100644 index 00000000..89559c08 --- /dev/null +++ b/source/lambda/custom-resource/operations/sleep.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +import uuid +import time + +from aws_lambda_powertools import Logger, Tracer +from cfn_response import send_response +from operations import operation_types +from operations.operation_types import FAILED, PHYSICAL_RESOURCE_ID, RESOURCE, RESOURCE_PROPERTIES, SUCCESS + +logger = Logger(utc=True) +tracer = Tracer() + +# Required keys in the incoming event object +DURATION = "DURATION" +RETURN_EARLY = "RETURN_EARLY" + +# other constants +OPERATION_TYPE = operation_types.SLEEP + + +@tracer.capture_method +def verify_env_setup(event): + """This method verifies if all the necessary properties are correctly set in the event object as received by the lambda function's handler + + Args: + event (LambdaEvent): An event received by the lambda function that is passed by AWS services when invoking the function's handler + + Raises: + ValueError: If any of the properties in the custom resource properties are not set correctly or are not available + """ + if event[RESOURCE_PROPERTIES][RESOURCE] != OPERATION_TYPE: + err_msg = f"Operation type not available or did not match from the request. Expecting operation type to be {OPERATION_TYPE}" + logger.error(f"{err_msg}. Here are the resource properties received {json.dumps(event[RESOURCE_PROPERTIES])}") + raise ValueError(err_msg) + + +@tracer.capture_method +def execute(event, context): + physical_resource_id = event.get(PHYSICAL_RESOURCE_ID, uuid.uuid4().hex[:8]) + + try: + verify_env_setup(event) + duration = int(event[RESOURCE_PROPERTIES].get(DURATION, "30")) + return_early = event[RESOURCE_PROPERTIES].get(RETURN_EARLY, 'false').lower() == 'true' + + logger.info(f"Sleeping for {duration} seconds") + logger.info(f"Return early is set to {return_early}") + if(return_early): + send_response(event, context, SUCCESS, {}, physical_resource_id) + + if event["RequestType"] in ["Create", "Delete"]: + time.sleep(duration) + else: + logger.info( + f"Operation type not set or cannot be handled. This is a no-op operation. Received operation type is {event['RequestType']}" + ) + if(not return_early): + send_response(event, context, SUCCESS, {}, physical_resource_id) + except Exception as ex: + logger.error(f"Error occurred when attempting to sleep, passing to avoid blocking CloudFormation. Error is {ex}") + send_response(event, context, FAILED, {}, physical_resource_id=physical_resource_id, reason=str(ex)) diff --git a/source/lambda/custom-resource/poetry.lock b/source/lambda/custom-resource/poetry.lock index 1d77d741..627ec1ec 100644 --- a/source/lambda/custom-resource/poetry.lock +++ b/source/lambda/custom-resource/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "aws-lambda-powertools" @@ -47,27 +47,27 @@ wrapt = "*" [[package]] name = "boto3" -version = "1.40.15" +version = "1.40.53" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "boto3-1.40.15-py3-none-any.whl", hash = "sha256:52b8aa78c9906c4e49dcec6817c041df33c9825073bf66e7df8fc00afbe47b4b"}, - {file = "boto3-1.40.15.tar.gz", hash = "sha256:271b379ce5ad35ca82f1009e917528a182eed0e2de197ccffb0c51acadec5c79"}, + {file = "boto3-1.40.53-py3-none-any.whl", hash = "sha256:65ded2738de259bd9030feb4772ec7b53d5b661befa88ce836117c3df8265309"}, + {file = "boto3-1.40.53.tar.gz", hash = "sha256:3f8cf56034cfde20dd0abca01349f64ab65734d90c3fbf7357e8a84cb64a62ee"}, ] [package.dependencies] -botocore = ">=1.40.15,<1.41.0" +botocore = ">=1.40.53,<1.41.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.13.0,<0.14.0" +s3transfer = ">=0.14.0,<0.15.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-layer" -version = "3.0.7" +version = "4.0.0" description = "Layer for AWS Boto3 python SDK" optional = false python-versions = "^3.13" @@ -76,8 +76,8 @@ files = [] develop = true [package.dependencies] -boto3 = "1.40.15" -botocore = "1.40.15" +boto3 = "1.40.53" +botocore = "1.40.53" urllib3 = "2.5.0" [package.source] @@ -86,14 +86,14 @@ url = "../layers/aws_boto3" [[package]] name = "botocore" -version = "1.40.15" +version = "1.40.53" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "botocore-1.40.15-py3-none-any.whl", hash = "sha256:b364e039d2b67e509cfb089cb39b295251e48a60cc68fd591defbe10b44d83f9"}, - {file = "botocore-1.40.15.tar.gz", hash = "sha256:4960800e4c5a7b43db22550979c22f5a324cbaf75ef494bbb2cf400ef1e6aca7"}, + {file = "botocore-1.40.53-py3-none-any.whl", hash = "sha256:840322b0af4be7a6e2effddb4eb388053c25af0618f627f37d8b03cc1edbc928"}, + {file = "botocore-1.40.53.tar.gz", hash = "sha256:4ebb9e6648c4896d3f0cdda5ff30b5de9a83aeb591be89a16f98cc5ee3cd371c"}, ] [package.dependencies] @@ -106,96 +106,113 @@ crt = ["awscrt (==0.27.6)"] [[package]] name = "certifi" -version = "2025.8.3" +version = "2025.10.5" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.7" groups = ["test"] files = [ - {file = "certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5"}, - {file = "certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407"}, + {file = "certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de"}, + {file = "certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43"}, ] [[package]] name = "cffi" -version = "1.17.1" +version = "2.0.0" description = "Foreign Function Interface for Python calling C code." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["test"] markers = "platform_python_implementation != \"PyPy\"" files = [ - {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, - {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, - {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, - {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, - {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, - {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, - {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, - {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, - {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, - {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, - {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, - {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, - {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, - {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, - {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, - {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, ] [package.dependencies] -pycparser = "*" +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} [[package]] name = "charset-normalizer" @@ -301,100 +318,116 @@ files = [ [[package]] name = "coverage" -version = "7.10.4" +version = "7.10.7" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "coverage-7.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d92d6edb0ccafd20c6fbf9891ca720b39c2a6a4b4a6f9cf323ca2c986f33e475"}, - {file = "coverage-7.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7202da14dc0236884fcc45665ffb2d79d4991a53fbdf152ab22f69f70923cc22"}, - {file = "coverage-7.10.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ada418633ae24ec8d0fcad5efe6fc7aa3c62497c6ed86589e57844ad04365674"}, - {file = "coverage-7.10.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b828e33eca6c3322adda3b5884456f98c435182a44917ded05005adfa1415500"}, - {file = "coverage-7.10.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:802793ba397afcfdbe9f91f89d65ae88b958d95edc8caf948e1f47d8b6b2b606"}, - {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d0b23512338c54101d3bf7a1ab107d9d75abda1d5f69bc0887fd079253e4c27e"}, - {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f36b7dcf72d06a8c5e2dd3aca02be2b1b5db5f86404627dff834396efce958f2"}, - {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fce316c367a1dc2c411821365592eeb335ff1781956d87a0410eae248188ba51"}, - {file = "coverage-7.10.4-cp310-cp310-win32.whl", hash = "sha256:8c5dab29fc8070b3766b5fc85f8d89b19634584429a2da6d42da5edfadaf32ae"}, - {file = "coverage-7.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:4b0d114616f0fccb529a1817457d5fb52a10e106f86c5fb3b0bd0d45d0d69b93"}, - {file = "coverage-7.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:05d5f98ec893d4a2abc8bc5f046f2f4367404e7e5d5d18b83de8fde1093ebc4f"}, - {file = "coverage-7.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9267efd28f8994b750d171e58e481e3bbd69e44baed540e4c789f8e368b24b88"}, - {file = "coverage-7.10.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4456a039fdc1a89ea60823d0330f1ac6f97b0dbe9e2b6fb4873e889584b085fb"}, - {file = "coverage-7.10.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c2bfbd2a9f7e68a21c5bd191be94bfdb2691ac40d325bac9ef3ae45ff5c753d9"}, - {file = "coverage-7.10.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ab7765f10ae1df7e7fe37de9e64b5a269b812ee22e2da3f84f97b1c7732a0d8"}, - {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a09b13695166236e171ec1627ff8434b9a9bae47528d0ba9d944c912d33b3d2"}, - {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5c9e75dfdc0167d5675e9804f04a56b2cf47fb83a524654297000b578b8adcb7"}, - {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c751261bfe6481caba15ec005a194cb60aad06f29235a74c24f18546d8377df0"}, - {file = "coverage-7.10.4-cp311-cp311-win32.whl", hash = "sha256:051c7c9e765f003c2ff6e8c81ccea28a70fb5b0142671e4e3ede7cebd45c80af"}, - {file = "coverage-7.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:1a647b152f10be08fb771ae4a1421dbff66141e3d8ab27d543b5eb9ea5af8e52"}, - {file = "coverage-7.10.4-cp311-cp311-win_arm64.whl", hash = "sha256:b09b9e4e1de0d406ca9f19a371c2beefe3193b542f64a6dd40cfcf435b7d6aa0"}, - {file = "coverage-7.10.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a1f0264abcabd4853d4cb9b3d164adbf1565da7dab1da1669e93f3ea60162d79"}, - {file = "coverage-7.10.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:536cbe6b118a4df231b11af3e0f974a72a095182ff8ec5f4868c931e8043ef3e"}, - {file = "coverage-7.10.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9a4c0d84134797b7bf3f080599d0cd501471f6c98b715405166860d79cfaa97e"}, - {file = "coverage-7.10.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7c155fc0f9cee8c9803ea0ad153ab6a3b956baa5d4cd993405dc0b45b2a0b9e0"}, - {file = "coverage-7.10.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5f2ab6e451d4b07855d8bcf063adf11e199bff421a4ba57f5bb95b7444ca62"}, - {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:685b67d99b945b0c221be0780c336b303a7753b3e0ec0d618c795aada25d5e7a"}, - {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0c079027e50c2ae44da51c2e294596cbc9dbb58f7ca45b30651c7e411060fc23"}, - {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3749aa72b93ce516f77cf5034d8e3c0dfd45c6e8a163a602ede2dc5f9a0bb927"}, - {file = "coverage-7.10.4-cp312-cp312-win32.whl", hash = "sha256:fecb97b3a52fa9bcd5a7375e72fae209088faf671d39fae67261f37772d5559a"}, - {file = "coverage-7.10.4-cp312-cp312-win_amd64.whl", hash = "sha256:26de58f355626628a21fe6a70e1e1fad95702dafebfb0685280962ae1449f17b"}, - {file = "coverage-7.10.4-cp312-cp312-win_arm64.whl", hash = "sha256:67e8885408f8325198862bc487038a4980c9277d753cb8812510927f2176437a"}, - {file = "coverage-7.10.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b8e1d2015d5dfdbf964ecef12944c0c8c55b885bb5c0467ae8ef55e0e151233"}, - {file = "coverage-7.10.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:25735c299439018d66eb2dccf54f625aceb78645687a05f9f848f6e6c751e169"}, - {file = "coverage-7.10.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:715c06cb5eceac4d9b7cdf783ce04aa495f6aff657543fea75c30215b28ddb74"}, - {file = "coverage-7.10.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e017ac69fac9aacd7df6dc464c05833e834dc5b00c914d7af9a5249fcccf07ef"}, - {file = "coverage-7.10.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bad180cc40b3fccb0f0e8c702d781492654ac2580d468e3ffc8065e38c6c2408"}, - {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:becbdcd14f685fada010a5f792bf0895675ecf7481304fe159f0cd3f289550bd"}, - {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0b485ca21e16a76f68060911f97ebbe3e0d891da1dbbce6af7ca1ab3f98b9097"}, - {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6c1d098ccfe8e1e0a1ed9a0249138899948afd2978cbf48eb1cc3fcd38469690"}, - {file = "coverage-7.10.4-cp313-cp313-win32.whl", hash = "sha256:8630f8af2ca84b5c367c3df907b1706621abe06d6929f5045fd628968d421e6e"}, - {file = "coverage-7.10.4-cp313-cp313-win_amd64.whl", hash = "sha256:f68835d31c421736be367d32f179e14ca932978293fe1b4c7a6a49b555dff5b2"}, - {file = "coverage-7.10.4-cp313-cp313-win_arm64.whl", hash = "sha256:6eaa61ff6724ca7ebc5326d1fae062d85e19b38dd922d50903702e6078370ae7"}, - {file = "coverage-7.10.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:702978108876bfb3d997604930b05fe769462cc3000150b0e607b7b444f2fd84"}, - {file = "coverage-7.10.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e8f978e8c5521d9c8f2086ac60d931d583fab0a16f382f6eb89453fe998e2484"}, - {file = "coverage-7.10.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:df0ac2ccfd19351411c45e43ab60932b74472e4648b0a9edf6a3b58846e246a9"}, - {file = "coverage-7.10.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73a0d1aaaa3796179f336448e1576a3de6fc95ff4f07c2d7251d4caf5d18cf8d"}, - {file = "coverage-7.10.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:873da6d0ed6b3ffc0bc01f2c7e3ad7e2023751c0d8d86c26fe7322c314b031dc"}, - {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c6446c75b0e7dda5daa876a1c87b480b2b52affb972fedd6c22edf1aaf2e00ec"}, - {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6e73933e296634e520390c44758d553d3b573b321608118363e52113790633b9"}, - {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52073d4b08d2cb571234c8a71eb32af3c6923149cf644a51d5957ac128cf6aa4"}, - {file = "coverage-7.10.4-cp313-cp313t-win32.whl", hash = "sha256:e24afb178f21f9ceb1aefbc73eb524769aa9b504a42b26857243f881af56880c"}, - {file = "coverage-7.10.4-cp313-cp313t-win_amd64.whl", hash = "sha256:be04507ff1ad206f4be3d156a674e3fb84bbb751ea1b23b142979ac9eebaa15f"}, - {file = "coverage-7.10.4-cp313-cp313t-win_arm64.whl", hash = "sha256:f3e3ff3f69d02b5dad67a6eac68cc9c71ae343b6328aae96e914f9f2f23a22e2"}, - {file = "coverage-7.10.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a59fe0af7dd7211ba595cf7e2867458381f7e5d7b4cffe46274e0b2f5b9f4eb4"}, - {file = "coverage-7.10.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3a6c35c5b70f569ee38dc3350cd14fdd0347a8b389a18bb37538cc43e6f730e6"}, - {file = "coverage-7.10.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:acb7baf49f513554c4af6ef8e2bd6e8ac74e6ea0c7386df8b3eb586d82ccccc4"}, - {file = "coverage-7.10.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a89afecec1ed12ac13ed203238b560cbfad3522bae37d91c102e690b8b1dc46c"}, - {file = "coverage-7.10.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:480442727f464407d8ade6e677b7f21f3b96a9838ab541b9a28ce9e44123c14e"}, - {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a89bf193707f4a17f1ed461504031074d87f035153239f16ce86dfb8f8c7ac76"}, - {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:3ddd912c2fc440f0fb3229e764feec85669d5d80a988ff1b336a27d73f63c818"}, - {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8a538944ee3a42265e61c7298aeba9ea43f31c01271cf028f437a7b4075592cf"}, - {file = "coverage-7.10.4-cp314-cp314-win32.whl", hash = "sha256:fd2e6002be1c62476eb862b8514b1ba7e7684c50165f2a8d389e77da6c9a2ebd"}, - {file = "coverage-7.10.4-cp314-cp314-win_amd64.whl", hash = "sha256:ec113277f2b5cf188d95fb66a65c7431f2b9192ee7e6ec9b72b30bbfb53c244a"}, - {file = "coverage-7.10.4-cp314-cp314-win_arm64.whl", hash = "sha256:9744954bfd387796c6a091b50d55ca7cac3d08767795b5eec69ad0f7dbf12d38"}, - {file = "coverage-7.10.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5af4829904dda6aabb54a23879f0f4412094ba9ef153aaa464e3c1b1c9bc98e6"}, - {file = "coverage-7.10.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7bba5ed85e034831fac761ae506c0644d24fd5594727e174b5a73aff343a7508"}, - {file = "coverage-7.10.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d57d555b0719834b55ad35045de6cc80fc2b28e05adb6b03c98479f9553b387f"}, - {file = "coverage-7.10.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ba62c51a72048bb1ea72db265e6bd8beaabf9809cd2125bbb5306c6ce105f214"}, - {file = "coverage-7.10.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0acf0c62a6095f07e9db4ec365cc58c0ef5babb757e54745a1aa2ea2a2564af1"}, - {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e1033bf0f763f5cf49ffe6594314b11027dcc1073ac590b415ea93463466deec"}, - {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:92c29eff894832b6a40da1789b1f252305af921750b03ee4535919db9179453d"}, - {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:822c4c830989c2093527e92acd97be4638a44eb042b1bdc0e7a278d84a070bd3"}, - {file = "coverage-7.10.4-cp314-cp314t-win32.whl", hash = "sha256:e694d855dac2e7cf194ba33653e4ba7aad7267a802a7b3fc4347d0517d5d65cd"}, - {file = "coverage-7.10.4-cp314-cp314t-win_amd64.whl", hash = "sha256:efcc54b38ef7d5bfa98050f220b415bc5bb3d432bd6350a861cf6da0ede2cdcd"}, - {file = "coverage-7.10.4-cp314-cp314t-win_arm64.whl", hash = "sha256:6f3a3496c0fa26bfac4ebc458747b778cff201c8ae94fa05e1391bab0dbc473c"}, - {file = "coverage-7.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:48fd4d52600c2a9d5622e52dfae674a7845c5e1dceaf68b88c99feb511fbcfd6"}, - {file = "coverage-7.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:56217b470d09d69e6b7dcae38200f95e389a77db801cb129101697a4553b18b6"}, - {file = "coverage-7.10.4-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:44ac3f21a6e28c5ff7f7a47bca5f87885f6a1e623e637899125ba47acd87334d"}, - {file = "coverage-7.10.4-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3387739d72c84d17b4d2f7348749cac2e6700e7152026912b60998ee9a40066b"}, - {file = "coverage-7.10.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3f111ff20d9a6348e0125be892608e33408dd268f73b020940dfa8511ad05503"}, - {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:01a852f0a9859734b018a3f483cc962d0b381d48d350b1a0c47d618c73a0c398"}, - {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:225111dd06759ba4e37cee4c0b4f3df2b15c879e9e3c37bf986389300b9917c3"}, - {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2178d4183bd1ba608f0bb12e71e55838ba1b7dbb730264f8b08de9f8ef0c27d0"}, - {file = "coverage-7.10.4-cp39-cp39-win32.whl", hash = "sha256:93d175fe81913aee7a6ea430abbdf2a79f1d9fd451610e12e334e4fe3264f563"}, - {file = "coverage-7.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:2221a823404bb941c7721cf0ef55ac6ee5c25d905beb60c0bba5e5e85415d353"}, - {file = "coverage-7.10.4-py3-none-any.whl", hash = "sha256:065d75447228d05121e5c938ca8f0e91eed60a1eb2d1258d42d5084fecfc3302"}, - {file = "coverage-7.10.4.tar.gz", hash = "sha256:25f5130af6c8e7297fd14634955ba9e1697f47143f289e2a23284177c0061d27"}, + {file = "coverage-7.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fc04cc7a3db33664e0c2d10eb8990ff6b3536f6842c9590ae8da4c614b9ed05a"}, + {file = "coverage-7.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e201e015644e207139f7e2351980feb7040e6f4b2c2978892f3e3789d1c125e5"}, + {file = "coverage-7.10.7-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:240af60539987ced2c399809bd34f7c78e8abe0736af91c3d7d0e795df633d17"}, + {file = "coverage-7.10.7-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8421e088bc051361b01c4b3a50fd39a4b9133079a2229978d9d30511fd05231b"}, + {file = "coverage-7.10.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6be8ed3039ae7f7ac5ce058c308484787c86e8437e72b30bf5e88b8ea10f3c87"}, + {file = "coverage-7.10.7-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e28299d9f2e889e6d51b1f043f58d5f997c373cc12e6403b90df95b8b047c13e"}, + {file = "coverage-7.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c4e16bd7761c5e454f4efd36f345286d6f7c5fa111623c355691e2755cae3b9e"}, + {file = "coverage-7.10.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b1c81d0e5e160651879755c9c675b974276f135558cf4ba79fee7b8413a515df"}, + {file = "coverage-7.10.7-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:606cc265adc9aaedcc84f1f064f0e8736bc45814f15a357e30fca7ecc01504e0"}, + {file = "coverage-7.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:10b24412692df990dbc34f8fb1b6b13d236ace9dfdd68df5b28c2e39cafbba13"}, + {file = "coverage-7.10.7-cp310-cp310-win32.whl", hash = "sha256:b51dcd060f18c19290d9b8a9dd1e0181538df2ce0717f562fff6cf74d9fc0b5b"}, + {file = "coverage-7.10.7-cp310-cp310-win_amd64.whl", hash = "sha256:3a622ac801b17198020f09af3eaf45666b344a0d69fc2a6ffe2ea83aeef1d807"}, + {file = "coverage-7.10.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a609f9c93113be646f44c2a0256d6ea375ad047005d7f57a5c15f614dc1b2f59"}, + {file = "coverage-7.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:65646bb0359386e07639c367a22cf9b5bf6304e8630b565d0626e2bdf329227a"}, + {file = "coverage-7.10.7-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5f33166f0dfcce728191f520bd2692914ec70fac2713f6bf3ce59c3deacb4699"}, + {file = "coverage-7.10.7-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:35f5e3f9e455bb17831876048355dca0f758b6df22f49258cb5a91da23ef437d"}, + {file = "coverage-7.10.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4da86b6d62a496e908ac2898243920c7992499c1712ff7c2b6d837cc69d9467e"}, + {file = "coverage-7.10.7-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6b8b09c1fad947c84bbbc95eca841350fad9cbfa5a2d7ca88ac9f8d836c92e23"}, + {file = "coverage-7.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4376538f36b533b46f8971d3a3e63464f2c7905c9800db97361c43a2b14792ab"}, + {file = "coverage-7.10.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:121da30abb574f6ce6ae09840dae322bef734480ceafe410117627aa54f76d82"}, + {file = "coverage-7.10.7-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:88127d40df529336a9836870436fc2751c339fbaed3a836d42c93f3e4bd1d0a2"}, + {file = "coverage-7.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ba58bbcd1b72f136080c0bccc2400d66cc6115f3f906c499013d065ac33a4b61"}, + {file = "coverage-7.10.7-cp311-cp311-win32.whl", hash = "sha256:972b9e3a4094b053a4e46832b4bc829fc8a8d347160eb39d03f1690316a99c14"}, + {file = "coverage-7.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:a7b55a944a7f43892e28ad4bc0561dfd5f0d73e605d1aa5c3c976b52aea121d2"}, + {file = "coverage-7.10.7-cp311-cp311-win_arm64.whl", hash = "sha256:736f227fb490f03c6488f9b6d45855f8e0fd749c007f9303ad30efab0e73c05a"}, + {file = "coverage-7.10.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7bb3b9ddb87ef7725056572368040c32775036472d5a033679d1fa6c8dc08417"}, + {file = "coverage-7.10.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:18afb24843cbc175687225cab1138c95d262337f5473512010e46831aa0c2973"}, + {file = "coverage-7.10.7-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:399a0b6347bcd3822be369392932884b8216d0944049ae22925631a9b3d4ba4c"}, + {file = "coverage-7.10.7-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:314f2c326ded3f4b09be11bc282eb2fc861184bc95748ae67b360ac962770be7"}, + {file = "coverage-7.10.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c41e71c9cfb854789dee6fc51e46743a6d138b1803fab6cb860af43265b42ea6"}, + {file = "coverage-7.10.7-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc01f57ca26269c2c706e838f6422e2a8788e41b3e3c65e2f41148212e57cd59"}, + {file = "coverage-7.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a6442c59a8ac8b85812ce33bc4d05bde3fb22321fa8294e2a5b487c3505f611b"}, + {file = "coverage-7.10.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:78a384e49f46b80fb4c901d52d92abe098e78768ed829c673fbb53c498bef73a"}, + {file = "coverage-7.10.7-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:5e1e9802121405ede4b0133aa4340ad8186a1d2526de5b7c3eca519db7bb89fb"}, + {file = "coverage-7.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d41213ea25a86f69efd1575073d34ea11aabe075604ddf3d148ecfec9e1e96a1"}, + {file = "coverage-7.10.7-cp312-cp312-win32.whl", hash = "sha256:77eb4c747061a6af8d0f7bdb31f1e108d172762ef579166ec84542f711d90256"}, + {file = "coverage-7.10.7-cp312-cp312-win_amd64.whl", hash = "sha256:f51328ffe987aecf6d09f3cd9d979face89a617eacdaea43e7b3080777f647ba"}, + {file = "coverage-7.10.7-cp312-cp312-win_arm64.whl", hash = "sha256:bda5e34f8a75721c96085903c6f2197dc398c20ffd98df33f866a9c8fd95f4bf"}, + {file = "coverage-7.10.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:981a651f543f2854abd3b5fcb3263aac581b18209be49863ba575de6edf4c14d"}, + {file = "coverage-7.10.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:73ab1601f84dc804f7812dc297e93cd99381162da39c47040a827d4e8dafe63b"}, + {file = "coverage-7.10.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a8b6f03672aa6734e700bbcd65ff050fd19cddfec4b031cc8cf1c6967de5a68e"}, + {file = "coverage-7.10.7-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10b6ba00ab1132a0ce4428ff68cf50a25efd6840a42cdf4239c9b99aad83be8b"}, + {file = "coverage-7.10.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c79124f70465a150e89340de5963f936ee97097d2ef76c869708c4248c63ca49"}, + {file = "coverage-7.10.7-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:69212fbccdbd5b0e39eac4067e20a4a5256609e209547d86f740d68ad4f04911"}, + {file = "coverage-7.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7ea7c6c9d0d286d04ed3541747e6597cbe4971f22648b68248f7ddcd329207f0"}, + {file = "coverage-7.10.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b9be91986841a75042b3e3243d0b3cb0b2434252b977baaf0cd56e960fe1e46f"}, + {file = "coverage-7.10.7-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:b281d5eca50189325cfe1f365fafade89b14b4a78d9b40b05ddd1fc7d2a10a9c"}, + {file = "coverage-7.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:99e4aa63097ab1118e75a848a28e40d68b08a5e19ce587891ab7fd04475e780f"}, + {file = "coverage-7.10.7-cp313-cp313-win32.whl", hash = "sha256:dc7c389dce432500273eaf48f410b37886be9208b2dd5710aaf7c57fd442c698"}, + {file = "coverage-7.10.7-cp313-cp313-win_amd64.whl", hash = "sha256:cac0fdca17b036af3881a9d2729a850b76553f3f716ccb0360ad4dbc06b3b843"}, + {file = "coverage-7.10.7-cp313-cp313-win_arm64.whl", hash = "sha256:4b6f236edf6e2f9ae8fcd1332da4e791c1b6ba0dc16a2dc94590ceccb482e546"}, + {file = "coverage-7.10.7-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a0ec07fd264d0745ee396b666d47cef20875f4ff2375d7c4f58235886cc1ef0c"}, + {file = "coverage-7.10.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:dd5e856ebb7bfb7672b0086846db5afb4567a7b9714b8a0ebafd211ec7ce6a15"}, + {file = "coverage-7.10.7-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f57b2a3c8353d3e04acf75b3fed57ba41f5c0646bbf1d10c7c282291c97936b4"}, + {file = "coverage-7.10.7-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ef2319dd15a0b009667301a3f84452a4dc6fddfd06b0c5c53ea472d3989fbf0"}, + {file = "coverage-7.10.7-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83082a57783239717ceb0ad584de3c69cf581b2a95ed6bf81ea66034f00401c0"}, + {file = "coverage-7.10.7-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:50aa94fb1fb9a397eaa19c0d5ec15a5edd03a47bf1a3a6111a16b36e190cff65"}, + {file = "coverage-7.10.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2120043f147bebb41c85b97ac45dd173595ff14f2a584f2963891cbcc3091541"}, + {file = "coverage-7.10.7-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2fafd773231dd0378fdba66d339f84904a8e57a262f583530f4f156ab83863e6"}, + {file = "coverage-7.10.7-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:0b944ee8459f515f28b851728ad224fa2d068f1513ef6b7ff1efafeb2185f999"}, + {file = "coverage-7.10.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4b583b97ab2e3efe1b3e75248a9b333bd3f8b0b1b8e5b45578e05e5850dfb2c2"}, + {file = "coverage-7.10.7-cp313-cp313t-win32.whl", hash = "sha256:2a78cd46550081a7909b3329e2266204d584866e8d97b898cd7fb5ac8d888b1a"}, + {file = "coverage-7.10.7-cp313-cp313t-win_amd64.whl", hash = "sha256:33a5e6396ab684cb43dc7befa386258acb2d7fae7f67330ebb85ba4ea27938eb"}, + {file = "coverage-7.10.7-cp313-cp313t-win_arm64.whl", hash = "sha256:86b0e7308289ddde73d863b7683f596d8d21c7d8664ce1dee061d0bcf3fbb4bb"}, + {file = "coverage-7.10.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b06f260b16ead11643a5a9f955bd4b5fd76c1a4c6796aeade8520095b75de520"}, + {file = "coverage-7.10.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:212f8f2e0612778f09c55dd4872cb1f64a1f2b074393d139278ce902064d5b32"}, + {file = "coverage-7.10.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3445258bcded7d4aa630ab8296dea4d3f15a255588dd535f980c193ab6b95f3f"}, + {file = "coverage-7.10.7-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb45474711ba385c46a0bfe696c695a929ae69ac636cda8f532be9e8c93d720a"}, + {file = "coverage-7.10.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:813922f35bd800dca9994c5971883cbc0d291128a5de6b167c7aa697fcf59360"}, + {file = "coverage-7.10.7-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:93c1b03552081b2a4423091d6fb3787265b8f86af404cff98d1b5342713bdd69"}, + {file = "coverage-7.10.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cc87dd1b6eaf0b848eebb1c86469b9f72a1891cb42ac7adcfbce75eadb13dd14"}, + {file = "coverage-7.10.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:39508ffda4f343c35f3236fe8d1a6634a51f4581226a1262769d7f970e73bffe"}, + {file = "coverage-7.10.7-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:925a1edf3d810537c5a3abe78ec5530160c5f9a26b1f4270b40e62cc79304a1e"}, + {file = "coverage-7.10.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2c8b9a0636f94c43cd3576811e05b89aa9bc2d0a85137affc544ae5cb0e4bfbd"}, + {file = "coverage-7.10.7-cp314-cp314-win32.whl", hash = "sha256:b7b8288eb7cdd268b0304632da8cb0bb93fadcfec2fe5712f7b9cc8f4d487be2"}, + {file = "coverage-7.10.7-cp314-cp314-win_amd64.whl", hash = "sha256:1ca6db7c8807fb9e755d0379ccc39017ce0a84dcd26d14b5a03b78563776f681"}, + {file = "coverage-7.10.7-cp314-cp314-win_arm64.whl", hash = "sha256:097c1591f5af4496226d5783d036bf6fd6cd0cbc132e071b33861de756efb880"}, + {file = "coverage-7.10.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a62c6ef0d50e6de320c270ff91d9dd0a05e7250cac2a800b7784bae474506e63"}, + {file = "coverage-7.10.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9fa6e4dd51fe15d8738708a973470f67a855ca50002294852e9571cdbd9433f2"}, + {file = "coverage-7.10.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8fb190658865565c549b6b4706856d6a7b09302c797eb2cf8e7fe9dabb043f0d"}, + {file = "coverage-7.10.7-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:affef7c76a9ef259187ef31599a9260330e0335a3011732c4b9effa01e1cd6e0"}, + {file = "coverage-7.10.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e16e07d85ca0cf8bafe5f5d23a0b850064e8e945d5677492b06bbe6f09cc699"}, + {file = "coverage-7.10.7-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:03ffc58aacdf65d2a82bbeb1ffe4d01ead4017a21bfd0454983b88ca73af94b9"}, + {file = "coverage-7.10.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1b4fd784344d4e52647fd7857b2af5b3fbe6c239b0b5fa63e94eb67320770e0f"}, + {file = "coverage-7.10.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:0ebbaddb2c19b71912c6f2518e791aa8b9f054985a0769bdb3a53ebbc765c6a1"}, + {file = "coverage-7.10.7-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:a2d9a3b260cc1d1dbdb1c582e63ddcf5363426a1a68faa0f5da28d8ee3c722a0"}, + {file = "coverage-7.10.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a3cc8638b2480865eaa3926d192e64ce6c51e3d29c849e09d5b4ad95efae5399"}, + {file = "coverage-7.10.7-cp314-cp314t-win32.whl", hash = "sha256:67f8c5cbcd3deb7a60b3345dffc89a961a484ed0af1f6f73de91705cc6e31235"}, + {file = "coverage-7.10.7-cp314-cp314t-win_amd64.whl", hash = "sha256:e1ed71194ef6dea7ed2d5cb5f7243d4bcd334bfb63e59878519be558078f848d"}, + {file = "coverage-7.10.7-cp314-cp314t-win_arm64.whl", hash = "sha256:7fe650342addd8524ca63d77b2362b02345e5f1a093266787d210c70a50b471a"}, + {file = "coverage-7.10.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fff7b9c3f19957020cac546c70025331113d2e61537f6e2441bc7657913de7d3"}, + {file = "coverage-7.10.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bc91b314cef27742da486d6839b677b3f2793dfe52b51bbbb7cf736d5c29281c"}, + {file = "coverage-7.10.7-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:567f5c155eda8df1d3d439d40a45a6a5f029b429b06648235f1e7e51b522b396"}, + {file = "coverage-7.10.7-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2af88deffcc8a4d5974cf2d502251bc3b2db8461f0b66d80a449c33757aa9f40"}, + {file = "coverage-7.10.7-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c7315339eae3b24c2d2fa1ed7d7a38654cba34a13ef19fbcb9425da46d3dc594"}, + {file = "coverage-7.10.7-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:912e6ebc7a6e4adfdbb1aec371ad04c68854cd3bf3608b3514e7ff9062931d8a"}, + {file = "coverage-7.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f49a05acd3dfe1ce9715b657e28d138578bc40126760efb962322c56e9ca344b"}, + {file = "coverage-7.10.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:cce2109b6219f22ece99db7644b9622f54a4e915dad65660ec435e89a3ea7cc3"}, + {file = "coverage-7.10.7-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:f3c887f96407cea3916294046fc7dab611c2552beadbed4ea901cbc6a40cc7a0"}, + {file = "coverage-7.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:635adb9a4507c9fd2ed65f39693fa31c9a3ee3a8e6dc64df033e8fdf52a7003f"}, + {file = "coverage-7.10.7-cp39-cp39-win32.whl", hash = "sha256:5a02d5a850e2979b0a014c412573953995174743a3f7fa4ea5a6e9a3c5617431"}, + {file = "coverage-7.10.7-cp39-cp39-win_amd64.whl", hash = "sha256:c134869d5ffe34547d14e174c866fd8fe2254918cc0a95e99052903bc1543e07"}, + {file = "coverage-7.10.7-py3-none-any.whl", hash = "sha256:f7941f6f2fe6dd6807a1208737b8a0cbcf1cc6d7b07d24998ad2d63590868260"}, + {file = "coverage-7.10.7.tar.gz", hash = "sha256:f4ab143ab113be368a3e9b795f9cd7906c5ef407d6173fe9675a902e1fffc239"}, ] [package.extras] @@ -462,7 +495,7 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "custom-boto3-init" -version = "3.0.7" +version = "4.0.0" description = "Initialize boto config for AWS Python SDK with custom configuration" optional = false python-versions = "^3.13" @@ -572,73 +605,101 @@ drafts = ["pycryptodome"] [[package]] name = "markupsafe" -version = "3.0.2" +version = "3.0.3" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, - {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, - {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, - {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, - {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, - {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, - {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, - {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, + {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, + {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, + {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, + {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, + {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, + {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, + {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, ] [[package]] @@ -734,15 +795,15 @@ testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "pycparser" -version = "2.22" +version = "2.23" description = "C parser in Python" optional = false python-versions = ">=3.8" groups = ["test"] -markers = "platform_python_implementation != \"PyPy\"" +markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"" files = [ - {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, - {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, + {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"}, + {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"}, ] [[package]] @@ -901,14 +962,14 @@ files = [ [[package]] name = "requests" -version = "2.32.4" +version = "2.32.5" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["test"] files = [ - {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, - {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, ] [package.dependencies] @@ -943,14 +1004,14 @@ tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asy [[package]] name = "s3transfer" -version = "0.13.1" +version = "0.14.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724"}, - {file = "s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf"}, + {file = "s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456"}, + {file = "s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125"}, ] [package.dependencies] @@ -994,14 +1055,14 @@ files = [ [[package]] name = "typing-extensions" -version = "4.14.1" +version = "4.15.0" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, - {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, ] [[package]] @@ -1133,17 +1194,20 @@ files = [ [[package]] name = "xmltodict" -version = "0.14.2" +version = "1.0.2" description = "Makes working with XML feel like you are working with JSON" optional = false -python-versions = ">=3.6" +python-versions = ">=3.9" groups = ["test"] files = [ - {file = "xmltodict-0.14.2-py2.py3-none-any.whl", hash = "sha256:20cc7d723ed729276e808f26fb6b3599f786cbc37e06c65e192ba77c40f20aac"}, - {file = "xmltodict-0.14.2.tar.gz", hash = "sha256:201e7c28bb210e374999d1dde6382923ab0ed1a8a5faeece48ab525b7810a553"}, + {file = "xmltodict-1.0.2-py3-none-any.whl", hash = "sha256:62d0fddb0dcbc9f642745d8bbf4d81fd17d6dfaec5a15b5c1876300aad92af0d"}, + {file = "xmltodict-1.0.2.tar.gz", hash = "sha256:54306780b7c2175a3967cad1db92f218207e5bc1aba697d887807c0fb68b7649"}, ] +[package.extras] +test = ["pytest", "pytest-cov"] + [metadata] lock-version = "2.1" python-versions = "^3.13" -content-hash = "c5fdc453505e6143c8c2c7c41230dfe641b8547201bea1f11990a27a04abb832" +content-hash = "85356d30a94c4a80532f6c5d70f8922d63a9f8cdf0a45cd00ce39af3ee2b8345" diff --git a/source/lambda/custom-resource/pyproject.toml b/source/lambda/custom-resource/pyproject.toml index fd8ded86..d9868995 100644 --- a/source/lambda/custom-resource/pyproject.toml +++ b/source/lambda/custom-resource/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "custom-resource" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Perform specific operations triggered by AWS CloudFormation events" packages = [ @@ -28,7 +28,6 @@ pytest-env = "1.1.5" PyYAML = "6.0.2" setuptools = "80.8.0" pyjwt = "^2.10.1" -requests="2.32.4" urllib3="2.5.0" boto3-layer = { path = "../layers/aws_boto3/", develop = true } custom_boto3_init = { path = "../layers/custom_boto3_init", develop = true } diff --git a/source/lambda/custom-resource/test/fixtures/agentcore_oauth_client_events.py b/source/lambda/custom-resource/test/fixtures/agentcore_oauth_client_events.py new file mode 100644 index 00000000..3eeb11e5 --- /dev/null +++ b/source/lambda/custom-resource/test/fixtures/agentcore_oauth_client_events.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from operations import operation_types +from operations.operation_types import PHYSICAL_RESOURCE_ID, RESOURCE, RESOURCE_PROPERTIES + + +@pytest.fixture +def lambda_event(aws_credentials, custom_resource_event): + custom_resource_event[RESOURCE_PROPERTIES] = { + RESOURCE: operation_types.AGENTCORE_OAUTH_CLIENT, + "CLIENT_ID": "test-client-id", + "CLIENT_SECRET": "test-client-secret", + "DISCOVERY_URL": "https://example.com/.well-known/openid_configuration", + "PROVIDER_NAME": "test-provider", + "AWS_REGION": "us-east-1" + } + custom_resource_event[PHYSICAL_RESOURCE_ID] = "fake_physical_resource_id" + + yield custom_resource_event diff --git a/source/lambda/custom-resource/test/fixtures/agentcore_outbound_permissions_events.py b/source/lambda/custom-resource/test/fixtures/agentcore_outbound_permissions_events.py new file mode 100644 index 00000000..ae8c382e --- /dev/null +++ b/source/lambda/custom-resource/test/fixtures/agentcore_outbound_permissions_events.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from operations import operation_types +from operations.operation_types import PHYSICAL_RESOURCE_ID, RESOURCE, RESOURCE_PROPERTIES + + +@pytest.fixture +def lambda_event(custom_resource_event): + custom_resource_event[RESOURCE_PROPERTIES] = { + RESOURCE: operation_types.AGENTCORE_OUTBOUND_PERMISSIONS, + "USE_CASE_ID": "test-use-case-123", + "USE_CASE_CLIENT_ID": "fake-client-id", + "USE_CASE_CONFIG_TABLE_NAME": "fake-table-name", + "USE_CASE_CONFIG_RECORD_KEY": "fake-record-key" + } + custom_resource_event[PHYSICAL_RESOURCE_ID] = "fake_physical_resource_id" + + yield custom_resource_event diff --git a/source/lambda/custom-resource/test/fixtures/gen_ecr_repo_prefix_events.py b/source/lambda/custom-resource/test/fixtures/gen_ecr_repo_prefix_events.py new file mode 100644 index 00000000..c658c4a9 --- /dev/null +++ b/source/lambda/custom-resource/test/fixtures/gen_ecr_repo_prefix_events.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from operations import operation_types +from operations.operation_types import PHYSICAL_RESOURCE_ID, RESOURCE, RESOURCE_PROPERTIES + + +@pytest.fixture +def lambda_event(aws_credentials, custom_resource_event): + custom_resource_event[RESOURCE_PROPERTIES] = { + RESOURCE: operation_types.GEN_ECR_REPO_PREFIX, + "StackName": "DeploymentPlatformStack" + } + custom_resource_event[PHYSICAL_RESOURCE_ID] = "fake_physical_resource_id" + + yield custom_resource_event \ No newline at end of file diff --git a/source/lambda/custom-resource/test/fixtures/lambda_version_generator_events.py b/source/lambda/custom-resource/test/fixtures/lambda_version_generator_events.py new file mode 100644 index 00000000..d5d169d9 --- /dev/null +++ b/source/lambda/custom-resource/test/fixtures/lambda_version_generator_events.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from operations import operation_types +from operations.operation_types import PHYSICAL_RESOURCE_ID, RESOURCE, RESOURCE_PROPERTIES + + +@pytest.fixture +def lambda_event(aws_credentials, custom_resource_event): + custom_resource_event[RESOURCE_PROPERTIES] = { + RESOURCE: operation_types.LAMBDA_VERSION_GENERATOR, + "FunctionName": "my-function" + } + custom_resource_event[PHYSICAL_RESOURCE_ID] = "fake_physical_resource_id" + + yield custom_resource_event + \ No newline at end of file diff --git a/source/lambda/custom-resource/test/fixtures/anonymous_metrics_events.py b/source/lambda/custom-resource/test/fixtures/metrics_events.py similarity index 50% rename from source/lambda/custom-resource/test/fixtures/anonymous_metrics_events.py rename to source/lambda/custom-resource/test/fixtures/metrics_events.py index 5782b557..c91de29c 100644 --- a/source/lambda/custom-resource/test/fixtures/anonymous_metrics_events.py +++ b/source/lambda/custom-resource/test/fixtures/metrics_events.py @@ -77,6 +77,125 @@ def llm_config_value_with_auth(): yield config +@pytest.fixture +def llm_config_value_with_multimodal(): + config = { + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": {"ModelId": "fakemodel"}, + "MultimodalParams": { + "MultimodalEnabled": True, + }, + }, + "UseCaseType": "AgentBuilder", + } + yield config + + +@pytest.fixture +def llm_config_value_with_mcp_gateway(): + config = { + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": {"ModelId": "fakemodel"}, + }, + "UseCaseType": "MCPServer", + "MCPParams": { + "GatewayParams": { + "TargetParams": [ + { + "TargetType": "smithyModel", + }, + { + "TargetType": "openApiSchema", + "OutboundAuthParams": { + "OutboundAuthProviderArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test", + "OutboundAuthProviderType": "API_KEY", + }, + }, + { + "TargetType": "openApiSchema", + "OutboundAuthParams": { + "OutboundAuthProviderArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/oauth", + "OutboundAuthProviderType": "OAUTH", + }, + }, + ], + } + }, + } + yield config + + +@pytest.fixture +def llm_config_value_with_mcp_runtime(): + config = { + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": {"ModelId": "fakemodel"}, + }, + "UseCaseType": "MCPServer", + "MCPParams": { + "RuntimeParams": { + "RuntimeName": "test-runtime", + "RuntimeId": "runtime-123", + } + }, + } + yield config + + +@pytest.fixture +def llm_config_value_with_agent_builder(): + config = { + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": {"ModelId": "fakemodel"}, + }, + "UseCaseType": "AgentBuilder", + "AgentBuilderParams": { + "MemoryConfig": {"LongTermEnabled": False}, + "MCPServers": [ + { + "Type": "runtime", + } + ], + "Tools": [ + {"ToolId": "calculator"}, + {"ToolId": "current_time"}, + {"ToolId": "environment"}, + ], + }, + } + yield config + + +@pytest.fixture +def llm_config_value_with_workflow(): + config = { + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": {"ModelId": "fakemodel"}, + }, + "UseCaseType": "Workflow", + "WorkflowParams": { + "AgentsAsToolsParams": { + "Agents": [ + { + "UseCaseType": "AgentBuilder", + }, + { + "UseCaseType": "AgentBuilder", + }, + ] + }, + "MemoryConfig": {"LongTermEnabled": False}, + "OrchestrationPattern": "agents-as-tools", + }, + } + yield config + + @pytest.fixture def llm_config_value_text_with_no_rag(): config = { @@ -104,12 +223,25 @@ def llm_config_value_text_with_no_rag(): yield config +@pytest.fixture +def llm_config_value_with_provisioned_concurrency(): + config = { + "LlmParams": { + "ModelProvider": "Bedrock", + "BedrockLlmParams": {"ModelId": "fakemodel"}, + }, + "UseCaseType": "Text", + "ProvisionedConcurrencyValue": 10, + } + yield config + + @pytest.fixture def lambda_events(aws_credentials, custom_resource_event): events_list = [] payloads = [ { - RESOURCE: operation_types.ANONYMOUS_METRIC, + RESOURCE: operation_types.METRIC, "SolutionId": "SO0999", "Version": "v9.9.9", "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", @@ -118,21 +250,21 @@ def lambda_events(aws_credentials, custom_resource_event): UUID: "fakeuuid", }, { - RESOURCE: operation_types.ANONYMOUS_METRIC, + RESOURCE: operation_types.METRIC, "SolutionId": "SO0999", "Version": "v9.9.9", "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", UUID: "fakeuuid", }, { - RESOURCE: operation_types.ANONYMOUS_METRIC, + RESOURCE: operation_types.METRIC, "SolutionId": "SO0999", "Version": "v9.9.9", "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", UUID: "fakeuuid", }, { - RESOURCE: operation_types.ANONYMOUS_METRIC, + RESOURCE: operation_types.METRIC, "SolutionId": "SO0999", "Version": "v9.9.9", "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", @@ -141,7 +273,7 @@ def lambda_events(aws_credentials, custom_resource_event): UUID: "fakeuuid", }, { - RESOURCE: operation_types.ANONYMOUS_METRIC, + RESOURCE: operation_types.METRIC, "SolutionId": "SO0999", "Version": "v9.9.9", "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", @@ -149,13 +281,31 @@ def lambda_events(aws_credentials, custom_resource_event): USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_3", # Second mock table to simulate config value with no knowledge base UUID: "fakeuuid", }, + { + RESOURCE: operation_types.METRIC, + "SolutionId": "SO0999", + "Version": "v9.9.9", + "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", + USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_4", + UUID: "fakeuuid", + }, { RESOURCE: operation_types.ANONYMOUS_METRIC, "SolutionId": "SO0999", "Version": "v9.9.9", "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", - USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_4", # Second mock table to simulate config value with no knowledge base + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_8", + UUID: "fakeuuid", + }, + { + RESOURCE: operation_types.ANONYMOUS_METRIC, + "SolutionId": "SO0999", + "Version": "v9.9.9", + "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", + USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_9", UUID: "fakeuuid", }, ] @@ -170,7 +320,17 @@ def lambda_events(aws_credentials, custom_resource_event): @pytest.fixture(autouse=True) def setup_config_ddb( - ddb, llm_config_value, llm_config_value_text_with_no_rag, llm_config_value_with_auth, llm_config_value_with_agent + ddb, + llm_config_value, + llm_config_value_text_with_no_rag, + llm_config_value_with_auth, + llm_config_value_with_agent, + llm_config_value_with_multimodal, + llm_config_value_with_mcp_gateway, + llm_config_value_with_mcp_runtime, + llm_config_value_with_agent_builder, + llm_config_value_with_workflow, + llm_config_value_with_provisioned_concurrency, ): table = ddb.create_table( TableName="fake_ddb_table", @@ -213,4 +373,52 @@ def setup_config_ddb( }, ) + table.put_item( + TableName="fake_ddb_table", + Item={ + USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME: "fake_ddb_table_hash_key_5", + "config": llm_config_value_with_multimodal, + }, + ) + + table.put_item( + TableName="fake_ddb_table", + Item={ + USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME: "fake_ddb_table_hash_key_6", + "config": llm_config_value_with_mcp_gateway, + }, + ) + + table.put_item( + TableName="fake_ddb_table", + Item={ + USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME: "fake_ddb_table_hash_key_7", + "config": llm_config_value_with_mcp_runtime, + }, + ) + + table.put_item( + TableName="fake_ddb_table", + Item={ + USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME: "fake_ddb_table_hash_key_8", + "config": llm_config_value_with_agent_builder, + }, + ) + + table.put_item( + TableName="fake_ddb_table", + Item={ + USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME: "fake_ddb_table_hash_key_9", + "config": llm_config_value_with_workflow, + }, + ) + + table.put_item( + TableName="fake_ddb_table", + Item={ + USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME: "fake_ddb_table_hash_key_10", + "config": llm_config_value_with_provisioned_concurrency, + }, + ) + yield ddb diff --git a/source/lambda/custom-resource/test/operations/test_agentcore_oauth_client.py b/source/lambda/custom-resource/test/operations/test_agentcore_oauth_client.py new file mode 100644 index 00000000..dcf4d3c9 --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_agentcore_oauth_client.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch +from operations.agentcore_oauth_client import execute, verify_env_setup, create, delete, AgentCoreIdentityError +from operations.operation_types import RESOURCE_PROPERTIES +from test.fixtures.agentcore_oauth_client_events import lambda_event + + +class TestAgentcoreOauthClient: + + def test_verify_env_setup_success(self, lambda_event): + verify_env_setup(lambda_event) + + def test_verify_env_setup_missing_field(self, lambda_event): + lambda_event[RESOURCE_PROPERTIES]["CLIENT_ID"] = "" + with pytest.raises(ValueError, match="CLIENT_ID has not been passed"): + verify_env_setup(lambda_event) + + @patch('operations.agentcore_oauth_client.get_service_client') + def test_create_success(self, mock_get_service_client, lambda_event, mock_lambda_context): + mock_client = Mock() + mock_client.create_oauth2_credential_provider.return_value = {} + mock_get_service_client.return_value = mock_client + + result = create(lambda_event, mock_lambda_context) + + mock_get_service_client.assert_called_once_with("bedrock-agentcore-control") + mock_client.create_oauth2_credential_provider.assert_called_once_with( + name="test-provider", + credentialProviderVendor="CustomOauth2", + oauth2ProviderConfigInput={ + "customOauth2ProviderConfig": { + "oauthDiscovery": { + "discoveryUrl": "https://example.com/.well-known/openid_configuration" + }, + "clientId": "test-client-id", + "clientSecret": "test-client-secret", + } + } + ) + + @patch('operations.agentcore_oauth_client.get_service_client') + def test_create_failure(self, mock_get_service_client, lambda_event, mock_lambda_context): + mock_client = Mock() + mock_client.create_oauth2_credential_provider.side_effect = Exception("API Error") + mock_get_service_client.return_value = mock_client + + with pytest.raises(AgentCoreIdentityError, match="Failed to create OAuth2 provider"): + create(lambda_event, mock_lambda_context) + + @patch('operations.agentcore_oauth_client.get_service_client') + def test_delete_success(self, mock_get_service_client, lambda_event, mock_lambda_context): + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + delete(lambda_event, mock_lambda_context) + + mock_get_service_client.assert_called_once_with("bedrock-agentcore-control") + mock_client.delete_oauth2_credential_provider.assert_called_once_with(name="test-provider") + + @patch('operations.agentcore_oauth_client.get_service_client') + def test_delete_failure(self, mock_get_service_client, lambda_event, mock_lambda_context): + mock_client = Mock() + mock_client.delete_oauth2_credential_provider.side_effect = Exception("API Error") + mock_get_service_client.return_value = mock_client + + with pytest.raises(AgentCoreIdentityError, match="Failed to delete OAuth2 provider"): + delete(lambda_event, mock_lambda_context) + + @patch('operations.agentcore_oauth_client.send_response') + @patch('operations.agentcore_oauth_client.create') + def test_execute_create_success(self, mock_create, mock_send_response, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Create" + + execute(lambda_event, mock_lambda_context) + + mock_create.assert_called_once() + mock_send_response.assert_called_once_with( + lambda_event, mock_lambda_context, "SUCCESS", {}, "fake_physical_resource_id" + ) + + @patch('operations.agentcore_oauth_client.send_response') + @patch('operations.agentcore_oauth_client.delete') + def test_execute_delete_success(self, mock_delete, mock_send_response, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Delete" + + execute(lambda_event, mock_lambda_context) + + mock_delete.assert_called_once() + mock_send_response.assert_called_once_with( + lambda_event, mock_lambda_context, "SUCCESS", {}, "fake_physical_resource_id" + ) + + @patch('operations.agentcore_oauth_client.send_response') + def test_execute_update_noop(self, mock_send_response, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Update" + + execute(lambda_event, mock_lambda_context) + + mock_send_response.assert_called_once_with( + lambda_event, mock_lambda_context, "SUCCESS", {}, "fake_physical_resource_id" + ) + + @patch('operations.agentcore_oauth_client.send_response') + @patch('operations.agentcore_oauth_client.create') + def test_execute_failure(self, mock_create, mock_send_response, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Create" + mock_create.side_effect = Exception("Test error") + + execute(lambda_event, mock_lambda_context) + + mock_send_response.assert_called_once() + args = mock_send_response.call_args + assert args[0][2] == "FAILED" + assert "Test error" in args[1]["reason"] diff --git a/source/lambda/custom-resource/test/operations/test_agentcore_outbound_permissions.py b/source/lambda/custom-resource/test/operations/test_agentcore_outbound_permissions.py new file mode 100644 index 00000000..f5d9babd --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_agentcore_outbound_permissions.py @@ -0,0 +1,307 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch +from operations.agentcore_outbound_permissions import ( + execute, + verify_env_setup, + create, + update, + delete, + _extract_properties, + _manage_permissions, + get_usecase_config, + get_mcp_servers, +) +from operations.operation_types import RESOURCE_PROPERTIES +from test.fixtures.agentcore_outbound_permissions_events import lambda_event +from utils.data import MCPServerData +from utils.constants import EntityType + + +class TestAgentcoreOutboundPermissions: + def test_verify_env_setup_success(self, lambda_event): + verify_env_setup(lambda_event) + + def test_verify_env_setup_missing_use_case_id(self, lambda_event): + lambda_event[RESOURCE_PROPERTIES]["USE_CASE_ID"] = "" + with pytest.raises(ValueError, match="USE_CASE_ID has not been passed"): + verify_env_setup(lambda_event) + + def test_verify_env_setup_missing_client_id(self, lambda_event): + lambda_event[RESOURCE_PROPERTIES]["USE_CASE_CLIENT_ID"] = "" + with pytest.raises(ValueError, match="USE_CASE_CLIENT_ID has not been passed"): + verify_env_setup(lambda_event) + + def test_extract_properties(self, lambda_event): + use_case_id, client_id, table_name, record_key = _extract_properties(lambda_event) + assert use_case_id == "test-use-case-123" + assert client_id == "fake-client-id" + assert table_name == "fake-table-name" + assert record_key == "fake-record-key" + + def test_manage_permissions_add(self): + mock_auth_manager = Mock() + mcp_servers = [ + MCPServerData( + EntityType.RUNTIME.value, + "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime/invocations", + "use-case-1", + "name-1", + "123456789012", + ), + MCPServerData( + EntityType.GATEWAY.value, + "https://test.gateway.bedrock-agentcore.us-east-1", + "use-case-2", + "name-2", + "123456789012", + ), + ] + + result = _manage_permissions(mock_auth_manager, mcp_servers, "add") + + assert len(result) == 2 + assert mock_auth_manager.add_permission.call_count == 2 + + def test_manage_permissions_remove(self): + mock_auth_manager = Mock() + mcp_servers = [ + MCPServerData( + EntityType.RUNTIME.value, + "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime/invocations", + "use-case-1", + "name-1", + "123456789012", + ) + ] + + result = _manage_permissions(mock_auth_manager, mcp_servers, "remove") + + assert len(result) == 1 + mock_auth_manager.remove_permission.assert_called_once() + + @patch("operations.agentcore_outbound_permissions.get_mcp_servers") + @patch("operations.agentcore_outbound_permissions.AuthManager") + def test_create(self, mock_auth_manager_class, mock_get_mcp_servers, lambda_event): + mock_auth_manager = Mock() + mock_auth_manager_class.return_value = mock_auth_manager + mock_mcp_servers = [Mock()] + mock_mcp_servers[0].agentcore_id = "test-id" + mock_get_mcp_servers.return_value = mock_mcp_servers + + result = create(lambda_event, Mock()) + + assert result == ["test-id"] + mock_auth_manager.add_permission.assert_called_once() + + @patch("operations.agentcore_outbound_permissions.get_mcp_servers") + @patch("operations.agentcore_outbound_permissions.AuthManager") + def test_update(self, mock_auth_manager_class, mock_get_mcp_servers, lambda_event): + lambda_event["OldResourceProperties"] = { + "USE_CASE_CONFIG_RECORD_KEY": "old-key", + "USE_CASE_CONFIG_TABLE_NAME": "old-table", + } + + mock_auth_manager = Mock() + mock_auth_manager_class.return_value = mock_auth_manager + + # Mock current and old servers + current_server = Mock() + current_server.agentcore_id = "current-id" + old_server = Mock() + old_server.agentcore_id = "old-id" + + mock_get_mcp_servers.side_effect = [[current_server], [old_server]] + + added, removed = update(lambda_event, Mock()) + + assert added == ["current-id"] + assert removed == ["old-id"] + + @patch("operations.agentcore_outbound_permissions.get_mcp_servers") + @patch("operations.agentcore_outbound_permissions.AuthManager") + def test_delete(self, mock_auth_manager_class, mock_get_mcp_servers, lambda_event): + mock_auth_manager = Mock() + mock_auth_manager_class.return_value = mock_auth_manager + mock_mcp_servers = [Mock()] + mock_mcp_servers[0].agentcore_id = "test-id" + mock_get_mcp_servers.return_value = mock_mcp_servers + + result = delete(lambda_event, Mock()) + + assert result == ["test-id"] + mock_auth_manager.remove_permission.assert_called_once() + + @patch("operations.agentcore_outbound_permissions.get_service_resource") + def test_get_usecase_config(self, mock_get_service_resource): + mock_table = Mock() + mock_get_service_resource.return_value.Table.return_value = mock_table + mock_table.get_item.return_value = {"Item": {"config": {"test": "data"}}} + + result = get_usecase_config("test-table", "test-key") + + assert result == {"test": "data"} + + @patch("operations.agentcore_outbound_permissions.get_usecase_config") + @patch("operations.agentcore_outbound_permissions.get_invocation_account_id") + def test_get_mcp_servers_agent_builder(self, mock_get_account_id, mock_get_config): + mock_get_account_id.return_value = "123456789012" + mock_get_config.return_value = { + "AgentBuilderParams": { + "MCPServers": [ + { + "Type": "runtime", + "Url": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime/invocations", + "UseCaseId": "test-use-case", + "UseCaseName": "test-name", + } + ] + } + } + + result = get_mcp_servers("test-table", "test-key", Mock()) + + assert len(result) == 1 + assert result[0].type == "runtime" + assert result[0].use_case_id == "test-use-case" + + @patch("operations.agentcore_outbound_permissions.get_usecase_config") + @patch("operations.agentcore_outbound_permissions.get_invocation_account_id") + def test_get_mcp_servers_workflow(self, mock_get_account_id, mock_get_config): + mock_get_account_id.return_value = "123456789012" + mock_get_config.return_value = { + "WorkflowParams": { + "AgentsAsToolsParams": { + "Agents": [ + { + "UseCaseId": "agent-1", + "UseCaseName": "Agent 1", + "AgentBuilderParams": { + "MCPServers": [ + { + "Type": "gateway", + "Url": "https://gaab-mcp-c8ed4e33-eipz5dmpyt.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "UseCaseId": "mcp-use-case-1", + "UseCaseName": "MCP Server 1", + } + ] + }, + }, + { + "UseCaseId": "agent-2", + "UseCaseName": "Agent 2", + "AgentBuilderParams": { + "MCPServers": [ + { + "Type": "runtime", + "Url": "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Ftest-runtime/invocations?qualifier=DEFAULT", + "UseCaseId": "mcp-use-case-2", + "UseCaseName": "MCP Server 2", + } + ] + }, + }, + ] + } + } + } + + result = get_mcp_servers("test-table", "test-key", Mock()) + + assert len(result) == 2 + assert result[0].type == "gateway" + assert result[0].use_case_id == "mcp-use-case-1" + assert result[1].type == "runtime" + assert result[1].use_case_id == "mcp-use-case-2" + + @patch("operations.agentcore_outbound_permissions.get_usecase_config") + @patch("operations.agentcore_outbound_permissions.get_invocation_account_id") + def test_get_mcp_servers_workflow_mixed(self, mock_get_account_id, mock_get_config): + """Test workflow with agents that have and don't have MCP servers""" + mock_get_account_id.return_value = "123456789012" + mock_get_config.return_value = { + "WorkflowParams": { + "AgentsAsToolsParams": { + "Agents": [ + { + "UseCaseId": "agent-1", + "UseCaseName": "Agent 1", + "AgentBuilderParams": { + "MCPServers": [ + { + "Type": "gateway", + "Url": "https://gaab-mcp-89e05838-b8anlovrme.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "UseCaseId": "mcp-use-case-1", + "UseCaseName": "MCP Server 1", + } + ] + }, + }, + { + "UseCaseId": "agent-2", + "UseCaseName": "Agent 2", + "AgentBuilderParams": {"Tools": [{"ToolId": "calculator"}]}, + }, + ] + } + } + } + + result = get_mcp_servers("test-table", "test-key", Mock()) + + assert len(result) == 1 + assert result[0].type == "gateway" + assert result[0].use_case_id == "mcp-use-case-1" + + @patch("operations.agentcore_outbound_permissions.get_usecase_config") + @patch("operations.agentcore_outbound_permissions.get_invocation_account_id") + def test_get_mcp_servers_empty_workflow(self, mock_get_account_id, mock_get_config): + """Test workflow with no MCP servers""" + mock_get_account_id.return_value = "123456789012" + mock_get_config.return_value = {"WorkflowParams": {"AgentsAsToolsParams": {"Agents": []}}} + + result = get_mcp_servers("test-table", "test-key", Mock()) + + assert len(result) == 0 + + @patch("operations.agentcore_outbound_permissions.send_response") + @patch("operations.agentcore_outbound_permissions.create") + def test_execute_create(self, mock_create, mock_send_response, lambda_event): + lambda_event["RequestType"] = "Create" + mock_create.return_value = ["test-id"] + + execute(lambda_event, Mock()) + + mock_send_response.assert_called_once() + args = mock_send_response.call_args[0] + assert args[2] == "SUCCESS" + assert args[3] == {"Added": ["test-id"], "Removed": []} + + @patch("operations.agentcore_outbound_permissions.send_response") + @patch("operations.agentcore_outbound_permissions.update") + def test_execute_update(self, mock_update, mock_send_response, lambda_event): + lambda_event["RequestType"] = "Update" + mock_update.return_value = (["added-id"], ["removed-id"]) + + execute(lambda_event, Mock()) + + mock_send_response.assert_called_once() + args = mock_send_response.call_args[0] + assert args[2] == "SUCCESS" + assert args[3] == {"Added": ["added-id"], "Removed": ["removed-id"]} + + @patch("operations.agentcore_outbound_permissions.send_response") + @patch("operations.agentcore_outbound_permissions.delete") + def test_execute_delete(self, mock_delete, mock_send_response, lambda_event): + lambda_event["RequestType"] = "Delete" + mock_delete.return_value = ["removed-id"] + + execute(lambda_event, Mock()) + + mock_send_response.assert_called_once() + args = mock_send_response.call_args[0] + assert args[2] == "SUCCESS" + assert args[3] == {"Added": [], "Removed": ["removed-id"]} diff --git a/source/lambda/custom-resource/test/operations/test_deploy_agent_core.py b/source/lambda/custom-resource/test/operations/test_deploy_agent_core.py new file mode 100644 index 00000000..0d374828 --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_deploy_agent_core.py @@ -0,0 +1,1317 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from unittest.mock import Mock, patch + +import pytest +from botocore.exceptions import ClientError +from operations.deploy_agent_core import ( + _build_runtime_environment_variables, + _build_runtime_request, + _build_update_request, + _delete_runtime_resource, + _ensure_ecr_image_exists, + _extract_resource_properties, + _find_runtime_for_deletion, + _find_runtime_id_by_name, + _get_runtime_description, + _log_configuration_changes, + _validate_runtime_response, + create_agent_runtime, + delete_agent_runtime, + execute, + update_agent_runtime, +) +from utils.agent_core_utils import format_error_message, handle_client_error, validate_event_properties + + +class TestDeployAgentCore: + """Test cases for deploy_agent_core operation.""" + + def setup_method(self): + """Set up test fixtures.""" + self.mock_context = Mock() + self.mock_context.log_stream_name = "test-log-stream" + + self.base_event = { + "RequestType": "Create", + "ResourceProperties": { + "AgentRuntimeName": "test-runtime", + "AgentImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/test-repo:latest", + "ExecutionRoleArn": "arn:aws:iam::123456789012:role/test-role", + "UseCaseConfigRecordKey": "12345678-1234-1234-1234-123456789012", + "UseCaseUUID": "53e345af-deb5-45a6-8e26-c96854eb4a4d", + "UseCaseConfigTableName": "test-config-table", + "MemoryId": "test-memory-id-123", + }, + "ResponseURL": "https://test-response-url.com", + "StackId": "test-stack-id", + "RequestId": "test-request-id", + "LogicalResourceId": "test-logical-id", + } + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_create_with_memory_id(self, mock_send_response, mock_get_service_client): + """Test successful CREATE operation with provided memory ID.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + # Memory creation should not be called since memory ID is provided + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime-id", + "agentRuntimeId": "test-runtime-id", + } + + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime", + "agentRuntimeId": "runtime-id-123", + } + + # Set AWS_REGION environment variable for the test + with patch.dict("os.environ", {"AWS_REGION": "us-east-1"}): + execute(self.base_event, self.mock_context) + + mock_get_service_client.assert_called_with("bedrock-agentcore-control") + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" # status parameter + + # Verify that create_agent_runtime was called with authorizer configuration + create_runtime_calls = mock_client.create_agent_runtime.call_args_list + assert len(create_runtime_calls) == 1 + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_create_with_custom_allowed_clients(self, mock_send_response, mock_get_service_client): + """Test CREATE operation with custom allowed clients.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime", + "agentRuntimeId": "runtime-id-123", + } + + # Add custom allowed clients to the event + self.base_event["ResourceProperties"]["AllowedClients"] = ["client-1", "client-2"] + + execute(self.base_event, self.mock_context) + + # Verify that create_agent_runtime was called with custom allowed clients + create_runtime_calls = mock_client.create_agent_runtime.call_args_list + assert len(create_runtime_calls) == 1 + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_update_with_cognito_authorizer(self, mock_send_response, mock_get_service_client): + """Test successful UPDATE operation with Cognito authorizer configuration.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["RequestType"] = "Update" + + mock_client.list_agent_runtimes.return_value = { + "agentRuntimes": [{"agentRuntimeName": "test-runtime", "agentRuntimeId": "runtime-id-123"}] + } + + mock_client.get_agent_runtime.return_value = { + "agentRuntimeArtifact": {"containerConfiguration": {"containerUri": "old-image-uri"}}, + "roleArn": "old-role-arn", + "networkConfiguration": {"networkMode": "PUBLIC"}, + "environmentVariables": { + "AGENT_CONFIG_TABLE": "test-config-table", + "AGENT_CONFIG_KEY": "12345678-1234-1234-1234-123456789012", + }, + } + + mock_client.update_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime", + "agentRuntimeId": "runtime-id-123", + } + + # Set AWS_REGION environment variable for the test + with patch.dict("os.environ", {"AWS_REGION": "us-east-1"}): + execute(self.base_event, self.mock_context) + + mock_get_service_client.assert_called_with("bedrock-agentcore-control") + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" + + # Verify that update_agent_runtime was called with authorizer configuration + update_runtime_calls = mock_client.update_agent_runtime.call_args_list + assert len(update_runtime_calls) == 1 + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_create_success(self, mock_send_response, mock_get_service_client): + """Test successful CREATE operation with bedrock-agentcore client initialization.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime", + "agentRuntimeId": "runtime-id-123", + } + + execute(self.base_event, self.mock_context) + + mock_get_service_client.assert_called_with("bedrock-agentcore-control") + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" # status parameter + + response_data = call_args[0][3] + assert "AgentRuntimeArn" in response_data + assert "AgentRuntimeName" in response_data + assert response_data["AgentRuntimeName"] == "test-runtime" + assert ( + response_data["AgentRuntimeArn"] + == "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime" + ) + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_create_with_m2m_identity_name(self, mock_send_response, mock_get_service_client): + """Test that M2M_IDENTITY_NAME environment variable is correctly set.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime", + "agentRuntimeId": "runtime-id-123", + } + + with patch.dict("os.environ", {"AWS_REGION": "us-east-1"}): + execute(self.base_event, self.mock_context) + + # Verify create_agent_runtime was called + create_runtime_calls = mock_client.create_agent_runtime.call_args_list + assert len(create_runtime_calls) == 1 + + runtime_request = create_runtime_calls[0][1] # Get keyword arguments + env_vars = runtime_request.get("environmentVariables", {}) + + # Verify M2M_IDENTITY_NAME is set correctly + # UseCaseUUID is "53e345af-deb5-45a6-8e26-c96854eb4a4d" + # First segment after split by '-' is "53e345af" + assert "M2M_IDENTITY_NAME" in env_vars + assert env_vars["M2M_IDENTITY_NAME"] == "gaab-oauth-provider-53e345af" + + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" # status parameter + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_update_success(self, mock_send_response, mock_get_service_client): + """Test successful UPDATE operation.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["RequestType"] = "Update" + + mock_client.list_agent_runtimes.return_value = { + "agentRuntimes": [{"agentRuntimeName": "test-runtime", "agentRuntimeId": "runtime-id-123"}] + } + + mock_client.get_agent_runtime.return_value = { + "agentRuntimeArtifact": {"containerConfiguration": {"containerUri": "old-image-uri"}}, + "roleArn": "old-role-arn", + "networkConfiguration": {"networkMode": "PUBLIC"}, + "environmentVariables": { + "AGENT_CONFIG_TABLE": "test-config-table", + "AGENT_CONFIG_KEY": "12345678-1234-1234-1234-123456789012", + }, + } + + mock_client.update_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime" + } + + execute(self.base_event, self.mock_context) + + mock_get_service_client.assert_called_with("bedrock-agentcore-control") + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_delete_success(self, mock_send_response, mock_get_service_client): + """Test successful DELETE operation.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["RequestType"] = "Delete" + + # Mock the delete operation to return successfully without any runtime found + mock_client.list_agent_runtimes.return_value = {"agentRuntimes": []} + + execute(self.base_event, self.mock_context) + + mock_get_service_client.assert_called_with("bedrock-agentcore-control") + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_with_memory_enabled(self, mock_send_response, mock_get_service_client): + """Test CREATE operation with long-term memory enabled.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["ResourceProperties"]["EnableLongTermMemory"] = "Yes" + self.base_event["ResourceProperties"]["MemoryStrategy"] = "user_preferences" + + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime", + "agentRuntimeId": "runtime-id-123", + } + + execute(self.base_event, self.mock_context) + + mock_get_service_client.assert_called_with("bedrock-agentcore-control") + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_client_error_handling(self, mock_send_response, mock_get_service_client): + """Test handling of AWS ClientError.""" + from botocore.exceptions import ClientError + + error_response = {"Error": {"Code": "ValidationException", "Message": "Invalid parameter value"}} + mock_get_service_client.side_effect = ClientError(error_response, "CreateAgentRuntime") + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" + + reason = call_args[1]["reason"] + assert "ValidationException" in reason + assert "Invalid parameter value" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_unexpected_error_handling(self, mock_send_response, mock_get_service_client): + """Test handling of unexpected errors.""" + mock_get_service_client.side_effect = Exception("Unexpected error occurred") + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" + + reason = call_args[1]["reason"] + assert "Unexpected Error" in reason + assert "Unexpected error occurred" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_missing_required_parameter(self, mock_send_response, mock_get_service_client): + """Test handling when required parameters are missing.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + del self.base_event["ResourceProperties"]["AgentRuntimeName"] + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_missing_resource_properties(self, mock_send_response, mock_get_service_client): + """Test execute function when ResourceProperties is missing from event.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + event_without_props = { + "RequestType": "Create", + "ResponseURL": "https://test-response-url.com", + "StackId": "test-stack-id", + "RequestId": "test-request-id", + "LogicalResourceId": "test-logical-id", + } + + execute(event_without_props, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "Missing ResourceProperties in CloudFormation event" in reason + + physical_resource_id = call_args[0][4] + assert physical_resource_id == "agent-runtime-unknown" + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_missing_request_type(self, mock_send_response, mock_get_service_client): + """Test execute function when RequestType is missing from event.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + event_without_type = { + "ResourceProperties": self.base_event["ResourceProperties"], + "ResponseURL": "https://test-response-url.com", + "StackId": "test-stack-id", + "RequestId": "test-request-id", + "LogicalResourceId": "test-logical-id", + } + + execute(event_without_type, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "Missing RequestType in CloudFormation event" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_unknown_request_type(self, mock_send_response, mock_get_service_client): + """Test execute function with unknown RequestType.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["RequestType"] = "Unknown" + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "Unknown CloudFormation request type: Unknown" in reason + assert "Expected Create, Update, or Delete" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_missing_multiple_required_parameters(self, mock_send_response, mock_get_service_client): + """Test execute function when multiple required parameters are missing.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + del self.base_event["ResourceProperties"]["AgentRuntimeName"] + del self.base_event["ResourceProperties"]["AgentImageUri"] + del self.base_event["ResourceProperties"]["ExecutionRoleArn"] + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "Missing required parameter" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_physical_resource_id_generation(self, mock_send_response, mock_get_service_client): + """Test that physical resource ID is correctly generated and used.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime", + "agentRuntimeId": "runtime-id-123", + } + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + + physical_resource_id = call_args[0][4] + assert physical_resource_id == "runtime-id-123" + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_response_data_structure(self, mock_send_response, mock_get_service_client): + """Test that response data structure is correct for successful operations.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + expected_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime" + + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": expected_arn, + "agentRuntimeId": "runtime-id-123", + } + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + + assert call_args[0][2] == "SUCCESS" # status + response_data = call_args[0][3] + + assert isinstance(response_data, dict) + assert "AgentRuntimeArn" in response_data + assert "AgentRuntimeName" in response_data + assert "AgentRuntimeId" in response_data + assert "AgentMemoryId" in response_data + assert response_data["AgentRuntimeArn"] == expected_arn + assert response_data["AgentRuntimeName"] == "test-runtime" + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_delete_response_data_structure(self, mock_send_response, mock_get_service_client): + """Test response data structure for DELETE operations.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["RequestType"] = "Delete" + + # Mock successful delete with no runtime found + mock_client.list_agent_runtimes.return_value = {"agentRuntimes": []} + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + + assert call_args[0][2] == "SUCCESS" # status + response_data = call_args[0][3] + + assert response_data["AgentRuntimeArn"] == "" + assert response_data["AgentRuntimeName"] == "test-runtime" + assert "AgentRuntimeId" in response_data + assert "AgentMemoryId" in response_data + + @patch("utils.agent_core_utils.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_no_credentials_error(self, mock_send_response, mock_get_service_client): + """Test execute function when AWS credentials are not available.""" + from botocore.exceptions import NoCredentialsError + + mock_get_service_client.side_effect = NoCredentialsError() + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "AWS credentials not found" in reason + assert "Lambda execution role has proper permissions" in reason + + @patch("utils.agent_core_utils.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_endpoint_connection_error(self, mock_send_response, mock_get_service_client): + """Test execute function when cannot connect to bedrock-agentcore service.""" + from botocore.exceptions import EndpointConnectionError + + mock_get_service_client.side_effect = EndpointConnectionError( + endpoint_url="https://bedrock-agentcore.us-east-1.amazonaws.com" + ) + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "Cannot connect to bedrock-agentcore service" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_client_error_with_context(self, mock_send_response, mock_get_service_client): + """Test execute function ClientError handling includes operation context.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + error_response = { + "Error": {"Code": "ValidationException", "Message": "Invalid runtime configuration"}, + "ResponseMetadata": {"RequestId": "test-request-id-123"}, + } + mock_client.create_agent_runtime.side_effect = ClientError(error_response, "CreateAgentRuntime") + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "ValidationException" in reason + assert "Invalid runtime configuration" in reason + assert "deploy AgentCore runtime" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_error_propagation_from_runtime_update(self, mock_send_response, mock_get_service_client): + """Test that errors from runtime update are properly propagated.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["RequestType"] = "Update" + + mock_client.list_agent_runtimes.return_value = { + "agentRuntimes": [{"agentRuntimeName": "test-runtime", "agentRuntimeId": "runtime-id-123"}] + } + + mock_client.get_agent_runtime.return_value = { + "agentRuntimeArtifact": {"containerConfiguration": {"containerUri": "old-image"}}, + "roleArn": "old-role", + "networkConfiguration": {"networkMode": "PUBLIC"}, + "environmentVariables": {"AGENT_CONFIG_TABLE": "test-table", "AGENT_CONFIG_KEY": "test-uuid"}, + } + + error_response = {"Error": {"Code": "AccessDeniedException", "Message": "Insufficient permissions"}} + mock_client.update_agent_runtime.side_effect = ClientError(error_response, "UpdateAgentRuntime") + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "AccessDeniedException" in reason + assert "Insufficient permissions" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_error_propagation_from_runtime_deletion(self, mock_send_response, mock_get_service_client): + """Test that errors from runtime deletion are properly propagated.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["RequestType"] = "Delete" + + mock_client.list_agent_runtimes.return_value = { + "agentRuntimes": [{"agentRuntimeName": "test-runtime", "agentRuntimeId": "runtime-id-123"}] + } + + mock_client.get_agent_runtime.return_value = {"environmentVariables": {"AGENT_CONFIG_TABLE": "test-table"}} + + error_response = {"Error": {"Code": "ConflictException", "Message": "Runtime is in use"}} + mock_client.delete_agent_runtime.side_effect = ClientError(error_response, "DeleteAgentRuntime") + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "ConflictException" in reason + assert "Runtime is in use" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_logging_and_error_context(self, mock_send_response, mock_get_service_client): + """Test that proper logging and error context is maintained throughout execution.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + error_response = {"Error": {"Code": "ServiceException", "Message": "Internal service error"}} + mock_client.create_agent_runtime.side_effect = ClientError(error_response, "CreateAgentRuntime") + + with patch("utils.agent_core_utils.logger") as mock_logger: + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + mock_logger.info.assert_called() + mock_logger.error.assert_called() + + reason = call_args[1]["reason"] + assert "ServiceException" in reason + assert "Internal service error" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_parameter_validation_edge_cases(self, mock_send_response, mock_get_service_client): + """Test parameter validation edge cases in execute function.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + self.base_event["ResourceProperties"]["AgentRuntimeName"] = "" + self.base_event["ResourceProperties"]["AgentImageUri"] = "" + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status - should fail due to invalid ECR URI + + reason = call_args[1]["reason"] + assert "Invalid ECR URI format" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_update_with_memory_strategy_parameter(self, mock_send_response, mock_get_service_client): + """Test UPDATE operation includes MemoryStrategy parameter handling.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + self.base_event["RequestType"] = "Update" + self.base_event["ResourceProperties"]["MemoryStrategy"] = "conversation_summary" + + mock_client.list_agent_runtimes.return_value = { + "agentRuntimes": [{"agentRuntimeName": "test-runtime", "agentRuntimeId": "runtime-id-123"}] + } + + mock_client.get_agent_runtime.return_value = { + "agentRuntimeArtifact": {"containerConfiguration": {"containerUri": "old-image"}}, + "roleArn": "old-role", + "networkConfiguration": {"networkMode": "PUBLIC"}, + "environmentVariables": {"AGENT_CONFIG_TABLE": "test-table", "AGENT_CONFIG_KEY": "test-uuid"}, + } + + mock_client.update_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime" + } + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" # status + + mock_client.update_agent_runtime.assert_called_once() + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_validation_exception_error(self, mock_send_response, mock_get_service_client): + """Test execute function with ValidationException error.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + error = ClientError( + {"Error": {"Code": "ValidationException", "Message": "Invalid parameter"}}, "CreateAgentRuntime" + ) + mock_client.create_agent_runtime.side_effect = error + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "ValidationException" in reason + assert "Invalid parameter" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_resource_not_found_exception_error(self, mock_send_response, mock_get_service_client): + """Test execute function with ResourceNotFoundException error.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + error = ClientError( + {"Error": {"Code": "ResourceNotFoundException", "Message": "Resource not found"}}, "CreateAgentRuntime" + ) + mock_client.create_agent_runtime.side_effect = error + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "ResourceNotFoundException" in reason + assert "Resource not found" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_access_denied_exception_error(self, mock_send_response, mock_get_service_client): + """Test execute function with AccessDeniedException error.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + error = ClientError( + {"Error": {"Code": "AccessDeniedException", "Message": "Access denied"}}, "CreateAgentRuntime" + ) + mock_client.create_agent_runtime.side_effect = error + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "AccessDeniedException" in reason + assert "Access denied" in reason + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_conflict_exception_error(self, mock_send_response, mock_get_service_client): + """Test execute function with ConflictException error.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + error = ClientError( + {"Error": {"Code": "ConflictException", "Message": "Resource conflict"}}, "CreateAgentRuntime" + ) + mock_client.create_agent_runtime.side_effect = error + + execute(self.base_event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" # status + + reason = call_args[1]["reason"] + assert "ConflictException" in reason + assert "Resource conflict" in reason + + +class TestMemoryConfiguration: + """Test cases for memory configuration functionality.""" + + def setup_method(self): + """Set up test fixtures for memory configuration tests.""" + self.runtime_name = "test-runtime" + self.memory_config_name = f"{self.runtime_name}_memory_config" + + +class TestMemoryCleanupOnDeletion: + """Test cases for memory configuration cleanup during runtime deletion.""" + + def setup_method(self): + """Set up test fixtures for memory cleanup tests.""" + self.runtime_name = "test-runtime" + self.memory_id = "test-memory-id-cleanup" + + @patch("operations.deploy_agent_core.get_service_client") + def test_delete_agent_runtime(self, mock_get_service_client): + """Test delete_agent_runtime.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + mock_client.list_agent_runtimes.return_value = { + "agentRuntimes": [{"agentRuntimeName": self.runtime_name, "agentRuntimeId": "runtime-id-123"}] + } + + mock_client.delete_agent_runtime.return_value = {} + + delete_agent_runtime(self.runtime_name) + + mock_client.list_agent_runtimes.assert_called_once() + + mock_client.delete_agent_runtime.assert_called_once_with(agentRuntimeId="runtime-id-123") + + +class TestHelperFunctions: + """Test cases for helper functions.""" + + def test_format_error_message_basic(self): + """Test format_error_message with basic parameters.""" + result = format_error_message("create_runtime", "ValidationException", "Invalid parameter") + expected = "Failed to create_runtime: ValidationException - Invalid parameter" + assert result == expected + + def test_format_error_message_with_context(self): + """Test format_error_message with context information.""" + context = {"runtime_name": "test-runtime", "image_uri": "test-image"} + result = format_error_message("create_runtime", "ValidationException", "Invalid parameter", context) + expected = "Failed to create_runtime: ValidationException - Invalid parameter (Context: runtime_name=test-runtime, image_uri=test-image)" + assert result == expected + + def test_format_error_message_with_none_context_values(self): + """Test format_error_message filters out None context values.""" + context = {"runtime_name": "test-runtime", "image_uri": None, "role_arn": "test-role"} + result = format_error_message("create_runtime", "ValidationException", "Invalid parameter", context) + expected = "Failed to create_runtime: ValidationException - Invalid parameter (Context: runtime_name=test-runtime, role_arn=test-role)" + assert result == expected + + def test_validate_event_properties_success(self): + """Test validate_event_properties with valid event.""" + event = {"ResourceProperties": {"test": "value"}, "RequestType": "Create"} + validate_event_properties(event) # Should not raise + + def test_validate_event_properties_missing_resource_properties(self): + """Test validate_event_properties with missing ResourceProperties.""" + event = {"RequestType": "Create"} + with pytest.raises(ValueError, match="Missing ResourceProperties in CloudFormation event"): + validate_event_properties(event) + + def test_validate_event_properties_missing_request_type(self): + """Test validate_event_properties with missing RequestType.""" + event = {"ResourceProperties": {"test": "value"}} + with pytest.raises(ValueError, match="Missing RequestType in CloudFormation event"): + validate_event_properties(event) + + def test_extract_resource_properties_success(self): + """Test _extract_resource_properties with all required fields.""" + resource_properties = { + "AgentRuntimeName": "test-runtime", + "AgentImageUri": "test-image", + "ExecutionRoleArn": "test-role", + "UseCaseConfigRecordKey": "test-key", + "UseCaseConfigTableName": "test-table", + "UseCaseUUID": "test-uuid", + "MemoryId": "memid", + } + + result = _extract_resource_properties(resource_properties) + + assert result["agent_runtime_name"] == "test-runtime" + assert result["agent_image_uri"] == "test-image" + assert result["execution_role_arn"] == "test-role" + assert result["use_case_config_key"] == "test-key" + assert result["use_case_config_table_name"] == "test-table" + assert result["use_case_uuid"] == "test-uuid" + assert result["memory_id"] == "memid" + + def test_extract_resource_properties_missing_field(self): + """Test _extract_resource_properties with missing required field.""" + resource_properties = { + "AgentRuntimeName": "test-runtime", + # Missing AgentImageUri + "ExecutionRoleArn": "test-role", + "UseCaseConfigRecordKey": "test-key", + "UseCaseConfigTableName": "test-table", + "UseCaseUUID": "test-uuid", + } + + with pytest.raises(ValueError, match="Missing required parameter: AgentImageUri"): + _extract_resource_properties(resource_properties) + + @patch("utils.agent_core_utils.logger") + def test_handle_client_error_basic(self, mock_logger): + """Test handle_client_error with basic ClientError.""" + error_response = { + "Error": {"Code": "ValidationException", "Message": "Invalid parameter"}, + "ResponseMetadata": {"RequestId": "test-request-123"}, + } + client_error = ClientError(error_response, "CreateAgentRuntime") + + with pytest.raises(ClientError): + handle_client_error(client_error, "create_runtime") + + mock_logger.error.assert_called_once() + + @patch("utils.agent_core_utils.logger") + def test_handle_client_error_with_context(self, mock_logger): + """Test handle_client_error with context information.""" + error_response = { + "Error": {"Code": "ValidationException", "Message": "Invalid parameter"}, + "ResponseMetadata": {"RequestId": "test-request-123"}, + } + client_error = ClientError(error_response, "CreateAgentRuntime") + context = {"runtime_name": "test-runtime"} + + with pytest.raises(ClientError): + handle_client_error(client_error, "create_runtime", context) + + mock_logger.error.assert_called_once() + call_args = mock_logger.error.call_args + assert "context" in call_args[1]["extra"] + assert call_args[1]["extra"]["context"] == context + + +class TestDeployAgentCoreMultimodality: + """Test cases for multimodality functionality in deploy_agent_core operation.""" + + def setup_method(self): + """Set up test fixtures.""" + self.mock_context = Mock() + self.mock_context.log_stream_name = "test-log-stream" + + def test_extract_resource_properties_with_multimodal_data(self): + """Test _extract_resource_properties includes multimodal data parameters when both are provided.""" + resource_properties = { + "AgentRuntimeName": "test-runtime", + "AgentImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/test-repo:latest", + "ExecutionRoleArn": "arn:aws:iam::123456789012:role/test-role", + "UseCaseConfigRecordKey": "12345678-1234-1234-1234-123456789012", + "UseCaseConfigTableName": "test-config-table", + "UseCaseUUID": "53e345af-deb5-45a6-8e26-c96854eb4a4d", + "MemoryId": "test-memory-id-123", + "MultimodalDataMetadataTable": "test-multimodal-table", + "MultimodalDataBucket": "test-multimodal-bucket", + } + + result = _extract_resource_properties(resource_properties) + + assert result["multimodal_data_metadata_table"] == "test-multimodal-table" + assert result["multimodal_data_bucket"] == "test-multimodal-bucket" + + def test_extract_resource_properties_default_multimodal_values(self): + """Test _extract_resource_properties uses default empty values when multimodal parameters are not provided.""" + resource_properties = { + "AgentRuntimeName": "test-runtime", + "AgentImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/test-repo:latest", + "ExecutionRoleArn": "arn:aws:iam::123456789012:role/test-role", + "UseCaseConfigRecordKey": "12345678-1234-1234-1234-123456789012", + "UseCaseConfigTableName": "test-config-table", + "UseCaseUUID": "53e345af-deb5-45a6-8e26-c96854eb4a4d", + } + + result = _extract_resource_properties(resource_properties) + + assert result["multimodal_data_metadata_table"] == "" + assert result["multimodal_data_bucket"] == "" + + def test_extract_resource_properties_partial_multimodal_data_table_only(self): + """Test _extract_resource_properties when only table is provided (should fail validation).""" + resource_properties = { + "AgentRuntimeName": "test-runtime", + "AgentImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/test-repo:latest", + "ExecutionRoleArn": "arn:aws:iam::123456789012:role/test-role", + "UseCaseConfigRecordKey": "12345678-1234-1234-1234-123456789012", + "UseCaseConfigTableName": "test-config-table", + "UseCaseUUID": "53e345af-deb5-45a6-8e26-c96854eb4a4d", + "MultimodalDataMetadataTable": "test-multimodal-table", + } + + with pytest.raises(ValueError) as exc_info: + _extract_resource_properties(resource_properties) + + assert "Both MultimodalDataBucket and MultimodalDataMetadataTable must be provided together" in str( + exc_info.value + ) + + def test_extract_resource_properties_partial_multimodal_data_bucket_only(self): + """Test _extract_resource_properties when only bucket is provided (should fail validation).""" + resource_properties = { + "AgentRuntimeName": "test-runtime", + "AgentImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/test-repo:latest", + "ExecutionRoleArn": "arn:aws:iam::123456789012:role/test-role", + "UseCaseConfigRecordKey": "12345678-1234-1234-1234-123456789012", + "UseCaseConfigTableName": "test-config-table", + "UseCaseUUID": "53e345af-deb5-45a6-8e26-c96854eb4a4d", + "MultimodalDataBucket": "test-multimodal-bucket", + } + + with pytest.raises(ValueError) as exc_info: + _extract_resource_properties(resource_properties) + + assert "Both MultimodalDataBucket and MultimodalDataMetadataTable must be provided together" in str( + exc_info.value + ) + + @patch.dict("os.environ", {"AWS_REGION": "us-west-2"}) + def test_build_runtime_environment_variables_with_multimodal_data(self): + """Test _build_runtime_environment_variables includes multimodal env vars when both table and bucket are provided.""" + env_vars = _build_runtime_environment_variables( + config_table_name="test-config-table", + use_case_config_key="12345678-1234-1234-1234-123456789012", + use_case_uuid="53e345af-deb5-45a6-8e26-c96854eb4a4d", + memory_id="test-memory-id-123", + multimodal_data_metadata_table="test-multimodal-table", + multimodal_data_bucket="test-multimodal-bucket", + ) + + # Verify core environment variables + assert env_vars["USE_CASE_TABLE_NAME"] == "test-config-table" + assert env_vars["USE_CASE_CONFIG_KEY"] == "12345678-1234-1234-1234-123456789012" + assert env_vars["USE_CASE_UUID"] == "53e345af-deb5-45a6-8e26-c96854eb4a4d" + assert env_vars["AWS_REGION"] == "us-west-2" + assert env_vars["M2M_IDENTITY_NAME"] == "gaab-oauth-provider-53e345af" + assert env_vars["MEMORY_ID"] == "test-memory-id-123" + + # Verify multimodal environment variables are included + assert env_vars["MULTIMODAL_METADATA_TABLE_NAME"] == "test-multimodal-table" + assert env_vars["MULTIMODAL_DATA_BUCKET"] == "test-multimodal-bucket" + + @patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) + def test_build_runtime_environment_variables_without_multimodal_data(self): + """Test _build_runtime_environment_variables excludes multimodal env vars when not provided.""" + env_vars = _build_runtime_environment_variables( + config_table_name="test-config-table", + use_case_config_key="12345678-1234-1234-1234-123456789012", + use_case_uuid="53e345af-deb5-45a6-8e26-c96854eb4a4d", + ) + + # Verify core environment variables + assert env_vars["USE_CASE_TABLE_NAME"] == "test-config-table" + assert env_vars["USE_CASE_CONFIG_KEY"] == "12345678-1234-1234-1234-123456789012" + assert env_vars["USE_CASE_UUID"] == "53e345af-deb5-45a6-8e26-c96854eb4a4d" + assert env_vars["AWS_REGION"] == "us-east-1" + assert env_vars["M2M_IDENTITY_NAME"] == "gaab-oauth-provider-53e345af" + + # Verify multimodal environment variables are NOT included + assert "MULTIMODAL_METADATA_TABLE_NAME" not in env_vars + assert "MULTIMODAL_DATA_BUCKET" not in env_vars + assert "MEMORY_ID" not in env_vars + + @patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) + def test_build_runtime_environment_variables_empty_multimodal_values(self): + """Test _build_runtime_environment_variables with empty string multimodal values (should not be included).""" + env_vars = _build_runtime_environment_variables( + config_table_name="test-config-table", + use_case_config_key="12345678-1234-1234-1234-123456789012", + use_case_uuid="53e345af-deb5-45a6-8e26-c96854eb4a4d", + multimodal_data_metadata_table="", + multimodal_data_bucket="", + ) + + # Verify core environment variables are included + assert env_vars["USE_CASE_UUID"] == "53e345af-deb5-45a6-8e26-c96854eb4a4d" + + # Verify empty multimodal values are not included as environment variables + assert "MULTIMODAL_METADATA_TABLE_NAME" not in env_vars + assert "MULTIMODAL_DATA_BUCKET" not in env_vars + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_create_with_multimodal_data(self, mock_send_response, mock_get_service_client): + """Test CREATE operation with multimodal data parameters.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + mock_client.create_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime", + "agentRuntimeId": "runtime-id-123", + } + + event = { + "RequestType": "Create", + "ResourceProperties": { + "AgentRuntimeName": "test-runtime", + "AgentImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/test-repo:latest", + "ExecutionRoleArn": "arn:aws:iam::123456789012:role/test-role", + "UseCaseConfigRecordKey": "12345678-1234-1234-1234-123456789012", + "UseCaseUUID": "53e345af-deb5-45a6-8e26-c96854eb4a4d", + "UseCaseConfigTableName": "test-config-table", + "MemoryId": "test-memory-id-123", + "MultimodalDataMetadataTable": "test-multimodal-table", + "MultimodalDataBucket": "test-multimodal-bucket", + }, + "ResponseURL": "https://test-response-url.com", + "StackId": "test-stack-id", + "RequestId": "test-request-id", + "LogicalResourceId": "test-logical-id", + } + + with patch.dict("os.environ", {"AWS_REGION": "us-east-1"}): + execute(event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" + + # Verify response data includes all expected fields + response_data = call_args[0][3] + assert "AgentRuntimeArn" in response_data + assert "AgentRuntimeName" in response_data + assert "AgentRuntimeId" in response_data + assert "AgentMemoryId" in response_data + + @patch("operations.deploy_agent_core.get_service_client") + @patch("operations.deploy_agent_core.send_response") + def test_execute_update_with_multimodal_data(self, mock_send_response, mock_get_service_client): + """Test UPDATE operation with multimodal data parameters.""" + mock_client = Mock() + mock_get_service_client.return_value = mock_client + + mock_client.list_agent_runtimes.return_value = { + "agentRuntimes": [{"agentRuntimeName": "test-runtime", "agentRuntimeId": "runtime-id-123"}] + } + + mock_client.get_agent_runtime.return_value = { + "agentRuntimeArtifact": {"containerConfiguration": {"containerUri": "old-image-uri"}}, + "roleArn": "old-role-arn", + "networkConfiguration": {"networkMode": "PUBLIC"}, + "environmentVariables": { + "USE_CASE_TABLE_NAME": "test-config-table", + "USE_CASE_CONFIG_KEY": "12345678-1234-1234-1234-123456789012", + }, + } + + mock_client.update_agent_runtime.return_value = { + "agentRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:agent-runtime/test-runtime" + } + + event = { + "RequestType": "Update", + "ResourceProperties": { + "AgentRuntimeName": "test-runtime", + "AgentImageUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/test-repo:latest", + "ExecutionRoleArn": "arn:aws:iam::123456789012:role/test-role", + "UseCaseConfigRecordKey": "12345678-1234-1234-1234-123456789012", + "UseCaseUUID": "53e345af-deb5-45a6-8e26-c96854eb4a4d", + "UseCaseConfigTableName": "test-config-table", + "MemoryId": "test-memory-id-123", + "MultimodalDataMetadataTable": "updated-multimodal-table", + "MultimodalDataBucket": "updated-multimodal-bucket", + }, + "ResponseURL": "https://test-response-url.com", + "StackId": "test-stack-id", + "RequestId": "test-request-id", + "LogicalResourceId": "test-logical-id", + } + + with patch.dict("os.environ", {"AWS_REGION": "us-east-1"}): + execute(event, self.mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "SUCCESS" + + # Verify update_agent_runtime was called with multimodal parameters + update_call_args = mock_client.update_agent_runtime.call_args + env_vars = update_call_args[1]["environmentVariables"] + assert env_vars["MULTIMODAL_METADATA_TABLE_NAME"] == "updated-multimodal-table" + assert env_vars["MULTIMODAL_DATA_BUCKET"] == "updated-multimodal-bucket" + + +class TestEnsureEcrImageExists: + """Test cases for _ensure_ecr_image_exists function.""" + + def setup_method(self): + """Set up test fixtures.""" + self.mock_context = Mock() + self.mock_context.log_stream_name = "test-log-stream" + self.mock_ecr_client = Mock() + + def test_invalid_image_uri_format(self): + """Test that invalid ECR URI format raises ValueError.""" + invalid_uri = "invalid-uri-format" + + with pytest.raises(ValueError, match="Invalid ECR URI format"): + _ensure_ecr_image_exists(invalid_uri) + + @patch('operations.deploy_agent_core.get_service_client') + def test_image_already_exists(self, mock_get_client): + """Test when ECR image already exists.""" + mock_get_client.return_value = self.mock_ecr_client + self.mock_ecr_client.describe_images.return_value = {"imageDetails": []} + + image_uri = "276036092881.dkr.ecr.us-west-2.amazonaws.com/test-repo:latest" + + _ensure_ecr_image_exists(image_uri) + + self.mock_ecr_client.describe_images.assert_called_once_with( + registryId="276036092881", + repositoryName="test-repo", + imageIds=[{'imageTag': 'latest'}] + ) + self.mock_ecr_client.batch_get_image.assert_not_called() + + @patch('operations.deploy_agent_core.retry_with_backoff') + @patch('operations.deploy_agent_core.get_service_client') + def test_image_not_exists_triggers_pull_through_cache(self, mock_get_client, mock_retry): + """Test when ECR image doesn't exist and pull-through cache is triggered.""" + mock_get_client.return_value = self.mock_ecr_client + + # First describe_images call fails (image doesn't exist) + self.mock_ecr_client.describe_images.side_effect = ClientError( + error_response={'Error': {'Code': 'ImageNotFoundException', 'Message': 'Image not found'}}, + operation_name='DescribeImages' + ) + + image_uri = "276036092881.dkr.ecr.us-west-2.amazonaws.com/test-repo:v1.0.0" + + _ensure_ecr_image_exists(image_uri) + + expected_params = { + "registryId": "276036092881", + "repositoryName": "test-repo", + "imageIds": [{'imageTag': 'v1.0.0'}] + } + + # Verify describe_images was called first + self.mock_ecr_client.describe_images.assert_called_with(**expected_params) + + # Verify batch_get_image was called to trigger pull-through cache + self.mock_ecr_client.batch_get_image.assert_called_once_with(**expected_params) + + # Verify retry_with_backoff was called to wait for image + mock_retry.assert_called_once_with(self.mock_ecr_client.describe_images, max_attempts=10, base_delay=2, **expected_params) + + @patch('operations.deploy_agent_core.retry_with_backoff') + @patch('operations.deploy_agent_core.get_service_client') + def test_repository_not_exists_triggers_pull_through_cache(self, mock_get_client, mock_retry): + """Test when ECR repository doesn't exist and pull-through cache is triggered.""" + mock_get_client.return_value = self.mock_ecr_client + + # First describe_images call fails (repository doesn't exist) + self.mock_ecr_client.describe_images.side_effect = ClientError( + error_response={'Error': {'Code': 'RepositoryNotFoundException', 'Message': 'Repository not found'}}, + operation_name='DescribeImages' + ) + + image_uri = "276036092881.dkr.ecr.us-west-2.amazonaws.com/new-repo:latest" + + _ensure_ecr_image_exists(image_uri) + + expected_params = { + "registryId": "276036092881", + "repositoryName": "new-repo", + "imageIds": [{'imageTag': 'latest'}] + } + + self.mock_ecr_client.batch_get_image.assert_called_once_with(**expected_params) + mock_retry.assert_called_once_with(self.mock_ecr_client.describe_images, max_attempts=10, base_delay=2, **expected_params) + + @patch('operations.deploy_agent_core.handle_client_error') + @patch('operations.deploy_agent_core.get_service_client') + def test_unexpected_client_error_raises_exception(self, mock_get_client, mock_handle_error): + """Test that unexpected ClientError is handled and re-raised.""" + mock_get_client.return_value = self.mock_ecr_client + + # Unexpected error + unexpected_error = ClientError( + error_response={'Error': {'Code': 'AccessDeniedException', 'Message': 'Access denied'}}, + operation_name='DescribeImages' + ) + self.mock_ecr_client.describe_images.side_effect = unexpected_error + + image_uri = "276036092881.dkr.ecr.us-west-2.amazonaws.com/test-repo:latest" + + with pytest.raises(ClientError): + _ensure_ecr_image_exists(image_uri) + + mock_handle_error.assert_called_once() + + def test_image_uri_without_tag_defaults_to_latest(self): + """Test that image URI without tag defaults to 'latest'.""" + + with patch('operations.deploy_agent_core.get_service_client') as mock_get_client: + mock_get_client.return_value = self.mock_ecr_client + self.mock_ecr_client.describe_images.return_value = {"imageDetails": []} + + image_uri = "276036092881.dkr.ecr.us-west-2.amazonaws.com/test-repo" + + _ensure_ecr_image_exists(image_uri) + + self.mock_ecr_client.describe_images.assert_called_once_with( + registryId="276036092881", + repositoryName="test-repo", + imageIds=[{'imageTag': 'latest'}] + ) + + def test_pull_through_cache_repository_path(self): + """Test that pull-through cache repository paths are handled correctly.""" + + with patch('operations.deploy_agent_core.get_service_client') as mock_get_client: + mock_get_client.return_value = self.mock_ecr_client + self.mock_ecr_client.describe_images.return_value = {"imageDetails": []} + + # Pull-through cache URI with namespace + image_uri = "276036092881.dkr.ecr.us-west-2.amazonaws.com/ecr-public/bedrock/agent-runtime:v1.0.0" + + _ensure_ecr_image_exists(image_uri) + + self.mock_ecr_client.describe_images.assert_called_once_with( + registryId="276036092881", + repositoryName="ecr-public/bedrock/agent-runtime", + imageIds=[{'imageTag': 'v1.0.0'}] + ) \ No newline at end of file diff --git a/source/lambda/custom-resource/test/operations/test_deploy_agent_core_memory.py b/source/lambda/custom-resource/test/operations/test_deploy_agent_core_memory.py new file mode 100644 index 00000000..dcd3591d --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_deploy_agent_core_memory.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch, MagicMock +from operations import deploy_agent_core_memory +from operations.operation_types import RESOURCE_PROPERTIES + + +@pytest.fixture +def mock_event(): + return { + RESOURCE_PROPERTIES: { + "MemoryName": "test-memory", + "AgentRuntimeName": "test-uuid-123", + "EnableLongTermMemory": "Yes" + }, + "RequestType": "Create" + } + + +@pytest.fixture +def mock_context(): + return Mock() + + +@pytest.fixture +def mock_bedrock_client(): + client = Mock() + client.create_memory.return_value = { + "memory": { + "id": "test-memory-id-123", + "strategies": + [ + { + "strategyId": "test-strategy-id", + "type": "SEMANTIC" + } + ] + }, + } + client.get_waiter.return_value.wait = Mock() + client.get_memory.return_value = { + "memory": {"strategies": []} + } + client.update_memory.return_value = {} + client.delete_memory.return_value = {} + return client + + +class TestDeployAgentCoreMemory: + + @patch('operations.deploy_agent_core_memory.send_response') + @patch('operations.deploy_agent_core_memory.get_service_client') + def test_create_memory_success(self, mock_get_client, mock_send_response, mock_event, mock_context, mock_bedrock_client): + mock_get_client.return_value = mock_bedrock_client + + deploy_agent_core_memory.execute(mock_event, mock_context) + + mock_bedrock_client.create_memory.assert_called_once() + mock_send_response.assert_called_once() + + # Verify response data + call_args = mock_send_response.call_args + response_data = call_args[0][3] + physical_resource_id = call_args[0][4] + + assert response_data["MemoryId"] == "test-memory-id-123" + assert physical_resource_id == "test-memory-id-123" + + @patch('operations.deploy_agent_core_memory.send_response') + @patch('operations.deploy_agent_core_memory.get_service_client') + def test_update_memory_success(self, mock_get_client, mock_send_response, mock_context, mock_bedrock_client): + mock_get_client.return_value = mock_bedrock_client + + update_event = { + RESOURCE_PROPERTIES: { + "MemoryName": "test-memory", + "AgentRuntimeName": "test-uuid-123", + "EnableLongTermMemory": "No" + }, + "RequestType": "Update", + "PhysicalResourceId": "existing-memory-id" + } + + deploy_agent_core_memory.execute(update_event, mock_context) + + mock_bedrock_client.get_memory.assert_called_once_with(memoryId="existing-memory-id") + mock_send_response.assert_called_once() + + @patch('operations.deploy_agent_core_memory.send_response') + @patch('operations.deploy_agent_core_memory.get_service_client') + def test_delete_memory_success(self, mock_get_client, mock_send_response, mock_context, mock_bedrock_client): + mock_get_client.return_value = mock_bedrock_client + + delete_event = { + RESOURCE_PROPERTIES: { + "MemoryName": "test-memory", + "AgentRuntimeName": "test-uuid-123", + "EnableLongTermMemory": "Yes" + }, + "RequestType": "Delete", + "PhysicalResourceId": "existing-memory-id" + } + + deploy_agent_core_memory.execute(delete_event, mock_context) + + mock_bedrock_client.delete_memory.assert_called_once_with(memoryId="existing-memory-id") + mock_send_response.assert_called_once() + + @patch('operations.deploy_agent_core_memory.send_response') + def test_missing_required_parameter(self, mock_send_response, mock_context): + invalid_event = { + RESOURCE_PROPERTIES: { + "EnableLongTermMemory": "Yes" + # Missing AgentRuntimeName + }, + "RequestType": "Create" + } + + deploy_agent_core_memory.execute(invalid_event, mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" + + def test_create_memory_configuration_with_long_term_memory(self, mock_bedrock_client): + with patch('operations.deploy_agent_core_memory.get_service_client', return_value=mock_bedrock_client): + memory_id, strategy_id = deploy_agent_core_memory.create_memory_configuration("Yes", "test-memory") + + assert memory_id == "test-memory-id-123" + assert strategy_id == "test-strategy-id" + mock_bedrock_client.create_memory.assert_called_once() + + call_args = mock_bedrock_client.create_memory.call_args[1] + assert "memoryStrategies" in call_args + assert call_args["memoryStrategies"][0]["semanticMemoryStrategy"]["name"] == "test-memory_semantic" + + def test_create_memory_configuration_without_long_term_memory(self, mock_bedrock_client): + with patch('operations.deploy_agent_core_memory.get_service_client', return_value=mock_bedrock_client): + memory_id, _strategy_id = deploy_agent_core_memory.create_memory_configuration("No", "test-memory") + + assert memory_id == "test-memory-id-123" + mock_bedrock_client.create_memory.assert_called_once() + + call_args = mock_bedrock_client.create_memory.call_args[1] + assert "memoryStrategies" not in call_args + + def test_update_memory_configuration_add_semantic_strategy(self, mock_bedrock_client): + mock_bedrock_client.get_memory.return_value = { + "memory": {"strategies": []} + } + + with patch('operations.deploy_agent_core_memory.get_service_client', return_value=mock_bedrock_client): + deploy_agent_core_memory.update_memory_configuration("test-memory-id", "Yes") + + mock_bedrock_client.update_memory.assert_called_once() + call_args = mock_bedrock_client.update_memory.call_args[1] + assert "addMemoryStrategies" in call_args["memoryStrategies"] + + def test_update_memory_configuration_remove_semantic_strategy(self, mock_bedrock_client): + mock_bedrock_client.get_memory.return_value = { + "memory": {"strategies": [{"type": "SEMANTIC", "strategyId": "strategy-123"}]} + } + + with patch('operations.deploy_agent_core_memory.get_service_client', return_value=mock_bedrock_client): + deploy_agent_core_memory.update_memory_configuration("test-memory-id", "No") + + mock_bedrock_client.update_memory.assert_called_once() + call_args = mock_bedrock_client.update_memory.call_args[1] + assert "deleteMemoryStrategies" in call_args["memoryStrategies"] + + def test_delete_memory_configuration_success(self, mock_bedrock_client): + with patch('operations.deploy_agent_core_memory.get_service_client', return_value=mock_bedrock_client): + deploy_agent_core_memory.delete_memory_configuration("test-memory-id") + + mock_bedrock_client.delete_memory.assert_called_once_with(memoryId="test-memory-id") + + def test_delete_memory_configuration_not_found(self, mock_bedrock_client): + from botocore.exceptions import ClientError + + mock_bedrock_client.delete_memory.side_effect = ClientError( + {"Error": {"Code": "ResourceNotFoundException", "Message": "Not found"}}, + "delete_memory" + ) + + with patch('operations.deploy_agent_core_memory.get_service_client', return_value=mock_bedrock_client): + # Should not raise exception + deploy_agent_core_memory.delete_memory_configuration("test-memory-id") + + mock_bedrock_client.delete_memory.assert_called_once_with(memoryId="test-memory-id") diff --git a/source/lambda/custom-resource/test/operations/test_deploy_mcp_gateway.py b/source/lambda/custom-resource/test/operations/test_deploy_mcp_gateway.py new file mode 100644 index 00000000..47a1d9a9 --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_deploy_mcp_gateway.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch + +from operations.deploy_mcp_gateway import execute, validate_required_props + + +@pytest.fixture +def test_event(): + """Fixture for test event data.""" + return { + "RequestType": "Create", + "ResourceProperties": { + "USE_CASE_CONFIG_RECORD_KEY": "test-config-key", + "USE_CASE_CONFIG_TABLE_NAME": "test-table", + "MCPAgentCoreName": "gaab-mcp-10002017", + "GATEWAY_ROLE_ARN": "arn:aws:iam::123456789012:role/MCPGatewayRole", + "S3_BUCKET_NAME": "test-bucket", + "COGNITO_USER_POOL_ID": "us-east-1_test123" + }, + } + + +@pytest.fixture +def mock_context(): + """Fixture for mock Lambda context.""" + context = Mock() + context.log_stream_name = "test-log-stream" + return context + + +# Test validate_required_props +def test_validate_required_props_success(test_event): + """Test validation passes with all required properties.""" + validate_required_props("Create", test_event["ResourceProperties"]) + # Should not raise any exception + + +def test_validate_required_props_missing_property(test_event): + """Test validation fails when required property is missing.""" + incomplete_props = test_event["ResourceProperties"].copy() + del incomplete_props["GATEWAY_ROLE_ARN"] + + with pytest.raises(ValueError, match="GATEWAY_ROLE_ARN is required"): + validate_required_props("Create", incomplete_props) + + +# Execute function tests +@patch("operations.deploy_mcp_gateway.send_response") +@patch("operations.deploy_mcp_gateway.GatewayMCP") +@patch("operations.deploy_mcp_gateway.MCPConfigManager") +def test_execute_create_success(mock_config_manager_class, mock_gateway_class, mock_send_response, test_event, mock_context): + """Test successful execution of create request.""" + # Mock config manager + mock_config_manager = Mock() + mock_config_manager_class.return_value = mock_config_manager + mock_config_manager.get_mcp_gateway_config.return_value = { + "target_params": [], + "use_case_description": "Test use case" + } + + # Mock GatewayMCP instance + mock_gateway = Mock() + mock_gateway_class.return_value = mock_gateway + mock_gateway.gateway_id = "gateway-123" + mock_gateway.to_dict.return_value = { + "GatewayId": "gateway-123", + "GatewayArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/gateway-123", + "GatewayName": "gaab-mcp-10002017", + "TargetCount": 2, + } + + execute(test_event, mock_context) + + # Verify GatewayMCP was instantiated + mock_gateway_class.assert_called_once() + + # Verify create was called + mock_gateway.create.assert_called_once() + + # Verify config was updated + mock_config_manager.update_gateway_config.assert_called_once() + + # Verify response was sent + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args[0] + assert call_args[2] == "SUCCESS" + assert call_args[4] == "gateway-123" # PhysicalResourceId + + +@patch("operations.deploy_mcp_gateway.send_response") +@patch("operations.deploy_mcp_gateway.GatewayMCP") +@patch("operations.deploy_mcp_gateway.MCPConfigManager") +def test_execute_update_success(mock_config_manager_class, mock_gateway_class, mock_send_response, test_event, mock_context): + """Test successful execution of update request.""" + update_event = {**test_event, "RequestType": "Update", "PhysicalResourceId": "gateway-123"} + + # Mock config manager + mock_config_manager = Mock() + mock_config_manager_class.return_value = mock_config_manager + mock_config_manager.get_mcp_gateway_config.return_value = { + "target_params": [], + "use_case_description": "Test use case" + } + + # Mock GatewayMCP instance + mock_gateway = Mock() + mock_gateway_class.return_value = mock_gateway + mock_gateway.gateway_id = "gateway-123" + mock_gateway.to_dict.return_value = { + "GatewayId": "gateway-123", + "Message": "Gateway updated successfully" + } + + execute(update_event, mock_context) + + # Verify update was called + mock_gateway.update.assert_called_once() + mock_send_response.assert_called_once() + + +@patch("operations.deploy_mcp_gateway.send_response") +@patch("operations.deploy_mcp_gateway.GatewayMCP") +@patch("operations.deploy_mcp_gateway.MCPConfigManager") +def test_execute_delete_success(mock_config_manager_class, mock_gateway_class, mock_send_response, test_event, mock_context): + """Test successful execution of delete request.""" + delete_event = { + "RequestType": "Delete", + "PhysicalResourceId": "gateway-123", + "ResourceProperties": test_event["ResourceProperties"], + } + + # Mock config manager + mock_config_manager = Mock() + mock_config_manager_class.return_value = mock_config_manager + + # Mock GatewayMCP instance + mock_gateway = Mock() + mock_gateway_class.return_value = mock_gateway + mock_gateway.to_dict.return_value = {"Message": "Gateway deleted successfully"} + + execute(delete_event, mock_context) + + # Verify delete was called + mock_gateway.delete.assert_called_once() + mock_send_response.assert_called_once() + + +@patch("operations.deploy_mcp_gateway.send_response") +def test_execute_unsupported_request_type(mock_send_response, test_event, mock_context): + """Test execution failure for unsupported request type.""" + invalid_event = { + "RequestType": "InvalidType", + "ResourceProperties": test_event["ResourceProperties"], + } + + execute(invalid_event, mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args[0] + assert call_args[2] == "FAILED" + + +@patch("operations.deploy_mcp_gateway.send_response") +@patch("operations.deploy_mcp_gateway.GatewayMCP") +@patch("operations.deploy_mcp_gateway.MCPConfigManager") +def test_execute_handler_exception(mock_config_manager_class, mock_gateway_class, mock_send_response, test_event, mock_context): + """Test execution when handler raises an exception.""" + # Mock config manager + mock_config_manager = Mock() + mock_config_manager_class.return_value = mock_config_manager + mock_config_manager.get_mcp_gateway_config.return_value = {} + + # Mock GatewayMCP to raise exception + mock_gateway = Mock() + mock_gateway_class.return_value = mock_gateway + mock_gateway.create.side_effect = Exception("Handler error") + + execute(test_event, mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" + # Error message is in the reason keyword argument + assert "Handler error" in call_args[1]["reason"] diff --git a/source/lambda/custom-resource/test/operations/test_deploy_mcp_runtime.py b/source/lambda/custom-resource/test/operations/test_deploy_mcp_runtime.py new file mode 100644 index 00000000..5fef0da8 --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_deploy_mcp_runtime.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch + +from operations.deploy_mcp_runtime import execute, validate_required_props + + +@pytest.fixture +def test_event(): + """Fixture for test event data.""" + return { + "RequestType": "Create", + "ResourceProperties": { + "USE_CASE_CONFIG_RECORD_KEY": "test-config-key", + "USE_CASE_CONFIG_TABLE_NAME": "test-table", + "MCPAgentCoreName": "gaab_mcp_10002017", + "EXECUTION_ROLE_ARN": "arn:aws:iam::123456789012:role/MCPRuntimeExecutionRole", + "COGNITO_USER_POOL_ID": "us-east-1_test123" + }, + } + + +@pytest.fixture +def mock_context(): + """Fixture for mock Lambda context.""" + context = Mock() + context.log_stream_name = "test-log-stream" + return context + + +@pytest.fixture +def mock_runtime_config(): + """Fixture for mock runtime configuration.""" + return { + "ecr_uri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest", + "environment_variables": {"ENV_VAR_1": "value1", "ENV_VAR_2": "value2"}, + } + + +# Test validate_required_props +def test_validate_required_props_success(test_event): + """Test validation passes with all required properties.""" + validate_required_props("Create", test_event["ResourceProperties"]) + # Should not raise any exception + + +def test_validate_required_props_missing_property(test_event): + """Test validation fails when required property is missing.""" + incomplete_props = test_event["ResourceProperties"].copy() + del incomplete_props["EXECUTION_ROLE_ARN"] + + with pytest.raises(ValueError, match="EXECUTION_ROLE_ARN is required"): + validate_required_props("Create", incomplete_props) + + +# Execute function tests +@patch("operations.deploy_mcp_runtime.send_response") +@patch("operations.deploy_mcp_runtime.RuntimeMCP") +@patch("operations.deploy_mcp_runtime.MCPConfigManager") +def test_execute_create_success(mock_config_manager_class, mock_runtime_class, mock_send_response, test_event, mock_context, mock_runtime_config): + """Test successful execution of create request.""" + # Mock config manager + mock_config_manager = Mock() + mock_config_manager_class.return_value = mock_config_manager + mock_config_manager.get_mcp_runtime_config.return_value = mock_runtime_config + + # Mock RuntimeMCP instance + mock_runtime = Mock() + mock_runtime_class.return_value = mock_runtime + mock_runtime.runtime_id = "test-runtime-123" + mock_runtime.to_dict.return_value = { + "MCPRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime-123", + "MCPRuntimeId": "test-runtime-123", + "MCPAgentCoreName": "gaab_mcp_10002017", + } + + execute(test_event, mock_context) + + # Verify RuntimeMCP was instantiated + mock_runtime_class.assert_called_once() + + # Verify create was called + mock_runtime.create.assert_called_once() + + # Verify config was updated + mock_config_manager.update_runtime_config.assert_called_once() + + # Verify response was sent + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args[0] + assert call_args[2] == "SUCCESS" + assert call_args[4] == "test-runtime-123" # PhysicalResourceId + + +@patch("operations.deploy_mcp_runtime.send_response") +@patch("operations.deploy_mcp_runtime.RuntimeMCP") +@patch("operations.deploy_mcp_runtime.MCPConfigManager") +def test_execute_update_success(mock_config_manager_class, mock_runtime_class, mock_send_response, test_event, mock_context, mock_runtime_config): + """Test successful execution of update request.""" + update_event = {**test_event, "RequestType": "Update", "PhysicalResourceId": "test-runtime-123"} + + # Mock config manager + mock_config_manager = Mock() + mock_config_manager_class.return_value = mock_config_manager + mock_config_manager.get_mcp_runtime_config.return_value = mock_runtime_config + + # Mock RuntimeMCP instance + mock_runtime = Mock() + mock_runtime_class.return_value = mock_runtime + mock_runtime.runtime_id = "test-runtime-123" + mock_runtime.to_dict.return_value = {"Message": "Runtime updated successfully"} + + execute(update_event, mock_context) + + # Verify update was called + mock_runtime.update.assert_called_once() + mock_send_response.assert_called_once() + + +@patch("operations.deploy_mcp_runtime.send_response") +@patch("operations.deploy_mcp_runtime.RuntimeMCP") +@patch("operations.deploy_mcp_runtime.MCPConfigManager") +def test_execute_delete_success(mock_config_manager_class, mock_runtime_class, mock_send_response, test_event, mock_context): + """Test successful execution of delete request.""" + delete_event = { + "RequestType": "Delete", + "PhysicalResourceId": "test-runtime-123", + "ResourceProperties": test_event["ResourceProperties"], + } + + # Mock config manager + mock_config_manager = Mock() + mock_config_manager_class.return_value = mock_config_manager + + # Mock RuntimeMCP instance + mock_runtime = Mock() + mock_runtime_class.return_value = mock_runtime + mock_runtime.to_dict.return_value = {"Message": "Runtime deleted successfully"} + + execute(delete_event, mock_context) + + # Verify delete was called + mock_runtime.delete.assert_called_once() + mock_send_response.assert_called_once() + + +@patch("operations.deploy_mcp_runtime.send_response") +def test_execute_unsupported_request_type(mock_send_response, test_event, mock_context): + """Test execution failure for unsupported request type.""" + invalid_event = { + "RequestType": "InvalidType", + "ResourceProperties": test_event["ResourceProperties"], + } + + execute(invalid_event, mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args[0] + assert call_args[2] == "FAILED" + + +@patch("operations.deploy_mcp_runtime.send_response") +@patch("operations.deploy_mcp_runtime.RuntimeMCP") +@patch("operations.deploy_mcp_runtime.MCPConfigManager") +def test_execute_handler_exception(mock_config_manager_class, mock_runtime_class, mock_send_response, test_event, mock_context, mock_runtime_config): + """Test execution when handler raises an exception.""" + # Mock config manager + mock_config_manager = Mock() + mock_config_manager_class.return_value = mock_config_manager + mock_config_manager.get_mcp_runtime_config.return_value = mock_runtime_config + + # Mock RuntimeMCP to raise exception + mock_runtime = Mock() + mock_runtime_class.return_value = mock_runtime + mock_runtime.create.side_effect = Exception("Handler error") + + execute(test_event, mock_context) + + mock_send_response.assert_called_once() + call_args = mock_send_response.call_args + assert call_args[0][2] == "FAILED" + # Error message is in the reason keyword argument + assert "Handler error" in call_args[1]["reason"] diff --git a/source/lambda/custom-resource/test/operations/test_gen_ecr_repo_prefix.py b/source/lambda/custom-resource/test/operations/test_gen_ecr_repo_prefix.py new file mode 100644 index 00000000..a393914d --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_gen_ecr_repo_prefix.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json + +import mock +import pytest +from lambda_func import handler +from operations.gen_ecr_repo_prefix import execute, verify_env_setup, sanitize_and_truncate_prefix +from operations.operation_types import RESOURCE, RESOURCE_PROPERTIES +from test.fixtures.gen_ecr_repo_prefix_events import lambda_event + + +class TestGenEcrRepoPrefix: + def test_sanitize_and_truncate_prefix_normal_case(self): + """Test normal stack name sanitization""" + result = sanitize_and_truncate_prefix("DeploymentPlatformStack") + assert result == "deploymentplatformstack" + assert len(result) <= 30 + + def test_sanitize_and_truncate_prefix_with_special_chars(self): + """Test stack name with special characters""" + result = sanitize_and_truncate_prefix("My-Stack@Name#123") + assert result == "my-stack-name-123" + assert len(result) <= 30 + + def test_sanitize_and_truncate_prefix_long_name(self): + """Test very long stack name truncation""" + long_name = "VeryLongDeploymentPlatformStackNameThatExceedsThirtyCharacters" + result = sanitize_and_truncate_prefix(long_name) + assert len(result) <= 30 + assert result.startswith("verylongdeploymentplatformst") + + def test_sanitize_and_truncate_prefix_empty_after_sanitization(self): + """Test edge case where name becomes empty after sanitization""" + result = sanitize_and_truncate_prefix("@#$%^&*()") + assert result == "gaab-default" + + def test_sanitize_and_truncate_prefix_leading_trailing_special_chars(self): + """Test removal of leading/trailing special characters""" + result = sanitize_and_truncate_prefix("-._MyStack-._") + assert result == "mystack" + assert not result.startswith(('-', '.', '_')) + assert not result.endswith(('-', '.', '_')) + + @pytest.mark.parametrize("requestType", ["Create"]) + def test_gen_ecr_repo_prefix_success_stack_name(self, lambda_event, mock_lambda_context, requestType): + """Test successful ECR repository prefix generation from stack name""" + lambda_event["RequestType"] = requestType + + with mock.patch("cfn_response.http") as mocked_PoolManager: + execute(lambda_event, mock_lambda_context) + mocked_PoolManager.request.assert_called_once() + + call_kwargs = mocked_PoolManager.request.call_args.kwargs + assert call_kwargs["method"] == "PUT" + assert call_kwargs["url"] == "https://fakeurl/doesnotexist" + + body = json.loads(call_kwargs["body"]) + assert body["Status"] == "SUCCESS" + assert body["Reason"] == "See the details in CloudWatch Log Stream: fake_logstream_name" + assert body["PhysicalResourceId"] == "fake_logstream_name" + assert body["StackId"] == "fakeStackId" + assert body["RequestId"] == "fakeRequestId" + assert body["LogicalResourceId"] == "fakeLogicalResourceId" + assert body["NoEcho"] == False + assert "EcrRepoPrefix" in body["Data"] + assert isinstance(body["Data"]["EcrRepoPrefix"], str) + assert len(body["Data"]["EcrRepoPrefix"]) <= 30 + + @pytest.mark.parametrize("requestType", ["Update"]) + def test_gen_ecr_repo_prefix_success_update(self, lambda_event, mock_lambda_context, requestType): + """Test successful ECR repository prefix generation on update (no-op)""" + lambda_event["RequestType"] = requestType + + with mock.patch("cfn_response.http") as mocked_PoolManager: + with mock.patch("operations.gen_ecr_repo_prefix.logger") as mock_logger: + execute(lambda_event, mock_lambda_context) + mocked_PoolManager.request.assert_called_once() + + # Verify no-op logging + mock_logger.info.assert_called_with("Update operation is a no-op for ECR repository prefix generation") + + call_kwargs = mocked_PoolManager.request.call_args.kwargs + body = json.loads(call_kwargs["body"]) + assert body["Status"] == "SUCCESS" + assert body["Data"] == {} # Update should return empty data like Delete + + @pytest.mark.parametrize("requestType", ["Delete"]) + def test_gen_ecr_repo_prefix_success_delete(self, lambda_event, mock_lambda_context, requestType): + """Test successful ECR repository prefix generation on delete""" + lambda_event["RequestType"] = requestType + + with mock.patch("cfn_response.http") as mocked_PoolManager: + execute(lambda_event, mock_lambda_context) + mocked_PoolManager.request.assert_called_once() + + call_kwargs = mocked_PoolManager.request.call_args.kwargs + body = json.loads(call_kwargs["body"]) + assert body["Status"] == "SUCCESS" + assert body["Data"] == {} + + def test_verify_env_setup_success(self, lambda_event): + """Test successful environment setup verification""" + verify_env_setup(lambda_event) + # Should not raise any exception + + def test_verify_env_setup_missing_both_properties(self, lambda_event): + """Test environment setup with missing both StackName and UseCaseShortId""" + del lambda_event[RESOURCE_PROPERTIES]["StackName"] + + with pytest.raises(ValueError, match="Missing required property: either StackName or UseCaseShortId must be provided"): + verify_env_setup(lambda_event) + + def test_verify_env_setup_with_use_case_short_id(self, lambda_event): + """Test environment setup with UseCaseShortId instead of StackName""" + del lambda_event[RESOURCE_PROPERTIES]["StackName"] + lambda_event[RESOURCE_PROPERTIES]["UseCaseShortId"] = "a1b2c3d4" + + # Should not raise any exception + verify_env_setup(lambda_event) + + @pytest.mark.parametrize("requestType", ["Create", "Update", "Delete"]) + def test_when_operation_type_is_invalid(self, lambda_event, mock_lambda_context, requestType): + """Test behavior with invalid operation type""" + lambda_event[RESOURCE_PROPERTIES][RESOURCE] = "INVALID_OPERATION" + lambda_event["RequestType"] = requestType + + with pytest.raises(ValueError): + verify_env_setup(lambda_event) + + with mock.patch("cfn_response.http") as mocked_PoolManager: + execute(lambda_event, mock_lambda_context) + mocked_PoolManager.request.assert_called_once() + + call_kwargs = mocked_PoolManager.request.call_args.kwargs + body = json.loads(call_kwargs["body"]) + assert body["Status"] == "FAILED" + assert "Operation type not available or did not match" in body["Reason"] + + @pytest.mark.parametrize("requestType", ["Create", "Update", "Delete"]) + def test_lambda_handler(self, lambda_event, mock_lambda_context, requestType): + """Test lambda handler for all request types""" + lambda_event["RequestType"] = requestType + + with mock.patch("cfn_response.http") as mocked_PoolManager: + handler(lambda_event, mock_lambda_context) + mocked_PoolManager.request.assert_called_once() + + def test_when_sanitization_fails(self, lambda_event, mock_lambda_context): + """Test behavior when sanitization fails""" + lambda_event["RequestType"] = "Create" + + with mock.patch("operations.gen_ecr_repo_prefix.sanitize_and_truncate_prefix") as sanitize_mock: + sanitize_mock.side_effect = Exception("Fake sanitization error") + + with mock.patch("cfn_response.http") as mocked_PoolManager: + execute(lambda_event, mock_lambda_context) + + call_kwargs = mocked_PoolManager.request.call_args.kwargs + body = json.loads(call_kwargs["body"]) + assert body["Status"] == "FAILED" + assert "Fake sanitization error" in body["Reason"] + + def test_update_does_not_call_generate_prefix(self, lambda_event, mock_lambda_context): + """Test that Update operations don't call generate_prefix_from_inputs""" + lambda_event["RequestType"] = "Update" + + with mock.patch("operations.gen_ecr_repo_prefix.generate_prefix_from_inputs") as generate_mock: + with mock.patch("cfn_response.http") as mocked_PoolManager: + execute(lambda_event, mock_lambda_context) + + # Verify generate_prefix_from_inputs was NOT called + generate_mock.assert_not_called() + + call_kwargs = mocked_PoolManager.request.call_args.kwargs + body = json.loads(call_kwargs["body"]) + assert body["Status"] == "SUCCESS" + assert body["Data"] == {} \ No newline at end of file diff --git a/source/lambda/custom-resource/test/operations/test_get_arns_for_inference_profile.py b/source/lambda/custom-resource/test/operations/test_get_arns_for_inference_profile.py index 70c72260..d842a410 100644 --- a/source/lambda/custom-resource/test/operations/test_get_arns_for_inference_profile.py +++ b/source/lambda/custom-resource/test/operations/test_get_arns_for_inference_profile.py @@ -77,7 +77,7 @@ def test_get_inference_identifier_success(insert_llm_config): record_key = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY] result = get_inference_identifier_from_ddb(table_name, record_key) - assert result == expected_identifier + assert result == [expected_identifier] @mock_aws @@ -105,15 +105,16 @@ def test_get_model_arns_success(mock_bedrock_client): ], } - result = get_model_arns("test-profile-identifier") + result = get_model_arns(["test-profile-identifier"]) - assert result == ",".join( - [ - "arn:aws:bedrock:us-west-2:123456789012:model/model1", - "arn:aws:bedrock:us-west-2:123456789012:model/model2", - "arn:aws:bedrock:us-east-1:123456789012:inference-profile/test-profile-identifier", - ] - ) + # Check that all expected ARNs are present (order doesn't matter due to set) + expected_arns = { + "arn:aws:bedrock:us-west-2:123456789012:model/model1", + "arn:aws:bedrock:us-west-2:123456789012:model/model2", + "arn:aws:bedrock:us-east-1:123456789012:inference-profile/test-profile-identifier", + } + result_arns = set(result.split(",")) + assert result_arns == expected_arns mock_bedrock_client.get_inference_profile.assert_called_once_with( inferenceProfileIdentifier="test-profile-identifier" ) @@ -121,7 +122,7 @@ def test_get_model_arns_success(mock_bedrock_client): def test_get_model_arns_empty_response(mock_bedrock_client): mock_bedrock_client.get_inference_profile.return_value = {} - result = get_model_arns("test-profile-identifier") + result = get_model_arns(["test-profile-identifier"]) assert result == "" @@ -131,7 +132,233 @@ def test_get_model_arns_client_error(mock_bedrock_client): ) with pytest.raises(ClientError): - get_model_arns("test-profile-identifier") + get_model_arns(["test-profile-identifier"]) + + +def test_get_model_arns_multiple_profiles(mock_bedrock_client): + # Mock responses for different profiles + def mock_get_inference_profile(inferenceProfileIdentifier): + if inferenceProfileIdentifier == "profile1": + return { + "inferenceProfileArn": "arn:aws:bedrock:us-east-1:123456789012:inference-profile/profile1", + "models": [ + {"modelArn": "arn:aws:bedrock:us-west-2:123456789012:model/model1"}, + {"modelArn": "arn:aws:bedrock:us-west-2:123456789012:model/model2"}, + ], + } + elif inferenceProfileIdentifier == "profile2": + return { + "inferenceProfileArn": "arn:aws:bedrock:us-east-1:123456789012:inference-profile/profile2", + "models": [ + {"modelArn": "arn:aws:bedrock:us-west-2:123456789012:model/model3"}, + ], + } + return {} + + mock_bedrock_client.get_inference_profile.side_effect = mock_get_inference_profile + + result = get_model_arns(["profile1", "profile2"]) + + expected_arns = { + "arn:aws:bedrock:us-west-2:123456789012:model/model1", + "arn:aws:bedrock:us-west-2:123456789012:model/model2", + "arn:aws:bedrock:us-west-2:123456789012:model/model3", + "arn:aws:bedrock:us-east-1:123456789012:inference-profile/profile1", + "arn:aws:bedrock:us-east-1:123456789012:inference-profile/profile2", + } + result_arns = set(result.split(",")) + assert result_arns == expected_arns + assert mock_bedrock_client.get_inference_profile.call_count == 2 + + +def test_get_model_arns_deduplication(mock_bedrock_client): + # Mock responses with duplicate ARNs + def mock_get_inference_profile(inferenceProfileIdentifier): + return { + "inferenceProfileArn": f"arn:aws:bedrock:us-east-1:123456789012:inference-profile/{inferenceProfileIdentifier}", + "models": [ + {"modelArn": "arn:aws:bedrock:us-west-2:123456789012:model/shared-model"}, + {"modelArn": f"arn:aws:bedrock:us-west-2:123456789012:model/{inferenceProfileIdentifier}-model"}, + ], + } + + mock_bedrock_client.get_inference_profile.side_effect = mock_get_inference_profile + + result = get_model_arns(["profile1", "profile2"]) + + # Should deduplicate the shared-model ARN + expected_arns = { + "arn:aws:bedrock:us-west-2:123456789012:model/shared-model", # Only appears once despite being in both profiles + "arn:aws:bedrock:us-west-2:123456789012:model/profile1-model", + "arn:aws:bedrock:us-west-2:123456789012:model/profile2-model", + "arn:aws:bedrock:us-east-1:123456789012:inference-profile/profile1", + "arn:aws:bedrock:us-east-1:123456789012:inference-profile/profile2", + } + result_arns = set(result.split(",")) + assert result_arns == expected_arns + assert mock_bedrock_client.get_inference_profile.call_count == 2 + + +def test_get_model_arns_empty_list(mock_bedrock_client): + result = get_model_arns([]) + assert result == "" + mock_bedrock_client.get_inference_profile.assert_not_called() + + +@mock_aws +def test_get_inference_identifier_workflow_agent_as_tool(setup_use_case_config): + lambda_event, ddb = setup_use_case_config + + table_name = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_TABLE_NAME] + record_key = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY] + + # Create a workflow configuration with agents-as-tools orchestration + python_obj_to_be_inserted = { + LLM_CONFIG_RECORD_FIELD_NAME: record_key, + "config": { + "UseCaseType": "Workflow", + "LlmParams": { + "BedrockLlmParams": {"InferenceProfileId": "workflow-profile"} + }, # Workflow-level inference profile + "WorkflowParams": { + "OrchestrationPattern": "agents-as-tools", + "AgentsAsToolsParams": { + "Agents": [ + {"UseCaseId": "agent1", "LlmParams": {"BedrockLlmParams": {"InferenceProfileId": "profile-1"}}}, + {"UseCaseId": "agent2", "LlmParams": {"BedrockLlmParams": {"InferenceProfileId": "profile-2"}}}, + { + "UseCaseId": "agent3", + "LlmParams": {"BedrockLlmParams": {"ModelId": "some-model"}}, # No InferenceProfileId + }, + ] + }, + }, + }, + } + serializer = TypeSerializer() + ddb.put_item(TableName=table_name, Item={k: serializer.serialize(v) for k, v in python_obj_to_be_inserted.items()}) + + result = get_inference_identifier_from_ddb(table_name, record_key) + assert set(result) == {"workflow-profile", "profile-1", "profile-2"} # Should include workflow and agent profiles + + +@mock_aws +def test_get_inference_identifier_workflow_non_agent_as_tool(setup_use_case_config): + lambda_event, ddb = setup_use_case_config + + table_name = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_TABLE_NAME] + record_key = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY] + + # Create a workflow configuration with different orchestration pattern + python_obj_to_be_inserted = { + LLM_CONFIG_RECORD_FIELD_NAME: record_key, + "config": { + "UseCaseType": "Workflow", + "WorkflowParams": {"OrchestrationPattern": "sequential"}, # Not agents-as-tools + "LlmParams": {"BedrockLlmParams": {"InferenceProfileId": "main-profile"}}, + }, + } + serializer = TypeSerializer() + ddb.put_item(TableName=table_name, Item={k: serializer.serialize(v) for k, v in python_obj_to_be_inserted.items()}) + + result = get_inference_identifier_from_ddb(table_name, record_key) + assert result == ["main-profile"] # Should return the main LlmParams profile + + +@mock_aws +def test_get_inference_identifier_workflow_empty_agents(setup_use_case_config): + lambda_event, ddb = setup_use_case_config + + table_name = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_TABLE_NAME] + record_key = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY] + + # Create a workflow configuration with agents-as-tools but no agents with InferenceProfileId + python_obj_to_be_inserted = { + LLM_CONFIG_RECORD_FIELD_NAME: record_key, + "config": { + "UseCaseType": "Workflow", + "WorkflowParams": { + "OrchestrationPattern": "agents-as-tools", + "AgentsAsToolsParams": { + "Agents": [ + { + "UseCaseId": "agent1", + "LlmParams": {"BedrockLlmParams": {"ModelId": "some-model"}}, # No InferenceProfileId + } + ] + }, + }, + }, + } + serializer = TypeSerializer() + ddb.put_item(TableName=table_name, Item={k: serializer.serialize(v) for k, v in python_obj_to_be_inserted.items()}) + + result = get_inference_identifier_from_ddb(table_name, record_key) + assert result is None # Should return None when no agents have InferenceProfileId + + +@mock_aws +def test_get_inference_identifier_workflow_deduplication(setup_use_case_config): + lambda_event, ddb = setup_use_case_config + + table_name = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_TABLE_NAME] + record_key = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY] + + # Create a workflow configuration with duplicate inference profile IDs + python_obj_to_be_inserted = { + LLM_CONFIG_RECORD_FIELD_NAME: record_key, + "config": { + "UseCaseType": "Workflow", + "LlmParams": {"BedrockLlmParams": {"InferenceProfileId": "shared-profile"}}, # Same as agent1 + "WorkflowParams": { + "OrchestrationPattern": "agents-as-tools", + "AgentsAsToolsParams": { + "Agents": [ + { + "UseCaseId": "agent1", + "LlmParams": {"BedrockLlmParams": {"InferenceProfileId": "shared-profile"}}, + }, # Duplicate + { + "UseCaseId": "agent2", + "LlmParams": {"BedrockLlmParams": {"InferenceProfileId": "unique-profile"}}, + }, + { + "UseCaseId": "agent3", + "LlmParams": {"BedrockLlmParams": {"InferenceProfileId": "shared-profile"}}, + }, # Another duplicate + ] + }, + }, + }, + } + serializer = TypeSerializer() + ddb.put_item(TableName=table_name, Item={k: serializer.serialize(v) for k, v in python_obj_to_be_inserted.items()}) + + result = get_inference_identifier_from_ddb(table_name, record_key) + assert set(result) == {"shared-profile", "unique-profile"} # Should deduplicate shared-profile + assert len(result) == 2 # Should only have 2 unique profiles + + +@mock_aws +def test_get_inference_identifier_non_workflow_use_case(setup_use_case_config): + lambda_event, ddb = setup_use_case_config + + table_name = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_TABLE_NAME] + record_key = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY] + + # Create a non-workflow configuration (e.g., Chat use case) + python_obj_to_be_inserted = { + LLM_CONFIG_RECORD_FIELD_NAME: record_key, + "config": { + "UseCaseType": "Chat", + "LlmParams": {"BedrockLlmParams": {"InferenceProfileId": "chat-profile"}}, + }, + } + serializer = TypeSerializer() + ddb.put_item(TableName=table_name, Item={k: serializer.serialize(v) for k, v in python_obj_to_be_inserted.items()}) + + result = get_inference_identifier_from_ddb(table_name, record_key) + assert result == ["chat-profile"] # Should return the single inference profile @patch("operations.get_arns_for_inference_profile.send_response") @@ -162,24 +389,23 @@ def test_execute_success(mock_send_response, lambda_event, mock_lambda_context, table_name = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_TABLE_NAME] record_key = lambda_event[RESOURCE_PROPERTIES][USE_CASE_CONFIG_RECORD_KEY] - assert get_inference_identifier_from_ddb(table_name, record_key) == "test-profile-identifier" + assert get_inference_identifier_from_ddb(table_name, record_key) == ["test-profile-identifier"] execute(lambda_event, mock_lambda_context) - mock_send_response.assert_called_once_with( - lambda_event, - mock_lambda_context, - "SUCCESS", - { - "Arns": ",".join( - [ - "arn:aws:bedrock:us-west-2:123456789012:model/model1", - "arn:aws:bedrock:us-west-2:123456789012:model/model2", - "arn:aws:bedrock:us-east-1:123456789012:inference-profile/test-profile-identifier", - ] - ), - }, - "fake_physical_resource_id", - ) + # Check that the response contains the expected ARNs (order doesn't matter) + expected_arns = { + "arn:aws:bedrock:us-west-2:123456789012:model/model1", + "arn:aws:bedrock:us-west-2:123456789012:model/model2", + "arn:aws:bedrock:us-east-1:123456789012:inference-profile/test-profile-identifier", + } + + # Get the actual call arguments + call_args = mock_send_response.call_args + actual_arns_string = call_args[0][3]["Arns"] # The "Arns" value from the data dictionary + actual_arns = set(actual_arns_string.split(",")) + + assert actual_arns == expected_arns + mock_send_response.assert_called_once() @patch("operations.get_arns_for_inference_profile.send_response") diff --git a/source/lambda/custom-resource/test/operations/test_lambda_version_generator.py b/source/lambda/custom-resource/test/operations/test_lambda_version_generator.py new file mode 100644 index 00000000..1c31e1f5 --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_lambda_version_generator.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest import mock +from operations.lambda_version_generator import execute, verify_env_setup +from operations.operation_types import FAILED, RESOURCE, RESOURCE_PROPERTIES, SUCCESS +from test.fixtures.lambda_version_generator_events import lambda_event + + +def test_verify_env_setup_success(lambda_event): + assert verify_env_setup(lambda_event) is None + + +def test_verify_env_setup_invalid_operation(lambda_event): + lambda_event[RESOURCE_PROPERTIES][RESOURCE] = "INVALID_OPERATION" + with pytest.raises(ValueError, match="Operation type not supported"): + verify_env_setup(lambda_event) + + +def test_verify_env_setup_missing_function_name(lambda_event): + lambda_event[RESOURCE_PROPERTIES]["FunctionName"] = None + with pytest.raises(ValueError, match="FunctionName has not been passed"): + verify_env_setup(lambda_event) + + +@mock.patch("operations.lambda_version_generator.get_service_client") +@mock.patch("operations.lambda_version_generator.send_response") +def test_execute_create_success(mock_send_response, mock_get_service_client, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Create" + + mock_lambda_client = mock.Mock() + mock_get_service_client.return_value = mock_lambda_client + + mock_lambda_client.publish_version.return_value = { + 'FunctionArn': 'arn:aws:lambda:us-east-1:123456789012:function:my-function:1', + 'Version': '1' + } + + execute(lambda_event, mock_lambda_context) + + mock_lambda_client.publish_version.assert_called_once_with( + FunctionName='my-function', + Description='Lambda Version' + ) + + mock_send_response.assert_called_once_with( + lambda_event, + mock_lambda_context, + SUCCESS, + { + 'VersionArn': 'arn:aws:lambda:us-east-1:123456789012:function:my-function:1', + 'VersionNumber': '1' + } + ) + + +@mock.patch("operations.lambda_version_generator.get_service_client") +@mock.patch("operations.lambda_version_generator.send_response") +def test_execute_update_success(mock_send_response, mock_get_service_client, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Update" + + mock_lambda_client = mock.Mock() + mock_get_service_client.return_value = mock_lambda_client + + mock_lambda_client.publish_version.return_value = { + 'FunctionArn': 'arn:aws:lambda:us-east-1:123456789012:function:my-function:2', + 'Version': '2' + } + + execute(lambda_event, mock_lambda_context) + + mock_lambda_client.publish_version.assert_called_once_with( + FunctionName='my-function', + Description='Lambda Version' + ) + + mock_send_response.assert_called_once_with( + lambda_event, + mock_lambda_context, + SUCCESS, + { + 'VersionArn': 'arn:aws:lambda:us-east-1:123456789012:function:my-function:2', + 'VersionNumber': '2' + } + ) + + +@mock.patch("operations.lambda_version_generator.send_response") +def test_execute_delete_success(mock_send_response, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Delete" + + execute(lambda_event, mock_lambda_context) + + # For delete, no Lambda client calls should be made + mock_send_response.assert_called_once_with( + lambda_event, + mock_lambda_context, + SUCCESS, + {} + ) + + +@mock.patch("operations.lambda_version_generator.get_service_client") +@mock.patch("operations.lambda_version_generator.send_response") +def test_execute_create_lambda_error(mock_send_response, mock_get_service_client, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Create" + + mock_lambda_client = mock.Mock() + mock_get_service_client.return_value = mock_lambda_client + + # Simulate Lambda service error + mock_lambda_client.publish_version.side_effect = Exception("Lambda service error") + + with pytest.raises(Exception, match="Lambda service error"): + execute(lambda_event, mock_lambda_context) + + mock_send_response.assert_called_once_with( + lambda_event, + mock_lambda_context, + FAILED, + {}, + reason="Lambda service error" + ) + + +@mock.patch("operations.lambda_version_generator.get_service_client") +@mock.patch("operations.lambda_version_generator.send_response") +def test_execute_update_lambda_error(mock_send_response, mock_get_service_client, lambda_event, mock_lambda_context): + lambda_event["RequestType"] = "Update" + + mock_lambda_client = mock.Mock() + mock_get_service_client.return_value = mock_lambda_client + + # Simulate Lambda service error + mock_lambda_client.publish_version.side_effect = Exception("Function not found") + + with pytest.raises(Exception, match="Function not found"): + execute(lambda_event, mock_lambda_context) + + mock_send_response.assert_called_once_with( + lambda_event, + mock_lambda_context, + FAILED, + {}, + reason="Function not found" + ) diff --git a/source/lambda/custom-resource/test/operations/test_multimodal_bucket_notifications.py b/source/lambda/custom-resource/test/operations/test_multimodal_bucket_notifications.py new file mode 100644 index 00000000..2cfd556a --- /dev/null +++ b/source/lambda/custom-resource/test/operations/test_multimodal_bucket_notifications.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import json +from unittest.mock import MagicMock, patch + +import pytest +from operations import multimodal_bucket_notifications +from operations.operation_types import FAILED, SUCCESS +from utils.constants import MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR + + +def test_multimodal_bucket_notifications_create_success(): + """Test successful creation of bucket notifications""" + event = { + "RequestType": "Create", + "ResourceProperties": {MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR: "test-bucket", "EnableEventBridge": True}, + } + + context = MagicMock() + + with patch("operations.multimodal_bucket_notifications.get_service_client") as mock_get_service_client: + mock_s3_client = MagicMock() + mock_get_service_client.return_value = mock_s3_client + + with patch("operations.multimodal_bucket_notifications.send_response") as mock_send_response: + multimodal_bucket_notifications.execute(event, context) + + mock_get_service_client.assert_called_once_with("s3") + mock_s3_client.put_bucket_notification_configuration.assert_called_once_with( + Bucket="test-bucket", NotificationConfiguration={"EventBridgeConfiguration": {}} + ) + + mock_send_response.assert_called_once_with( + event, context, SUCCESS, {"BucketName": "test-bucket", "EventBridgeEnabled": True} + ) + + +def test_multimodal_bucket_notifications_delete_success(): + """Test successful deletion of bucket notifications""" + event = { + "RequestType": "Delete", + "ResourceProperties": {MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR: "test-bucket", "EnableEventBridge": True}, + } + + context = MagicMock() + + with patch("operations.multimodal_bucket_notifications.get_service_client") as mock_get_service_client: + mock_s3_client = MagicMock() + mock_get_service_client.return_value = mock_s3_client + + with patch("operations.multimodal_bucket_notifications.send_response") as mock_send_response: + multimodal_bucket_notifications.execute(event, context) + + mock_get_service_client.assert_called_once_with("s3") + mock_s3_client.put_bucket_notification_configuration.assert_called_once_with( + Bucket="test-bucket", NotificationConfiguration={} + ) + + mock_send_response.assert_called_once_with( + event, context, SUCCESS, {"BucketName": "test-bucket", "EventBridgeEnabled": True} + ) + + +def test_multimodal_bucket_notifications_missing_bucket_name(): + """Test error handling when bucket name is missing""" + event = {"RequestType": "Create", "ResourceProperties": {"EnableEventBridge": True}} + + context = MagicMock() + + with patch("operations.multimodal_bucket_notifications.send_response") as mock_send_response: + with pytest.raises(ValueError): + multimodal_bucket_notifications.execute(event, context) + + mock_send_response.assert_called_once_with( + event, + context, + FAILED, + {}, + reason=f"{MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR} is required in ResourceProperties", + ) + + +def test_multimodal_bucket_notifications_s3_error(): + """Test error handling when S3 operation fails""" + event = { + "RequestType": "Create", + "ResourceProperties": {MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR: "test-bucket", "EnableEventBridge": True}, + } + + context = MagicMock() + + with patch("operations.multimodal_bucket_notifications.get_service_client") as mock_get_service_client: + mock_s3_client = MagicMock() + mock_s3_client.put_bucket_notification_configuration.side_effect = Exception("S3 Error") + mock_get_service_client.return_value = mock_s3_client + + with patch("operations.multimodal_bucket_notifications.send_response") as mock_send_response: + with pytest.raises(Exception): + multimodal_bucket_notifications.execute(event, context) + + mock_get_service_client.assert_called_once_with("s3") + mock_send_response.assert_called_once_with(event, context, FAILED, {}, reason="S3 Error") + + +def test_multimodal_bucket_notifications_delete_with_error(): + """Test that delete operation doesn't fail when S3 operation fails""" + event = { + "RequestType": "Delete", + "ResourceProperties": {MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR: "test-bucket", "EnableEventBridge": True}, + } + + context = MagicMock() + + with patch("operations.multimodal_bucket_notifications.get_service_client") as mock_get_service_client: + mock_s3_client = MagicMock() + mock_s3_client.put_bucket_notification_configuration.side_effect = Exception("Bucket not found") + mock_get_service_client.return_value = mock_s3_client + + with patch("operations.multimodal_bucket_notifications.send_response") as mock_send_response: + multimodal_bucket_notifications.execute(event, context) + + mock_get_service_client.assert_called_once_with("s3") + mock_send_response.assert_called_once_with( + event, context, SUCCESS, {"BucketName": "test-bucket", "EventBridgeEnabled": True} + ) diff --git a/source/lambda/custom-resource/test/operations/test_anonymous_metric.py b/source/lambda/custom-resource/test/operations/test_send_metrics.py similarity index 50% rename from source/lambda/custom-resource/test/operations/test_anonymous_metric.py rename to source/lambda/custom-resource/test/operations/test_send_metrics.py index a7b8895f..aa374626 100644 --- a/source/lambda/custom-resource/test/operations/test_anonymous_metric.py +++ b/source/lambda/custom-resource/test/operations/test_send_metrics.py @@ -5,22 +5,28 @@ import json import re from copy import copy -from test.fixtures.anonymous_metrics_events import ( +from test.fixtures.metrics_events import ( lambda_events, llm_config_value, setup_config_ddb, llm_config_value_text_with_no_rag, llm_config_value_with_auth, llm_config_value_with_agent, + llm_config_value_with_multimodal, + llm_config_value_with_mcp_gateway, + llm_config_value_with_mcp_runtime, + llm_config_value_with_agent_builder, + llm_config_value_with_workflow, + llm_config_value_with_provisioned_concurrency, ) import mock -import operations import pytest +from operations import operation_types from freezegun import freeze_time from lambda_func import handler from moto import mock_aws -from operations.anonymous_metrics import SOLUTION_ID, VERSION, execute, verify_env_setup +from operations.send_metrics import VERSION, execute, verify_env_setup from operations.operation_types import RESOURCE, RESOURCE_PROPERTIES from utils.constants import ( DISAMBIGUATION_PROMPT_TEMPLATE, @@ -30,6 +36,8 @@ PROMPT_TEMPLATE, SSM_CONFIG_KEY, USE_CASE_CONFIG_RECORD_KEY, + USE_CASE_CONFIG_TABLE_NAME, + UUID, ) UUID_REGEX = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-5][0-9a-f]{3}-[089ab][0-9a-f]{3}-[0-9a-f]{12}$" @@ -41,8 +49,8 @@ def test_when_operation_type_is_invalid(mock_lambda_context, lambda_events, setu expected_response = { "method": "PUT", "url": "https://fakeurl/doesnotexist", - "headers": {"content-type": "", "content-length": "327"}, - "body": '{"Status": "FAILED", "Reason": "Operation type not available or did not match from the request. Expecting operation type to be ANONYMOUS_METRIC", "PhysicalResourceId": "fake_physical_resource_id", "StackId": "fakeStackId", "RequestId": "fakeRequestId", "LogicalResourceId": "fakeLogicalResourceId", "NoEcho": false, "Data": {}}', + "headers": {"content-type": "", "content-length": "337"}, + "body": '{"Status": "FAILED", "Reason": "Operation type not available or did not match from the request. Expecting operation type to be METRIC or ANONYMOUS_METRIC", "PhysicalResourceId": "fake_physical_resource_id", "StackId": "fakeStackId", "RequestId": "fakeRequestId", "LogicalResourceId": "fakeLogicalResourceId", "NoEcho": false, "Data": {}}', } for event in lambda_events: @@ -305,3 +313,291 @@ def test_lambda_handler_for_missing_props(lambda_events, mock_lambda_context, re headers={"content-type": "", "content-length": "278"}, body='{"Status": "SUCCESS", "Reason": "See the details in CloudWatch Log Stream: fake_logstream_name", "PhysicalResourceId": "fake_physical_resource_id", "StackId": "fakeStackId", "RequestId": "fakeRequestId", "LogicalResourceId": "fakeLogicalResourceId", "NoEcho": false, "Data": {}}', ) + + +@mock_aws +@freeze_time("2000-01-01T00:00:00") +@pytest.mark.parametrize("requestType", ["Create", "Update", "Delete"]) +def test_sending_metric_with_multimodal_params(mock_lambda_context, requestType, setup_config_ddb): + event = { + "RequestType": requestType, + "ResponseURL": "https://fakeurl/doesnotexist", + "StackId": "fakeStackId", + "RequestId": "fakeRequestId", + "ResourceType": "Custom::AnonymousMetric", + "LogicalResourceId": "fakeLogicalResourceId", + "PhysicalResourceId": "fake_physical_resource_id", + RESOURCE_PROPERTIES: { + RESOURCE: operation_types.ANONYMOUS_METRIC, + "SolutionId": "SO0999", + "Version": "v9.9.9", + USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_5", + UUID: "fakeuuid", + }, + } + + with mock.patch("cfn_response.http") as cfn_mocked_PoolManager: + with mock.patch("utils.metrics.http") as metrics_mocked_PoolManager: + execute(event, mock_lambda_context) + call_kwargs = metrics_mocked_PoolManager.request.call_args.kwargs + assert call_kwargs["method"] == "POST" + assert call_kwargs["url"] == METRICS_ENDPOINT + body = json.loads(call_kwargs["body"]) + assert body["Solution"] == "SO0999" + assert body.get("UUID") == "fakeuuid" + if body["Data"]: + # Verify multimodal params are captured in LlmParams + assert "LlmParams" in body["Data"] + assert "MultimodalParams" in body["Data"]["LlmParams"] + assert body["Data"]["LlmParams"]["MultimodalParams"] == {"MultimodalEnabled": True} + assert body["Data"]["UseCaseType"] == "AgentBuilder" + + +@mock_aws +@freeze_time("2000-01-01T00:00:00") +@pytest.mark.parametrize("requestType", ["Create", "Update", "Delete"]) +def test_sending_metric_with_mcp_gateway_params(mock_lambda_context, requestType, setup_config_ddb): + event = { + "RequestType": requestType, + "ResponseURL": "https://fakeurl/doesnotexist", + "StackId": "fakeStackId", + "RequestId": "fakeRequestId", + "ResourceType": "Custom::AnonymousMetric", + "LogicalResourceId": "fakeLogicalResourceId", + "PhysicalResourceId": "fake_physical_resource_id", + RESOURCE_PROPERTIES: { + RESOURCE: operation_types.ANONYMOUS_METRIC, + "SolutionId": "SO0999", + "Version": "v9.9.9", + USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_6", + UUID: "fakeuuid", + }, + } + + with mock.patch("cfn_response.http") as cfn_mocked_PoolManager: + with mock.patch("utils.metrics.http") as metrics_mocked_PoolManager: + execute(event, mock_lambda_context) + call_kwargs = metrics_mocked_PoolManager.request.call_args.kwargs + assert call_kwargs["method"] == "POST" + assert call_kwargs["url"] == METRICS_ENDPOINT + body = json.loads(call_kwargs["body"]) + assert body["Solution"] == "SO0999" + assert body.get("UUID") == "fakeuuid" + if body["Data"]: + assert "MCPParams" in body["Data"] + assert body["Data"]["MCPParams"]["MCPType"] == "Gateway" + assert "GatewayParams" in body["Data"]["MCPParams"] + gateway_params = body["Data"]["MCPParams"]["GatewayParams"] + # Verify target count and types are captured + assert gateway_params["TargetCount"] == 3 + assert "TargetParams" in gateway_params + assert len(gateway_params["TargetParams"]) == 3 + + target_types = [t["TargetType"] for t in gateway_params["TargetParams"]] + assert "smithyModel" in target_types + assert target_types.count("openApiSchema") == 2 + + # Verify outbound auth types are captured for OpenAPI targets + openapi_targets = [t for t in gateway_params["TargetParams"] if t["TargetType"] == "openApiSchema"] + assert len(openapi_targets) == 2 + auth_types = [t.get("OutboundAuthProviderType") for t in openapi_targets] + assert "API_KEY" in auth_types + assert "OAUTH" in auth_types + + for target in gateway_params["TargetParams"]: + assert "TargetName" not in target + assert "TargetId" not in target + assert "OutboundAuthProviderArn" not in target + + assert "GatewayArn" not in gateway_params + assert "GatewayUrl" not in gateway_params + assert "GatewayId" not in gateway_params + assert body["Data"]["UseCaseType"] == "MCPServer" + + +@mock_aws +@freeze_time("2000-01-01T00:00:00") +@pytest.mark.parametrize("requestType", ["Create", "Update", "Delete"]) +def test_sending_metric_with_mcp_runtime_params(mock_lambda_context, requestType, setup_config_ddb): + event = { + "RequestType": requestType, + "ResponseURL": "https://fakeurl/doesnotexist", + "StackId": "fakeStackId", + "RequestId": "fakeRequestId", + "ResourceType": "Custom::AnonymousMetric", + "LogicalResourceId": "fakeLogicalResourceId", + "PhysicalResourceId": "fake_physical_resource_id", + RESOURCE_PROPERTIES: { + RESOURCE: operation_types.ANONYMOUS_METRIC, + "SolutionId": "SO0999", + "Version": "v9.9.9", + USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_7", + UUID: "fakeuuid", + }, + } + + with mock.patch("cfn_response.http") as cfn_mocked_PoolManager: + with mock.patch("utils.metrics.http") as metrics_mocked_PoolManager: + execute(event, mock_lambda_context) + call_kwargs = metrics_mocked_PoolManager.request.call_args.kwargs + assert call_kwargs["method"] == "POST" + assert call_kwargs["url"] == METRICS_ENDPOINT + body = json.loads(call_kwargs["body"]) + assert body["Solution"] == "SO0999" + assert body.get("UUID") == "fakeuuid" + if body["Data"]: + assert "MCPParams" in body["Data"] + assert body["Data"]["MCPParams"]["MCPType"] == "Runtime" + assert "RuntimeArn" not in body["Data"]["MCPParams"] + assert "RuntimeUrl" not in body["Data"]["MCPParams"] + assert "RuntimeId" not in body["Data"]["MCPParams"] + assert body["Data"]["UseCaseType"] == "MCPServer" + + +@mock_aws +@freeze_time("2000-01-01T00:00:00") +@pytest.mark.parametrize("requestType", ["Create", "Update", "Delete"]) +def test_sending_metric_with_agent_builder_params(mock_lambda_context, requestType, setup_config_ddb): + event = { + "RequestType": requestType, + "ResponseURL": "https://fakeurl/doesnotexist", + "StackId": "fakeStackId", + "RequestId": "fakeRequestId", + "ResourceType": "Custom::AnonymousMetric", + "LogicalResourceId": "fakeLogicalResourceId", + "PhysicalResourceId": "fake_physical_resource_id", + RESOURCE_PROPERTIES: { + RESOURCE: operation_types.ANONYMOUS_METRIC, + "SolutionId": "SO0999", + "Version": "v9.9.9", + USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_8", + UUID: "fakeuuid", + }, + } + + with mock.patch("cfn_response.http") as cfn_mocked_PoolManager: + with mock.patch("utils.metrics.http") as metrics_mocked_PoolManager: + execute(event, mock_lambda_context) + call_kwargs = metrics_mocked_PoolManager.request.call_args.kwargs + assert call_kwargs["method"] == "POST" + assert call_kwargs["url"] == METRICS_ENDPOINT + body = json.loads(call_kwargs["body"]) + assert body["Solution"] == "SO0999" + assert body.get("UUID") == "fakeuuid" + if body["Data"]: + assert "AgentBuilderParams" in body["Data"] + agent_builder_params = body["Data"]["AgentBuilderParams"] + + assert "MemoryConfig" in agent_builder_params + assert agent_builder_params["MemoryConfig"]["LongTermEnabled"] == False + + assert "BuiltInToolsCount" in agent_builder_params + assert agent_builder_params["BuiltInToolsCount"] == 3 + assert "BuiltInTools" in agent_builder_params + assert set(agent_builder_params["BuiltInTools"]) == {"calculator", "current_time", "environment"} + + assert "MCPServersCount" in agent_builder_params + assert agent_builder_params["MCPServersCount"] == 1 + assert "MCPServers" in agent_builder_params + assert len(agent_builder_params["MCPServers"]) == 1 + mcp_server = agent_builder_params["MCPServers"][0] + assert mcp_server["Type"] == "runtime" + + assert "SystemPrompt" not in agent_builder_params + assert "Url" not in mcp_server + + assert body["Data"]["UseCaseType"] == "AgentBuilder" + + +@mock_aws +@freeze_time("2000-01-01T00:00:00") +@pytest.mark.parametrize("requestType", ["Create", "Update", "Delete"]) +def test_sending_metric_with_workflow_params(mock_lambda_context, requestType, setup_config_ddb): + event = { + "RequestType": requestType, + "ResponseURL": "https://fakeurl/doesnotexist", + "StackId": "fakeStackId", + "RequestId": "fakeRequestId", + "ResourceType": "Custom::AnonymousMetric", + "LogicalResourceId": "fakeLogicalResourceId", + "PhysicalResourceId": "fake_physical_resource_id", + RESOURCE_PROPERTIES: { + RESOURCE: operation_types.ANONYMOUS_METRIC, + "SolutionId": "SO0999", + "Version": "v9.9.9", + USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_9", + UUID: "fakeuuid", + }, + } + + with mock.patch("cfn_response.http") as cfn_mocked_PoolManager: + with mock.patch("utils.metrics.http") as metrics_mocked_PoolManager: + execute(event, mock_lambda_context) + call_kwargs = metrics_mocked_PoolManager.request.call_args.kwargs + assert call_kwargs["method"] == "POST" + assert call_kwargs["url"] == METRICS_ENDPOINT + body = json.loads(call_kwargs["body"]) + assert body["Solution"] == "SO0999" + assert body.get("UUID") == "fakeuuid" + if body["Data"]: + assert "WorkflowParams" in body["Data"] + workflow_params = body["Data"]["WorkflowParams"] + + assert "OrchestrationPattern" in workflow_params + assert workflow_params["OrchestrationPattern"] == "agents-as-tools" + + assert "AgentsCount" in workflow_params + assert workflow_params["AgentsCount"] == 2 + assert "Agents" in workflow_params + assert len(workflow_params["Agents"]) == 2 + for agent in workflow_params["Agents"]: + assert "Type" in agent + assert agent["Type"] == "AgentBuilder" + + assert "MemoryConfig" in workflow_params + assert workflow_params["MemoryConfig"]["LongTermEnabled"] == False + + assert "SystemPrompt" not in workflow_params + assert "AgentsAsToolsParams" not in workflow_params + + assert body["Data"]["UseCaseType"] == "Workflow" + + +@mock_aws +@freeze_time("2000-01-01T00:00:00") +@pytest.mark.parametrize("requestType", ["Create", "Update"]) +def test_sending_metric_with_provisioned_concurrency(mock_lambda_context, requestType, setup_config_ddb, llm_config_value_with_provisioned_concurrency): + event = { + "RequestType": requestType, + "ResponseURL": "https://fakeurl/doesnotexist", + "StackId": "arn:aws:cloudformation:us-east-1:123456789012:stack/teststack/51af3dc0-da77-11e4-872e-1234567db123", + "RequestId": "5d478078-13e9-baf0-464a-7ef285ecc786", + "LogicalResourceId": "MyTestResource", + "ResourceType": "AWS::CloudFormation::CustomResource", + RESOURCE_PROPERTIES: { + RESOURCE: operation_types.METRIC, + "SolutionId": "SO0999", + "Version": "v9.9.9", + "ServiceToken": "arn:aws:lambda:us-east-1:123456789012:function:fakefunction:1", + USE_CASE_CONFIG_TABLE_NAME: "fake_ddb_table", + USE_CASE_CONFIG_RECORD_KEY: "fake_ddb_table_hash_key_10", + UUID: "fakeuuid", + }, + } + + with mock.patch("cfn_response.http") as cfn_mocked_PoolManager: + with mock.patch("utils.metrics.http") as metrics_mocked_PoolManager: + execute(event, mock_lambda_context) + call_kwargs = metrics_mocked_PoolManager.request.call_args.kwargs + assert call_kwargs["method"] == "POST" + assert call_kwargs["url"] == METRICS_ENDPOINT + body = json.loads(call_kwargs["body"]) + assert body["Solution"] == "SO0999" + assert body.get("UUID") == "fakeuuid" + if body["Data"]: + assert body["Data"]["ProvisionedConcurrencyValue"] == 10 diff --git a/source/lambda/custom-resource/test/operations/test_shared.py b/source/lambda/custom-resource/test/operations/test_shared.py index a51d23a7..b32d22b3 100644 --- a/source/lambda/custom-resource/test/operations/test_shared.py +++ b/source/lambda/custom-resource/test/operations/test_shared.py @@ -2,13 +2,16 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 +import time import zipfile +from test.fixtures.copy_web_ui_events import lambda_event, web_ui_copy_setup +from unittest.mock import Mock, patch import botocore import pytest +from botocore.exceptions import ClientError from operations.operation_types import RESOURCE_PROPERTIES, SOURCE_BUCKET_NAME, SOURCE_PREFIX -from operations.shared import get_zip_archive -from test.fixtures.copy_web_ui_events import lambda_event, web_ui_copy_setup +from operations.shared import MAX_RETRIES, RETRY_DELAY_BASE, TRANSIENT_ERROR_CODES, get_zip_archive, retry_with_backoff def test_get_zip_archive(web_ui_copy_setup): @@ -46,3 +49,113 @@ def test_with_bad_zip_file(tmp_path, web_ui_copy_setup): with pytest.raises(zipfile.error): get_zip_archive(s3_resource, source_bucket_name, source_prefix) + + +def test_retry_successful_function_call_on_first_attempt(): + """Test that a successful function call returns immediately without retries.""" + mock_func = Mock(return_value="success") + + result = retry_with_backoff(mock_func, "arg1", kwarg1="value1") + + assert result == "success" + assert mock_func.call_count == 1 + mock_func.assert_called_with("arg1", kwarg1="value1") + + +def test_retry_successful_function_call_after_retries(): + """Test that function succeeds after some transient failures.""" + mock_func = Mock() + # First two calls fail with transient error, third succeeds + mock_func.side_effect = [ + ClientError({"Error": {"Code": "ThrottlingException", "Message": "Rate exceeded"}}, "TestOperation"), + ClientError( + {"Error": {"Code": "ServiceUnavailableException", "Message": "Service unavailable"}}, "TestOperation" + ), + "success", + ] + + with patch("time.sleep") as mock_sleep: + result = retry_with_backoff(mock_func) + + assert result == "success" + assert mock_func.call_count == 3 + # Verify sleep was called with exponential backoff delays + assert mock_sleep.call_count == 2 + mock_sleep.assert_any_call(RETRY_DELAY_BASE**0) # 3^0 = 1 second (first retry) + mock_sleep.assert_any_call(RETRY_DELAY_BASE**1) # 3^1 = 3 seconds (second retry) + + +def test_retry_non_transient_error_raises_immediately(): + """Test that non-transient errors are raised immediately without retries.""" + mock_func = Mock() + mock_func.side_effect = ClientError( + {"Error": {"Code": "ValidationException", "Message": "Invalid input"}}, "TestOperation" + ) + + with pytest.raises(ClientError) as exc_info: + retry_with_backoff(mock_func) + + assert exc_info.value.response["Error"]["Code"] == "ValidationException" + assert mock_func.call_count == 1 + + +def test_retry_max_retries_exceeded_for_transient_errors(): + """Test that transient errors are retried up to MAX_RETRIES times.""" + mock_func = Mock() + # Always fail with transient error + mock_func.side_effect = ClientError( + {"Error": {"Code": "ThrottlingException", "Message": "Rate exceeded"}}, "TestOperation" + ) + + with patch("time.sleep"): + with pytest.raises(ClientError) as exc_info: + retry_with_backoff(mock_func) + + assert exc_info.value.response["Error"]["Code"] == "ThrottlingException" + # Should be called MAX_RETRIES + 1 times (initial attempt + retries) + assert mock_func.call_count == MAX_RETRIES + 1 + +def test_retry_all_transient_error_codes_are_retried(): + """Test that all error codes in TRANSIENT_ERROR_CODES are retried.""" + for error_code in TRANSIENT_ERROR_CODES: + mock_func = Mock() + mock_func.side_effect = [ + ClientError({"Error": {"Code": error_code, "Message": f"{error_code} occurred"}}, "TestOperation"), + "success", + ] + + with patch("time.sleep"): + result = retry_with_backoff(mock_func) + + assert result == "success" + assert mock_func.call_count == 2 + + +def test_retry_non_client_error_exceptions_are_not_retried(): + """Test that non-ClientError exceptions are raised immediately.""" + mock_func = Mock() + mock_func.side_effect = ValueError("Invalid value") + + with pytest.raises(ValueError) as exc_info: + retry_with_backoff(mock_func) + + assert str(exc_info.value) == "Invalid value" + assert mock_func.call_count == 1 + + +def test_retry_function_arguments_are_passed_correctly(): + """Test that function arguments and kwargs are passed correctly on retries.""" + mock_func = Mock() + mock_func.side_effect = [ + ClientError({"Error": {"Code": "ThrottlingException", "Message": "Rate exceeded"}}, "TestOperation"), + "success", + ] + + with patch("time.sleep"): + result = retry_with_backoff(mock_func, "arg1", "arg2", kwarg1="value1", kwarg2="value2") + + assert result == "success" + assert mock_func.call_count == 2 + # Verify both calls had the same arguments + for call in mock_func.call_args_list: + assert call == (("arg1", "arg2"), {"kwarg1": "value1", "kwarg2": "value2"}) diff --git a/source/lambda/custom-resource/test/test_lambda_func.py b/source/lambda/custom-resource/test/test_lambda_func.py index c862b68c..fd29dfaf 100644 --- a/source/lambda/custom-resource/test/test_lambda_func.py +++ b/source/lambda/custom-resource/test/test_lambda_func.py @@ -19,7 +19,12 @@ def patch_powertools(): os.environ["STACK_NAME"] = "fake_stack_name" -@pytest.mark.parametrize("resource", [operation_types.COPY_WEB_UI, operation_types.UPDATE_LLM_CONFIG, "NOT_SUPPORTED_OPERATION"]) +@pytest.mark.parametrize("resource", [ + operation_types.COPY_WEB_UI, + operation_types.UPDATE_LLM_CONFIG, + operation_types.AGENTCORE_OAUTH_CLIENT, + "NOT_SUPPORTED_OPERATION" +]) def test_get_function_for_operation(resource): if resource == "NOT_SUPPORTED_OPERATION": with pytest.raises(UnSupportedOperationTypeException): diff --git a/source/lambda/custom-resource/test/test_lambda_ops_metrics.py b/source/lambda/custom-resource/test/test_lambda_ops_metrics.py index 3bbc5bb2..c5948124 100644 --- a/source/lambda/custom-resource/test/test_lambda_ops_metrics.py +++ b/source/lambda/custom-resource/test/test_lambda_ops_metrics.py @@ -14,7 +14,7 @@ def test_lambda_handler_success(mock_lambda_context, monkeypatch, caplog): envs = { "UNIT_TEST_ENV": "yes", - "POWERTOOLS_SERVICE_NAME": "ANONYMOUS-CW-METRICS", + "POWERTOOLS_SERVICE_NAME": "CW-METRICS", "SOLUTION_ID": "SO0999", "SOLUTION_VERSION": "v99.99.99", } @@ -38,7 +38,7 @@ def test_lambda_handler_empty_metrics(mock_lambda_context, monkeypatch, caplog): # When error is thrown for env variables not set, it doesn't raise Exception envs = { "UNIT_TEST_ENV": "yes", - "POWERTOOLS_SERVICE_NAME": "ANONYMOUS-CW-METRICS", + "POWERTOOLS_SERVICE_NAME": "CW-METRICS", "SOLUTION_ID": "SO0999", "SOLUTION_VERSION": "v99.99.99", } @@ -47,3 +47,25 @@ def test_lambda_handler_empty_metrics(mock_lambda_context, monkeypatch, caplog): assert handler({}, mock_lambda_context) == None # Making sure to log NOT publishing the metrics assert "Skipping metrics publishing — all metric values are empty." in caplog.text + + +@mock.patch("lambda_ops_metrics.get_metrics_payload", return_value={"fake-metric-1": 5, "fake-metric-2": 10}) +@mock.patch("lambda_ops_metrics.push_builder_metrics") +def test_lambda_handler_includes_account_id(mock_push, mock_get_payload, mock_lambda_context, monkeypatch): + """Test that scheduled metrics include account ID in BuilderMetrics""" + envs = { + "UNIT_TEST_ENV": "yes", + "POWERTOOLS_SERVICE_NAME": "ANONYMOUS-CW-METRICS", + "SOLUTION_ID": "SO0999", + "SOLUTION_VERSION": "v99.99.99", + "USE_CASE_UUID": "test-uuid-123", + } + monkeypatch.setattr(os, "environ", envs) + + handler({}, mock_lambda_context) + + assert mock_push.called + + builder_metrics_arg = mock_push.call_args[0][0] + assert builder_metrics_arg.account_id is not None + assert isinstance(builder_metrics_arg.account_id, str) diff --git a/source/lambda/custom-resource/test/utils/test_agentcore_mcp.py b/source/lambda/custom-resource/test/utils/test_agentcore_mcp.py new file mode 100644 index 00000000..512db692 --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_agentcore_mcp.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch +from utils.agentcore_mcp import AgentcoreMCP + + +class ConcreteAgentcoreMCP(AgentcoreMCP): + """Concrete implementation for testing abstract base class.""" + + def create(self): + return {"status": "created"} + + def update(self): + return {"status": "updated"} + + def delete(self): + return {"status": "deleted"} + + +@patch("utils.agentcore_mcp.get_service_client") +@patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) +def test_initialization_success(mock_get_client): + """Test successful initialization of AgentcoreMCP.""" + mock_client = Mock() + mock_get_client.return_value = mock_client + + config = {"test": "config"} + cognito_pool_id = "us-east-1_ABC123" + + instance = ConcreteAgentcoreMCP(config, cognito_pool_id) + + assert instance.config == config + assert instance.cognito_user_pool_id == cognito_pool_id + assert instance.agentcore_client == mock_client + mock_get_client.assert_called_once_with("bedrock-agentcore-control", region_name="us-east-1") + + +@patch("utils.agentcore_mcp.get_service_client") +@patch.dict("os.environ", {"AWS_REGION": "us-west-2"}) +def test_initialization_client_retry(mock_get_client): + """Test client initialization with retry on first failure.""" + mock_client = Mock() + mock_get_client.side_effect = [Exception("First attempt failed"), mock_client] + + config = {"test": "config"} + cognito_pool_id = "us-west-2_XYZ789" + + instance = ConcreteAgentcoreMCP(config, cognito_pool_id) + + assert instance.agentcore_client == mock_client + assert mock_get_client.call_count == 2 + + +@patch("utils.agentcore_mcp.get_service_client") +@patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) +def test_base_auth_config_with_cognito(mock_get_client): + """Test base_auth_config property with Cognito user pool.""" + mock_get_client.return_value = Mock() + + config = {} + cognito_pool_id = "us-east-1_TEST123" + + instance = ConcreteAgentcoreMCP(config, cognito_pool_id) + auth_config = instance.base_auth_config + + expected_discovery_url = ( + f"https://cognito-idp.us-east-1.amazonaws.com/{cognito_pool_id}/.well-known/openid-configuration" + ) + + assert "authorizerConfiguration" in auth_config + assert "customJWTAuthorizer" in auth_config["authorizerConfiguration"] + assert auth_config["authorizerConfiguration"]["customJWTAuthorizer"]["discoveryUrl"] == expected_discovery_url + assert auth_config["authorizerConfiguration"]["customJWTAuthorizer"]["allowedClients"] == ["-"] + + +@patch("utils.agentcore_mcp.get_service_client") +@patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) +def test_base_auth_config_without_cognito(mock_get_client): + """Test base_auth_config property without Cognito user pool.""" + mock_get_client.return_value = Mock() + + config = {} + cognito_pool_id = None + + instance = ConcreteAgentcoreMCP(config, cognito_pool_id) + auth_config = instance.base_auth_config + + # Should return None when no cognito pool + assert auth_config is None + + +@patch("utils.agentcore_mcp.get_service_client") +@patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) +def test_abstract_methods_implemented(mock_get_client): + """Test that concrete class implements all abstract methods.""" + mock_get_client.return_value = Mock() + + instance = ConcreteAgentcoreMCP({}, "pool-id") + + assert instance.create() == {"status": "created"} + assert instance.update() == {"status": "updated"} + assert instance.delete() == {"status": "deleted"} + + +def test_cannot_instantiate_abstract_class(): + """Test that AgentcoreMCP cannot be instantiated directly.""" + with pytest.raises(TypeError): + AgentcoreMCP({}, "pool-id", ["client"]) diff --git a/source/lambda/custom-resource/test/utils/test_auth_manager.py b/source/lambda/custom-resource/test/utils/test_auth_manager.py new file mode 100644 index 00000000..9fb8df5e --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_auth_manager.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock +from utils.auth_manager import AuthManager +from utils.data import MCPServerData +from utils.constants import EntityType + + +@pytest.fixture +def mock_bedrock_client(): + return Mock() + + +@pytest.fixture +def auth_manager(mock_bedrock_client): + return AuthManager("test-client-id", "test-use-case-id", mock_bedrock_client) + + +@pytest.fixture +def runtime_mcp_server(): + return MCPServerData( + EntityType.RUNTIME.value, + "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Ftest_agentcore_id/invocations?qualifier=DEFAULT" + "test-use-case", + "test-name", + "123456789012" + ) + + +@pytest.fixture +def gateway_mcp_server(): + return MCPServerData( + EntityType.GATEWAY.value, + "https://test-gateway.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "test-use-case", + "test-name", + "123456789012" + ) + + +class TestAuthManager: + + def test_extract_values_regex_valid_arn(self): + arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-id" + entity_type, entity_id = AuthManager.extract_values_regex(arn) + assert entity_type == "runtime" + assert entity_id == "test-id" + + def test_extract_values_regex_invalid_arn(self): + with pytest.raises(ValueError, match="Invalid ARN format"): + AuthManager.extract_values_regex("invalid-arn") + + def test_get_resource_tags(self, auth_manager): + auth_manager.bedrock.list_tags_for_resource.return_value = { + 'tags': {'client1': 'use-case-1', 'client2': 'use-case-2'} + } + + tags = auth_manager._get_resource_tags("test-arn") + + assert tags == {'client1': 'use-case-1', 'client2': 'use-case-2'} + auth_manager.bedrock.list_tags_for_resource.assert_called_once_with(resourceArn="test-arn") + + def test_update_gateway_permissions_add_client(self, auth_manager): + auth_manager.bedrock.get_gateway.return_value = { + 'name': 'test-gateway', + 'description': 'test description', + 'roleArn': 'test-role-arn', + 'protocolType': 'HTTP', + 'authorizerType': 'JWT', + 'authorizerConfiguration': { + 'customJWTAuthorizer': { + 'allowedClients': ['existing-client'] + } + } + } + + auth_manager._update_gateway_permissions("test-gateway-id", True) + + auth_manager.bedrock.update_gateway.assert_called_once() + call_args = auth_manager.bedrock.update_gateway.call_args[1] + assert 'test-client-id' in call_args['authorizerConfiguration']['customJWTAuthorizer']['allowedClients'] + + def test_update_gateway_permissions_remove_client(self, auth_manager): + auth_manager.bedrock.get_gateway.return_value = { + 'name': 'test-gateway', + 'description': 'test description', + 'roleArn': 'test-role-arn', + 'protocolType': 'HTTP', + 'authorizerType': 'JWT', + 'authorizerConfiguration': { + 'customJWTAuthorizer': { + 'allowedClients': ['test-client-id', 'other-client'] + } + } + } + + auth_manager._update_gateway_permissions("test-gateway-id", False) + + auth_manager.bedrock.update_gateway.assert_called_once() + call_args = auth_manager.bedrock.update_gateway.call_args[1] + assert 'test-client-id' not in call_args['authorizerConfiguration']['customJWTAuthorizer']['allowedClients'] + + def test_update_runtime_permissions_add_client(self, auth_manager): + auth_manager.bedrock.get_agent_runtime.return_value = { + 'description': 'test runtime', + 'agentRuntimeArtifact': 'test-artifact', + 'roleArn': 'test-role-arn', + 'networkConfiguration': {}, + 'authorizerConfiguration': { + 'customJWTAuthorizer': { + 'allowedClients': ['existing-client'] + } + } + } + + auth_manager._update_runtime_permissions("test-runtime-id", True) + + auth_manager.bedrock.update_agent_runtime.assert_called_once() + call_args = auth_manager.bedrock.update_agent_runtime.call_args[1] + assert 'test-client-id' in call_args['authorizerConfiguration']['customJWTAuthorizer']['allowedClients'] + + def test_update_runtime_permissions_remove_client(self, auth_manager): + auth_manager.bedrock.get_agent_runtime.return_value = { + 'description': 'test runtime', + 'agentRuntimeArtifact': 'test-artifact', + 'roleArn': 'test-role-arn', + 'networkConfiguration': {}, + 'authorizerConfiguration': { + 'customJWTAuthorizer': { + 'allowedClients': ['test-client-id', 'other-client'] + } + } + } + + auth_manager._update_runtime_permissions("test-runtime-id", False) + + auth_manager.bedrock.update_agent_runtime.assert_called_once() + call_args = auth_manager.bedrock.update_agent_runtime.call_args[1] + assert 'test-client-id' not in call_args['authorizerConfiguration']['customJWTAuthorizer']['allowedClients'] + + def test_add_permission_new_client_tag(self, auth_manager, runtime_mcp_server): + auth_manager._get_resource_tags = Mock(return_value={}) + auth_manager._update_allowed_clients = Mock() + + auth_manager.add_permission(runtime_mcp_server) + + auth_manager.bedrock.tag_resource.assert_called_once_with( + resourceArn=runtime_mcp_server.agentcore_arn, + tags={'test-client-id': 'test-use-case-id'} + ) + auth_manager._update_allowed_clients.assert_called_once_with(runtime_mcp_server, True) + + def test_add_permission_existing_client_tag(self, auth_manager, runtime_mcp_server): + auth_manager._get_resource_tags = Mock(return_value={'test-client-id': 'existing-use-case'}) + auth_manager._update_allowed_clients = Mock() + + auth_manager.add_permission(runtime_mcp_server) + + auth_manager.bedrock.tag_resource.assert_called_once_with( + resourceArn=runtime_mcp_server.agentcore_arn, + tags={'test-client-id': 'existing-use-case:test-use-case-id'} + ) + auth_manager._update_allowed_clients.assert_not_called() + + def test_remove_permission_last_use_case(self, auth_manager, runtime_mcp_server): + auth_manager._get_resource_tags = Mock(return_value={'test-client-id': 'test-use-case-id'}) + auth_manager._update_allowed_clients = Mock() + + auth_manager.remove_permission(runtime_mcp_server) + + auth_manager.bedrock.untag_resource.assert_called_once_with( + resourceArn=runtime_mcp_server.agentcore_arn, + tagKeys=['test-client-id'] + ) + auth_manager._update_allowed_clients.assert_called_once_with(runtime_mcp_server, False) + + def test_remove_permission_multiple_use_cases(self, auth_manager, runtime_mcp_server): + auth_manager._get_resource_tags = Mock(return_value={'test-client-id': 'use-case-1:test-use-case-id:use-case-2'}) + auth_manager._update_allowed_clients = Mock() + + auth_manager.remove_permission(runtime_mcp_server) + + auth_manager.bedrock.tag_resource.assert_called_once_with( + resourceArn=runtime_mcp_server.agentcore_arn, + tags={'test-client-id': 'use-case-1:use-case-2'} + ) + auth_manager._update_allowed_clients.assert_not_called() + + def test_update_allowed_clients_runtime(self, auth_manager, runtime_mcp_server): + auth_manager._update_runtime_permissions = Mock() + + auth_manager._update_allowed_clients(runtime_mcp_server, True) + + auth_manager._update_runtime_permissions.assert_called_once_with(runtime_mcp_server.agentcore_id, True) + + def test_update_allowed_clients_gateway(self, auth_manager, gateway_mcp_server): + auth_manager._update_gateway_permissions = Mock() + + auth_manager._update_allowed_clients(gateway_mcp_server, True) + + auth_manager._update_gateway_permissions.assert_called_once_with(gateway_mcp_server.agentcore_id, True) + + def test_update_allowed_clients_invalid_type(self, auth_manager): + invalid_server = Mock() + invalid_server.type = "invalid" + + with pytest.raises(ValueError, match="Invalid ARN. Type must be gateway or runtime."): + auth_manager._update_allowed_clients(invalid_server, True) diff --git a/source/lambda/custom-resource/test/utils/test_data.py b/source/lambda/custom-resource/test/utils/test_data.py new file mode 100644 index 00000000..d8c28442 --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_data.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import json +from decimal import Decimal +from uuid import uuid4 +from utils.data import AgentCoreUrlParser, DecimalEncoder, BuilderMetrics, MCPServerData +from utils.constants import EntityType + + +class TestAgentCoreUrlParser: + + def test_extract_runtime_id_valid_url(self): + url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Ftest_agentcore_id/invocations?qualifier=DEFAULT" + result = AgentCoreUrlParser.extract_runtime_id(url) + assert result == "test_agentcore_id" + + def test_extract_runtime_id_invalid_url(self): + with pytest.raises(ValueError, match="Runtime ID could not be extracted"): + AgentCoreUrlParser.extract_runtime_id("invalid-url") + + def test_extract_gateway_id_valid_url(self): + url = "https://test-gateway.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp" + result = AgentCoreUrlParser.extract_gateway_id(url) + assert result == "test-gateway" + + def test_extract_gateway_id_invalid_url(self): + with pytest.raises(ValueError, match="Gateway ID could not be extracted"): + AgentCoreUrlParser.extract_gateway_id("invalid-url") + + def test_extract_runtime_arn_valid_url(self): + url = "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Ftest-runtime/invocations?qualifier=DEFAULT" + result = AgentCoreUrlParser.extract_runtime_arn(url) + assert result == "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime" + + def test_extract_runtime_arn_invalid_url(self): + with pytest.raises(ValueError, match="ARN could not be extracted"): + AgentCoreUrlParser.extract_runtime_arn("invalid-url") + + def test_construct_gateway_arn_valid_url(self): + url = "https://test-gateway.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp" + account_id = "123456789012" + result = AgentCoreUrlParser.construct_gateway_arn(url, account_id) + assert result == "arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/test-gateway" + + def test_construct_gateway_arn_invalid_url(self): + with pytest.raises(ValueError, match="Invalid gateway URL format"): + AgentCoreUrlParser.construct_gateway_arn("invalid-url", "123456789012") + + +class TestDecimalEncoder: + + def test_encode_decimal(self): + data = {"value": Decimal("123.45")} + result = json.dumps(data, cls=DecimalEncoder) + assert result == '{"value": 123.45}' + + def test_encode_regular_types(self): + data = {"string": "test", "int": 123, "float": 45.67} + result = json.dumps(data, cls=DecimalEncoder) + assert '"string": "test"' in result + assert '"int": 123' in result + assert '"float": 45.67' in result + + +class TestBuilderMetrics: + + def test_init_valid_params(self): + test_uuid = uuid4() + metrics = BuilderMetrics(test_uuid, "test-solution", "1.0.0", {"key": "value"}) + + assert metrics.uuid == test_uuid + assert metrics.solution_id == "test-solution" + assert metrics.version == "1.0.0" + assert metrics.data == {"key": "value"} + assert metrics.timestamp is not None + + def test_init_no_data(self): + test_uuid = uuid4() + metrics = BuilderMetrics(test_uuid, "test-solution", "1.0.0") + + assert metrics.data == {} + + def test_post_init_invalid_solution_id(self): + test_uuid = uuid4() + with pytest.raises(TypeError, match="Expected .* to be a str"): + metrics = BuilderMetrics(test_uuid, 123, "1.0.0") + metrics.__post_init__() + + def test_post_init_invalid_version(self): + test_uuid = uuid4() + with pytest.raises(TypeError, match="Expected .* to be a str"): + metrics = BuilderMetrics(test_uuid, "test-solution", 123) + metrics.__post_init__() + + def test_post_init_invalid_data(self): + test_uuid = uuid4() + with pytest.raises(TypeError, match="Expected .* to be a dict"): + metrics = BuilderMetrics(test_uuid, "test-solution", "1.0.0", "invalid") + metrics.__post_init__() + + def test_post_init_invalid_uuid(self): + with pytest.raises(TypeError, match="Expected .* to be a UUID"): + metrics = BuilderMetrics("invalid-uuid", "test-solution", "1.0.0") + metrics.__post_init__() + + +class TestMCPServerData: + + def test_init_runtime_valid(self): + server = MCPServerData( + EntityType.RUNTIME.value, + "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Ftest-runtime/invocations?qualifier=DEFAULT", + "test-use-case", + "test-name", + "123456789012" + ) + + assert server.type == EntityType.RUNTIME.value + assert server.agentcore_id == "test-runtime" + assert server.agentcore_arn == "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime" + + def test_init_gateway_valid(self): + server = MCPServerData( + EntityType.GATEWAY.value, + "https://test-gateway.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "test-use-case", + "test-name", + "123456789012" + ) + + assert server.type == EntityType.GATEWAY.value + assert server.agentcore_id == "test-gateway" + assert server.agentcore_arn == "arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/test-gateway" + + def test_init_gateway_no_account_id(self): + with pytest.raises(ValueError, match="Account ID is required"): + MCPServerData( + EntityType.GATEWAY.value, + "https://test-gateway.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "test-use-case", + "test-name" + ) + + def test_init_invalid_type(self): + with pytest.raises(ValueError, match="Invalid type"): + MCPServerData( + "invalid-type", + "test-url", + "test-use-case", + "test-name", + "123456789012" + ) + + def test_extract_id_runtime(self): + server = MCPServerData( + EntityType.RUNTIME.value, + "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Ftest-runtime/invocations?qualifier=DEFAULT", + "test-use-case", + "test-name", + "123456789012" + ) + assert server._extract_id() == "test-runtime" + + def test_extract_id_gateway(self): + server = MCPServerData( + EntityType.GATEWAY.value, + "https://test-gateway.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "test-use-case", + "test-name", + "123456789012" + ) + assert server._extract_id() == "test-gateway" + + def test_construct_arn_runtime(self): + server = MCPServerData( + EntityType.RUNTIME.value, + "https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Ftest-runtime/invocations?qualifier=DEFAULT", + "test-use-case", + "test-name", + "123456789012" + ) + result = server._construct_arn("123456789012") + assert result == "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/test-runtime" + + def test_construct_arn_gateway(self): + server = MCPServerData( + EntityType.GATEWAY.value, + "https://test-gateway.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "test-use-case", + "test-name", + "123456789012" + ) + result = server._construct_arn("123456789012") + assert result == "arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/test-gateway" diff --git a/source/lambda/custom-resource/test/utils/test_gateway_mcp.py b/source/lambda/custom-resource/test/utils/test_gateway_mcp.py new file mode 100644 index 00000000..ed1028b4 --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_gateway_mcp.py @@ -0,0 +1,350 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch +from utils.gateway_mcp import GatewayMCP + + +@pytest.fixture +def gateway_mcp_factory(): + """Factory fixture to create GatewayMCP instances.""" + @patch("utils.agentcore_mcp.get_service_client") + @patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) + def _create_gateway(mock_get_client, gateway_id=None): + mock_client = Mock() + mock_get_client.return_value = mock_client + + config = { + "use_case_description": "Test gateway description", + "target_params": [ + { + "TargetName": "test-lambda", + "TargetType": "lambda", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test", + "SchemaUri": "schemas/lambda-schema.json", + } + ], + } + + gateway = GatewayMCP( + config=config, + cognito_user_pool_id="us-east-1_ABC123", + gateway_role_arn="arn:aws:iam::123456789012:role/gateway-role", + gateway_name="test-gateway", + schema_bucket_name="test-bucket", + gateway_id=gateway_id, + ) + + return gateway, mock_client + + return _create_gateway + + +def test_initialization(gateway_mcp_factory): + """Test GatewayMCP initialization.""" + gateway, _ = gateway_mcp_factory() + + assert gateway.gateway_name == "test-gateway" + assert gateway.gateway_role_arn == "arn:aws:iam::123456789012:role/gateway-role" + assert gateway.schema_bucket_name == "test-bucket" + assert gateway.gateway_id is None + assert gateway.gateway_arn is None + assert gateway.gateway_url is None + assert gateway.targets == [] + + +def test_base_gateway_params(gateway_mcp_factory): + """Test base_gateway_params property.""" + gateway, _ = gateway_mcp_factory() + + params = gateway.base_gateway_params + + assert params["name"] == "test-gateway" + assert params["roleArn"] == "arn:aws:iam::123456789012:role/gateway-role" + assert params["protocolType"] == "MCP" + assert params["exceptionLevel"] == "DEBUG" + assert params["description"] == "Test gateway description" + assert params["authorizerType"] == "CUSTOM_JWT" + + +def test_create_gateway_params(gateway_mcp_factory): + """Test create_gateway_params property.""" + gateway, _ = gateway_mcp_factory() + + params = gateway.create_gateway_params + + assert "clientToken" in params + assert "name" in params + assert "roleArn" in params + + +def test_update_gateway_params(gateway_mcp_factory): + """Test update_gateway_params property.""" + gateway, _ = gateway_mcp_factory(gateway_id="gateway-123") + + params = gateway.update_gateway_params + + assert params["gatewayIdentifier"] == "gateway-123" + assert "name" in params + assert "roleArn" in params + + +def test_gateway_auth_config(gateway_mcp_factory): + """Test gateway_auth_config property.""" + gateway, _ = gateway_mcp_factory() + + auth_config = gateway.gateway_auth_config + + assert auth_config["authorizerType"] == "CUSTOM_JWT" + assert "authorizerConfiguration" in auth_config + + +@patch("utils.gateway_mcp.retry_with_backoff") +@patch("utils.gateway_mcp.MCPGatewayFactory") +def test_create_success(mock_factory, mock_retry, gateway_mcp_factory): + """Test successful gateway creation.""" + gateway, mock_client = gateway_mcp_factory() + + # Mock gateway creation + mock_retry.side_effect = [ + { + "gatewayId": "gateway-abc123", + "gatewayArn": "arn:aws:bedrock:us-east-1:123456789012:gateway/gateway-abc123", + "gatewayUrl": "https://gateway-abc123.execute-api.us-east-1.amazonaws.com", + }, + {"status": "READY"}, + { + "targetId": "target-123", + "targetArn": "arn:aws:bedrock:us-east-1:123456789012:target/target-123", + "status": "ACTIVE", + }, + ] + + # Mock get_gateway for wait + mock_client.get_gateway.return_value = {"status": "READY"} + + # Mock target creator + mock_target_creator = Mock() + mock_target_creator.create_target_configuration.return_value = {"lambda": {}} + mock_target_creator.build_credential_provider_configurations.return_value = [] + mock_factory.create_target_creator.return_value = mock_target_creator + mock_factory.validate_all_targets.return_value = [mock_target_creator] + + # Mock policy manager to avoid IAM calls + with patch.object(gateway.policy_manager, 'gateway_policy_factory'): + gateway.create() + + assert gateway.gateway_id == "gateway-abc123" + assert gateway.gateway_arn == "arn:aws:bedrock:us-east-1:123456789012:gateway/gateway-abc123" + assert gateway.gateway_url == "https://gateway-abc123.execute-api.us-east-1.amazonaws.com" + assert len(gateway.targets) == 1 + + +@patch("utils.gateway_mcp.retry_with_backoff") +def test_create_failure(mock_retry, gateway_mcp_factory): + """Test gateway creation failure.""" + gateway, _ = gateway_mcp_factory() + + mock_retry.side_effect = Exception("API Error") + + with pytest.raises(Exception): + gateway.create() + + +@patch("utils.gateway_mcp.retry_with_backoff") +@patch("utils.gateway_mcp.MCPGatewayFactory") +def test_create_targets_success(mock_factory, mock_retry, gateway_mcp_factory): + """Test successful target creation.""" + gateway, _ = gateway_mcp_factory(gateway_id="gateway-123") + + # Mock target creator + mock_target_creator = Mock() + mock_target_creator.create_target_configuration.return_value = {"lambda": {}} + mock_target_creator.build_credential_provider_configurations.return_value = [] + mock_factory.create_target_creator.return_value = mock_target_creator + mock_factory.validate_all_targets.return_value = [mock_target_creator] + + mock_retry.return_value = { + "targetId": "target-123", + "targetArn": "arn:aws:bedrock:us-east-1:123456789012:target/target-123", + "status": "ACTIVE", + } + + # Mock policy manager to avoid IAM calls + with patch.object(gateway.policy_manager, 'gateway_policy_factory'): + gateway.create_targets() + + assert len(gateway.targets) == 1 + assert gateway.targets[0]["targetId"] == "target-123" + assert gateway.targets[0]["targetName"] == "test-lambda" + + +@patch("utils.gateway_mcp.retry_with_backoff") +def test_delete_targets_success(mock_retry, gateway_mcp_factory): + """Test successful target deletion.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.list_gateway_targets.side_effect = [ + {"items": [{"targetId": "target-1", "name": "target-1"}]}, + {"items": []}, # After deletion + ] + + gateway.delete_targets() + + mock_retry.assert_called_once() + assert mock_client.list_gateway_targets.call_count == 2 + + +@patch("utils.gateway_mcp.retry_with_backoff") +def test_update_success(mock_retry, gateway_mcp_factory): + """Test successful gateway update.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.get_gateway.side_effect = [ + { + "gatewayArn": "arn:aws:bedrock:us-east-1:123456789012:gateway/gateway-123", + "gatewayUrl": "https://gateway-123.execute-api.us-east-1.amazonaws.com", + "description": "Old description", + "status": "READY", + "authorizerConfiguration": { + "customJWTAuthorizer": { + "allowedClients": ["-"] + } + } + }, + {"status": "READY"}, + ] + + mock_client.list_gateway_targets.return_value = {"items": []} + + mock_retry.return_value = {"status": "READY"} + + # Mock target creation + with patch.object(gateway, "create_targets"): + gateway.update() + + assert gateway.gateway_arn == "arn:aws:bedrock:us-east-1:123456789012:gateway/gateway-123" + assert gateway.gateway_url == "https://gateway-123.execute-api.us-east-1.amazonaws.com" + + +def test_update_missing_gateway_id(gateway_mcp_factory): + """Test gateway update without gateway ID.""" + gateway, _ = gateway_mcp_factory() + + with pytest.raises(RuntimeError, match="Failed to update MCP gateway.*Gateway ID is required for update operation"): + gateway.update() + + +@patch("utils.gateway_mcp.retry_with_backoff") +def test_update_failure(mock_retry, gateway_mcp_factory): + """Test gateway update failure.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.get_gateway.side_effect = Exception("API Error") + + with pytest.raises(RuntimeError, match="Failed to update MCP gateway"): + gateway.update() + + +def test_delete_success(gateway_mcp_factory): + """Test successful gateway deletion.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.list_gateway_targets.return_value = {"items": []} + + gateway.delete() + + mock_client.delete_gateway.assert_called_once_with(gatewayIdentifier="gateway-123") + + +def test_delete_missing_gateway_id(gateway_mcp_factory): + """Test gateway deletion without gateway ID.""" + gateway, mock_client = gateway_mcp_factory() + + # When gateway_id is None, it will try to delete and fail + # This should raise a RuntimeError + with pytest.raises(RuntimeError, match="Failed to delete MCP gateway"): + gateway.delete() + + +def test_delete_failure(gateway_mcp_factory): + """Test gateway deletion failure.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.list_gateway_targets.side_effect = Exception("API Error") + + with pytest.raises(RuntimeError, match="Failed to delete MCP gateway"): + gateway.delete() + + +def test_to_dict(gateway_mcp_factory): + """Test to_dict method.""" + gateway, _ = gateway_mcp_factory(gateway_id="gateway-123") + gateway.gateway_arn = "arn:aws:bedrock:us-east-1:123456789012:gateway/gateway-123" + gateway.gateway_url = "https://gateway-123.execute-api.us-east-1.amazonaws.com" + gateway.targets = [ + {"targetId": "target-1", "targetName": "test-target", "targetType": "lambda", "status": "ACTIVE"} + ] + + result = gateway.to_dict() + + assert result["GatewayId"] == "gateway-123" + assert result["GatewayArn"] == "arn:aws:bedrock:us-east-1:123456789012:gateway/gateway-123" + assert result["GatewayUrl"] == "https://gateway-123.execute-api.us-east-1.amazonaws.com" + assert result["GatewayName"] == "test-gateway" + assert result["TargetCount"] == 1 + assert len(result["Targets"]) == 1 + + +def test_wait_for_gateway_active_success(gateway_mcp_factory): + """Test _wait_for_gateway_active with successful transition.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.get_gateway.return_value = {"status": "READY"} + + result = gateway._wait_for_gateway_active(max_wait_time=10, poll_interval=1) + + assert result is True + + +def test_wait_for_gateway_active_timeout(gateway_mcp_factory): + """Test _wait_for_gateway_active with timeout.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.get_gateway.return_value = {"status": "CREATING"} + + with pytest.raises(TimeoutError, match="Gateway did not become READY"): + gateway._wait_for_gateway_active(max_wait_time=2, poll_interval=1) + + +def test_wait_for_gateway_active_failed_state(gateway_mcp_factory): + """Test _wait_for_gateway_active with failed state.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.get_gateway.return_value = {"status": "FAILED"} + + with pytest.raises(RuntimeError, match="Gateway entered terminal state: FAILED"): + gateway._wait_for_gateway_active(max_wait_time=10, poll_interval=1) + + +def test_wait_for_targets_cleared_success(gateway_mcp_factory): + """Test _wait_for_targets_cleared with successful clearing.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.list_gateway_targets.return_value = {"items": []} + + result = gateway._wait_for_targets_cleared(max_wait_time=10, poll_interval=1) + + assert result is True + + +def test_wait_for_targets_cleared_timeout(gateway_mcp_factory): + """Test _wait_for_targets_cleared with timeout.""" + gateway, mock_client = gateway_mcp_factory(gateway_id="gateway-123") + + mock_client.list_gateway_targets.return_value = {"items": [{"targetId": "target-1"}]} + + with pytest.raises(TimeoutError, match="Targets still present after timeout"): + gateway._wait_for_targets_cleared(max_wait_time=2, poll_interval=1) diff --git a/source/lambda/custom-resource/test/utils/test_lambda_target_creator.py b/source/lambda/custom-resource/test/utils/test_lambda_target_creator.py new file mode 100644 index 00000000..2269879f --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_lambda_target_creator.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from utils.lambda_target_creator import LambdaTargetCreator + + +class TestLambdaTargetCreator: + + def test_initialization(self): + config = { + "TargetName": "test-lambda", + "TargetType": "lambda", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test-function", + "SchemaUri": "schemas/lambda-schema.json", + } + + creator = LambdaTargetCreator(config, "test-bucket") + assert creator.target_name == "test-lambda" + assert creator.target_type == "lambda" + assert creator.lambda_arn == "arn:aws:lambda:us-east-1:123456789012:function:test-function" + assert creator.schema_uri == "schemas/lambda-schema.json" + + def test_validate_configuration_success(self): + config = { + "TargetName": "test-lambda", + "TargetType": "lambda", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test-function", + "SchemaUri": "schemas/lambda-schema.json", + } + + creator = LambdaTargetCreator(config, "test-bucket") + assert creator.validate_configuration() is True + + def test_validate_configuration_missing_name(self): + config = { + "TargetType": "lambda", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test-function", + "SchemaUri": "schemas/lambda-schema.json", + } + + creator = LambdaTargetCreator(config, "test-bucket") + with pytest.raises(ValueError, match="TargetName and LambdaArn are required"): + creator.validate_configuration() + + def test_validate_configuration_missing_arn(self): + config = {"TargetName": "test-lambda", "TargetType": "lambda", "SchemaUri": "schemas/lambda-schema.json"} + + creator = LambdaTargetCreator(config, "test-bucket") + with pytest.raises(ValueError, match="TargetName and LambdaArn are required"): + creator.validate_configuration() + + def test_create_target_configuration(self): + config = { + "TargetName": "test-lambda", + "TargetType": "lambda", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test-function", + "SchemaUri": "schemas/lambda-schema.json", + } + + creator = LambdaTargetCreator(config, "test-bucket") + result = creator.create_target_configuration() + + expected = { + "lambda": { + "lambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test-function", + "toolSchema": {"s3": {"uri": "s3://test-bucket/schemas/lambda-schema.json"}}, + } + } + + assert result == expected + + def test_build_credential_provider_configurations(self): + config = { + "TargetName": "test-lambda", + "TargetType": "lambda", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test-function", + "SchemaUri": "schemas/lambda-schema.json", + } + + creator = LambdaTargetCreator(config, "test-bucket") + result = creator.build_credential_provider_configurations() + + expected = [{"credentialProviderType": "GATEWAY_IAM_ROLE"}] + assert result == expected \ No newline at end of file diff --git a/source/lambda/custom-resource/test/utils/test_mcp_config_manager.py b/source/lambda/custom-resource/test/utils/test_mcp_config_manager.py new file mode 100644 index 00000000..d60e41ee --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_mcp_config_manager.py @@ -0,0 +1,770 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import os +from unittest.mock import Mock, patch +from boto3.dynamodb.types import TypeDeserializer, TypeSerializer + +from utils.mcp_config_manager import MCPConfigManager + + +# Fixtures +@pytest.fixture +def mock_ddb_client(): + """Mock DynamoDB client fixture.""" + return Mock() + + +@pytest.fixture +def sample_config(): + """Sample MCP configuration for testing.""" + return { + "UseCaseType": "MCPServer", + "UseCaseName": "test-mcp", + "UseCaseDescription": "Test MCP Gateway", + "MCPParams": { + "GatewayParams": { + "TargetParams": [ + { + "TargetName": "test-lambda", + "TargetType": "lambda", + "SchemaUri": "mcp/schemas/lambda/test-schema.json", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test-function", + "TargetDescription": "Test Lambda target", + }, + { + "TargetName": "test-openapi", + "TargetType": "openApiSchema", + "SchemaUri": "mcp/schemas/openapi/test-schema.json", + "TargetDescription": "Test OpenAPI target", + }, + ] + } + }, + } + + +@pytest.fixture +def sample_gateway_result(): + """Sample gateway creation result for testing.""" + return { + "gateway_id": "test-gateway-123", + "gateway_arn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/test-gateway-123", + "gateway_name": "test-mcp", + "gateway_url": "https://test-gateway-123.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "targets": [ + {"targetId": "target-456", "targetName": "test-lambda", "targetType": "lambda"}, + {"targetId": "target-789", "targetName": "test-openapi", "targetType": "openApiSchema"}, + ], + } + + +@pytest.fixture +def ddb_response_item(sample_config): + """DynamoDB response item fixture.""" + serializer = TypeSerializer() + serialized_config = serializer.serialize(sample_config) + return {"Item": {"key": {"S": "test-key"}, "config": serialized_config}} + + +@pytest.fixture +def sample_runtime_config(): + """Sample MCP runtime configuration for testing.""" + return { + "UseCaseType": "MCPServer", + "UseCaseName": "test-use-case", + "UseCaseDescription": "Test MCP use case", + "MCPParams": { + "RuntimeParams": { + "EcrUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest", + "EnvironmentVariables": {"ENV_VAR_1": "value1", "ENV_VAR_2": "value2"}, + } + }, + } + + +@patch("utils.mcp_config_manager.get_service_client") +def test_init_with_table_name(mock_get_service_client, mock_ddb_client): + """Test MCPConfigManager initialization with explicit table name.""" + mock_get_service_client.return_value = mock_ddb_client + + manager = MCPConfigManager(table_name="test-table") + + assert manager.table_name == "test-table" + assert manager.ddb_client == mock_ddb_client + assert isinstance(manager.deserializer, TypeDeserializer) + assert isinstance(manager.serializer, TypeSerializer) + mock_get_service_client.assert_called_once_with("dynamodb") + + +@patch.dict(os.environ, {"USE_CASE_CONFIG_TABLE_NAME": "env-table"}) +@patch("utils.mcp_config_manager.get_service_client") +def test_init_with_env_var(mock_get_service_client, mock_ddb_client): + """Test MCPConfigManager initialization with environment variable.""" + mock_get_service_client.return_value = mock_ddb_client + + manager = MCPConfigManager() + + assert manager.table_name == "env-table" + + +@patch("utils.mcp_config_manager.get_service_client") +def test_init_missing_table_name(mock_get_service_client): + """Test MCPConfigManager initialization fails without table name.""" + mock_get_service_client.return_value = Mock() + + with patch.dict(os.environ, {}, clear=True): + with pytest.raises(ValueError, match="Table name must be provided"): + MCPConfigManager() + + +# read_mcp_config Tests +@patch("utils.mcp_config_manager.get_service_client") +def test_read_mcp_config_success(mock_get_service_client, mock_ddb_client, ddb_response_item, sample_config): + """Test successful config reading from DynamoDB.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.return_value = ddb_response_item + + manager = MCPConfigManager(table_name="test-table") + result = manager.read_mcp_config("test-key") + + assert result == sample_config + mock_ddb_client.get_item.assert_called_once_with(TableName="test-table", Key={"key": {"S": "test-key"}}) + + +@patch("utils.mcp_config_manager.get_service_client") +def test_read_mcp_config_not_found(mock_get_service_client, mock_ddb_client): + """Test read_mcp_config when item is not found.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.return_value = {} # No Item key + + manager = MCPConfigManager(table_name="test-table") + + with pytest.raises(ValueError, match="Configuration not found for key: nonexistent-key"): + manager.read_mcp_config("nonexistent-key") + + +@patch("utils.mcp_config_manager.get_service_client") +def test_read_mcp_config_dynamodb_error(mock_get_service_client, mock_ddb_client): + """Test read_mcp_config when DynamoDB operation fails.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.side_effect = Exception("DynamoDB error") + + manager = MCPConfigManager(table_name="test-table") + + with pytest.raises(RuntimeError, match="Failed to read configuration"): + manager.read_mcp_config("test-key") + + +# write_config Tests +@patch("utils.mcp_config_manager.get_service_client") +def test_write_config_success(mock_get_service_client, mock_ddb_client, sample_config): + """Test successful config writing to DynamoDB.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.put_item.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}} + + manager = MCPConfigManager(table_name="test-table") + result = manager.write_config("test-key", sample_config) + + assert result["success"] is True + mock_ddb_client.put_item.assert_called_once() + + # Verify the call arguments + call_args = mock_ddb_client.put_item.call_args[1] + assert call_args["TableName"] == "test-table" + assert call_args["Item"]["key"]["S"] == "test-key" + assert "config" in call_args["Item"] + + +# validate_mcp_gateway_config Tests +def test_validate_mcp_gateway_config_success(sample_config): + """Test successful MCP gateway config validation.""" + manager = MCPConfigManager(table_name="test-table") + result = manager.validate_mcp_gateway_config(sample_config) + + assert result["use_case_name"] == "test-mcp" + assert result["use_case_description"] == "Test MCP Gateway" + assert len(result["target_params"]) == 2 + assert result["target_params"][0]["TargetName"] == "test-lambda" + assert result["target_params"][1]["TargetName"] == "test-openapi" + + +def test_validate_mcp_gateway_config_invalid_use_case_type(): + """Test validate_mcp_gateway_config with invalid UseCaseType.""" + manager = MCPConfigManager(table_name="test-table") + config = {"UseCaseType": "InvalidType"} + + with pytest.raises(ValueError, match="Invalid UseCaseType: InvalidType"): + manager.validate_mcp_gateway_config(config) + + +def test_validate_mcp_gateway_config_missing_mcp_params(): + """Test validate_mcp_gateway_config with missing MCPParams.""" + manager = MCPConfigManager(table_name="test-table") + config = {"UseCaseType": "MCPServer"} + + with pytest.raises(ValueError, match="MCPParams not found in configuration"): + manager.validate_mcp_gateway_config(config) + + +def test_validate_mcp_gateway_config_missing_gateway_params(): + """Test validate_mcp_gateway_config with missing GatewayParams.""" + manager = MCPConfigManager(table_name="test-table") + config = {"UseCaseType": "MCPServer", "MCPParams": {"GatewayParams": None}} + + with pytest.raises(ValueError, match="GatewayParams not found in MCPParams"): + manager.validate_mcp_gateway_config(config) + + +def test_validate_mcp_gateway_config_missing_target_params(): + """Test validate_mcp_gateway_config with missing TargetParams.""" + manager = MCPConfigManager(table_name="test-table") + config = {"UseCaseType": "MCPServer", "MCPParams": {"GatewayParams": {"TargetParams": None}}} + + with pytest.raises(ValueError, match="TargetParams not found in GatewayParams"): + manager.validate_mcp_gateway_config(config) + + +def test_validate_mcp_gateway_config_empty_target_params(): + """Test validate_mcp_gateway_config with empty TargetParams.""" + manager = MCPConfigManager(table_name="test-table") + config = {"UseCaseType": "MCPServer", "MCPParams": {"GatewayParams": {"TargetParams": []}}} + + # Empty list is falsy, so it triggers "TargetParams not found" first + with pytest.raises(ValueError, match="TargetParams not found in GatewayParams"): + manager.validate_mcp_gateway_config(config) + + +# validate_target_params Tests +def test_validate_target_params_success_lambda(): + """Test successful lambda target validation.""" + manager = MCPConfigManager(table_name="test-table") + target = { + "TargetName": "test-lambda", + "TargetType": "lambda", + "SchemaUri": "test-schema.json", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:test", + } + + # Should not raise any exception + manager.validate_target_params(target, 0) + + +def test_validate_target_params_success_openapi(): + """Test successful OpenAPI target validation.""" + manager = MCPConfigManager(table_name="test-table") + target = {"TargetName": "test-openapi", "TargetType": "openApiSchema", "SchemaUri": "test-schema.json"} + + # Should not raise any exception + manager.validate_target_params(target, 0) + + +def test_validate_target_params_missing_required_field(): + """Test validate_target_params with missing required field.""" + manager = MCPConfigManager(table_name="test-table") + target = { + "TargetName": "test-target", + "TargetType": "lambda", + # Missing SchemaUri + } + + with pytest.raises(ValueError, match="Required field 'SchemaUri' missing in target 0"): + manager.validate_target_params(target, 0) + + +def test_validate_target_params_invalid_target_type(): + """Test validate_target_params with invalid target type.""" + manager = MCPConfigManager(table_name="test-table") + target = {"TargetName": "test-target", "TargetType": "invalid", "SchemaUri": "test-schema.json"} + + with pytest.raises(ValueError, match="Invalid TargetType: invalid. Must be one of: lambda, openapi, smithyModel"): + manager.validate_target_params(target, 0) + + +def test_validate_target_params_lambda_missing_arn(): + """Test validate_target_params for lambda target missing LambdaArn.""" + manager = MCPConfigManager(table_name="test-table") + target = { + "TargetName": "test-lambda", + "TargetType": "lambda", + "SchemaUri": "test-schema.json", + # Missing LambdaArn + } + + with pytest.raises(ValueError, match="LambdaArn required for lambda target 0"): + manager.validate_target_params(target, 0) + + +# get_mcp_gateway_config Tests +@patch("utils.mcp_config_manager.get_service_client") +def test_get_mcp_gateway_config_success(mock_get_service_client, mock_ddb_client, ddb_response_item): + """Test successful get_mcp_gateway_config flow.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.return_value = ddb_response_item + + manager = MCPConfigManager(table_name="test-table") + result = manager.get_mcp_gateway_config("test-key") + + assert result["use_case_name"] == "test-mcp" + assert len(result["target_params"]) == 2 + + +@patch("utils.mcp_config_manager.get_service_client") +def test_get_mcp_gateway_config_validation_failure(mock_get_service_client, mock_ddb_client): + """Test get_mcp_gateway_config with validation failure.""" + mock_get_service_client.return_value = mock_ddb_client + + # Invalid config + serializer = TypeSerializer() + invalid_config = {"UseCaseType": "InvalidType"} + mock_ddb_client.get_item.return_value = { + "Item": {"key": {"S": "test-key"}, "config": serializer.serialize(invalid_config)} + } + + manager = MCPConfigManager(table_name="test-table") + + with pytest.raises(ValueError, match="Invalid UseCaseType"): + manager.get_mcp_gateway_config("test-key") + + +# validate_runtime_params Tests +def test_validate_runtime_params_success(): + """Test successful runtime params validation.""" + manager = MCPConfigManager(table_name="test-table") + runtime_params = { + "EcrUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest", + "EnvironmentVariables": {"ENV_VAR_1": "value1", "ENV_VAR_2": "value2"}, + } + + # Should not raise any exception + manager.validate_runtime_params(runtime_params) + + +def test_validate_runtime_params_missing_ecr_uri(): + """Test validate_runtime_params with missing EcrUri.""" + manager = MCPConfigManager(table_name="test-table") + runtime_params = {"EnvironmentVariables": {"ENV_VAR_1": "value1"}} + + with pytest.raises(ValueError, match="Required field 'EcrUri' missing in RuntimeParams"): + manager.validate_runtime_params(runtime_params) + + +def test_validate_runtime_params_invalid_ecr_uri(): + """Test validate_runtime_params with invalid EcrUri.""" + manager = MCPConfigManager(table_name="test-table") + runtime_params = {"EcrUri": ""} # Empty string + + with pytest.raises(ValueError, match="EcrUri must be a non-empty string"): + manager.validate_runtime_params(runtime_params) + + +def test_validate_runtime_params_invalid_env_vars_type(): + """Test validate_runtime_params with invalid EnvironmentVariables type.""" + manager = MCPConfigManager(table_name="test-table") + runtime_params = { + "EcrUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest", + "EnvironmentVariables": "not-a-dict", + } + + with pytest.raises(ValueError, match="EnvironmentVariables must be a dictionary"): + manager.validate_runtime_params(runtime_params) + + +def test_validate_runtime_params_invalid_env_var_key(): + """Test validate_runtime_params with invalid environment variable key.""" + manager = MCPConfigManager(table_name="test-table") + runtime_params = { + "EcrUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest", + "EnvironmentVariables": {"": "value1"}, # Empty key + } + + with pytest.raises(ValueError, match="Environment variable names must be non-empty strings"): + manager.validate_runtime_params(runtime_params) + + +def test_validate_runtime_params_invalid_env_var_value(): + """Test validate_runtime_params with invalid environment variable value.""" + manager = MCPConfigManager(table_name="test-table") + runtime_params = { + "EcrUri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest", + "EnvironmentVariables": {"ENV_VAR_1": 123}, # Non-string value + } + + with pytest.raises(ValueError, match="Environment variable 'ENV_VAR_1' value must be a string"): + manager.validate_runtime_params(runtime_params) + + +# validate_mcp_runtime_config Tests +def test_validate_mcp_runtime_config_success(sample_runtime_config): + """Test successful MCP runtime config validation.""" + manager = MCPConfigManager(table_name="test-table") + result = manager.validate_mcp_runtime_config(sample_runtime_config) + + assert result["use_case_name"] == "test-use-case" + assert result["use_case_description"] == "Test MCP use case" + assert result["ecr_uri"] == "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest" + assert result["environment_variables"]["ENV_VAR_1"] == "value1" + assert result["environment_variables"]["ENV_VAR_2"] == "value2" + + +def test_validate_mcp_runtime_config_invalid_use_case_type(): + """Test validate_mcp_runtime_config with invalid UseCaseType.""" + manager = MCPConfigManager(table_name="test-table") + config = {"UseCaseType": "InvalidType"} + + with pytest.raises(ValueError, match="Invalid UseCaseType: InvalidType"): + manager.validate_mcp_runtime_config(config) + + +def test_validate_mcp_runtime_config_missing_runtime_params(): + """Test validate_mcp_runtime_config with missing RuntimeParams.""" + manager = MCPConfigManager(table_name="test-table") + config = {"UseCaseType": "MCPServer", "MCPParams": {"SomeOtherParam": "value"}} # RuntimeParams is missing + + with pytest.raises(ValueError) as exc_info: + manager.validate_mcp_runtime_config(config) + + # Check that the error message contains the expected text + assert "RuntimeParams not found in MCPParams" in str(exc_info.value) + + +# get_mcp_runtime_config Tests +@patch("utils.mcp_config_manager.get_service_client") +def test_get_mcp_runtime_config_success(mock_get_service_client, mock_ddb_client, sample_runtime_config): + """Test successful get_mcp_runtime_config flow.""" + mock_get_service_client.return_value = mock_ddb_client + + serializer = TypeSerializer() + ddb_item = {"Item": {"key": {"S": "test-key"}, "config": serializer.serialize(sample_runtime_config)}} + mock_ddb_client.get_item.return_value = ddb_item + + manager = MCPConfigManager(table_name="test-table") + result = manager.get_mcp_runtime_config("test-key") + + assert result["use_case_name"] == "test-use-case" + assert result["ecr_uri"] == "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest" + + +@patch("utils.mcp_config_manager.get_service_client") +def test_get_mcp_runtime_config_validation_failure(mock_get_service_client, mock_ddb_client): + """Test get_mcp_runtime_config with validation failure.""" + mock_get_service_client.return_value = mock_ddb_client + + # Invalid config + serializer = TypeSerializer() + invalid_config = {"UseCaseType": "InvalidType"} + mock_ddb_client.get_item.return_value = { + "Item": {"key": {"S": "test-key"}, "config": serializer.serialize(invalid_config)} + } + + manager = MCPConfigManager(table_name="test-table") + + with pytest.raises(ValueError, match="Invalid UseCaseType"): + manager.get_mcp_runtime_config("test-key") + + +# update_gateway_config Tests +@patch("utils.mcp_config_manager.get_service_client") +def test_update_gateway_config_success( + mock_get_service_client, mock_ddb_client, ddb_response_item, sample_gateway_result +): + """Test successful MCP config update with gateway information.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.return_value = ddb_response_item + + # Mock DynamoDB put_item for write_config method + mock_ddb_client.put_item.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}} + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_gateway_config("test-key", sample_gateway_result) + + # The method should return a dictionary with either success=True or success=False + assert isinstance(result, dict) + assert "success" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_gateway_config_partial_targets(mock_get_service_client, mock_ddb_client, ddb_response_item): + """Test update_gateway_config with partial target matches.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.return_value = ddb_response_item + + # Gateway result with only one target + partial_gateway_result = { + "gateway_id": "test-gateway-123", + "gateway_arn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/test-gateway-123", + "gateway_url": "https://test-gateway-123.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "targets": [ + {"targetId": "target-456", "targetName": "test-lambda", "targetType": "lambda"} + # Missing test-openapi target + ], + } + + mock_ddb_client.put_item.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}} + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_gateway_config("test-key", partial_gateway_result) + + # The method should return a result dictionary + assert isinstance(result, dict) + assert "success" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_gateway_config_read_failure(mock_get_service_client, mock_ddb_client): + """Test update_gateway_config when reading original config fails.""" + mock_get_service_client.return_value = mock_ddb_client + + mock_ddb_client.get_item.side_effect = Exception("DynamoDB read error") + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_gateway_config("test-key", {"gateway_id": "test"}) + + assert result["success"] is False + assert "error" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_gateway_config_write_failure( + mock_get_service_client, mock_ddb_client, ddb_response_item, sample_gateway_result +): + """Test update_gateway_config when writing updated config fails.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.return_value = ddb_response_item + + mock_ddb_client.get_item.return_value = ddb_response_item + mock_ddb_client.put_item.side_effect = Exception("DynamoDB write error") + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_gateway_config("test-key", sample_gateway_result) + + assert result["success"] is False + assert "error" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_gateway_config_missing_gateway_params(mock_get_service_client, mock_ddb_client): + """Test update_gateway_config with malformed original config.""" + mock_get_service_client.return_value = mock_ddb_client + + # Config without proper structure + malformed_config = {"UseCaseType": "MCPServer"} + + # Config without proper structure + serializer = TypeSerializer() + malformed_config = {"UseCaseType": "MCPServer"} + mock_ddb_client.get_item.return_value = { + "Item": {"key": {"S": "test-key"}, "config": serializer.serialize(malformed_config)} + } + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_gateway_config("test-key", {"gateway_id": "test"}) + + assert result["success"] is False + assert "error" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_gateway_config_empty_targets(mock_get_service_client, mock_ddb_client, ddb_response_item): + """Test update_gateway_config with empty targets in gateway result.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.return_value = ddb_response_item + + gateway_result_no_targets = { + "gateway_id": "test-gateway-123", + "gateway_arn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/test-gateway-123", + "gateway_url": "https://test-gateway-123.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp", + "targets": [], # Empty targets + } + + mock_ddb_client.put_item.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}} + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_gateway_config("test-key", gateway_result_no_targets) + + # The method should return a result dictionary + assert isinstance(result, dict) + assert "success" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_gateway_workflow_integration(mock_get_service_client, mock_ddb_client, sample_config, sample_gateway_result): + """Test gateway workflow: read config -> validate -> update.""" + mock_get_service_client.return_value = mock_ddb_client + + # Setup responses + serializer = TypeSerializer() + ddb_item = {"Item": {"key": {"S": "test-key"}, "config": serializer.serialize(sample_config)}} + mock_ddb_client.get_item.return_value = ddb_item + + manager = MCPConfigManager(table_name="test-table") + + validated_config = manager.get_mcp_gateway_config("test-key") + assert validated_config["use_case_name"] == "test-mcp" + + # Mock put_item for write_config method + mock_ddb_client.put_item.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}} + update_result = manager.update_gateway_config("test-key", sample_gateway_result) + + # The method should return a result dictionary + assert isinstance(update_result, dict) + assert "success" in update_result + + assert mock_ddb_client.get_item.call_count == 2 # get_mcp_gateway_config + update_gateway_config + + +@patch("utils.mcp_config_manager.get_service_client") +def test_runtime_workflow_integration(mock_get_service_client, mock_ddb_client, sample_runtime_config): + """Test runtime workflow: read config -> validate runtime config.""" + mock_get_service_client.return_value = mock_ddb_client + + serializer = TypeSerializer() + ddb_item = {"Item": {"key": {"S": "test-key"}, "config": serializer.serialize(sample_runtime_config)}} + mock_ddb_client.get_item.return_value = ddb_item + + manager = MCPConfigManager(table_name="test-table") + + # Read and validate runtime config + validated_config = manager.get_mcp_runtime_config("test-key") + assert validated_config["use_case_name"] == "test-use-case" + assert validated_config["ecr_uri"] == "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest" + assert validated_config["environment_variables"]["ENV_VAR_1"] == "value1" + + mock_ddb_client.get_item.assert_called_once() + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_runtime_config_success(mock_get_service_client, mock_ddb_client, sample_runtime_config): + """Test successful update_runtime_config operation.""" + mock_get_service_client.return_value = mock_ddb_client + + # Setup DynamoDB response with existing config + serializer = TypeSerializer() + ddb_item = {"Item": {"key": {"S": "test-key"}, "config": serializer.serialize(sample_runtime_config)}} + mock_ddb_client.get_item.return_value = ddb_item + mock_ddb_client.put_item.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}} + + # Runtime result from create/update operation + runtime_result = { + "MCPRuntimeId": "runtime-abc123", + "MCPRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/runtime-abc123", + "MCPAgentCoreName": "gaab_mcp_test" + } + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_runtime_config("test-key", runtime_result) + + # Verify success + assert result["success"] is True + + # Verify put_item was called + mock_ddb_client.put_item.assert_called_once() + + # Verify the updated config includes runtime info + call_args = mock_ddb_client.put_item.call_args[1] + assert call_args["TableName"] == "test-table" + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_runtime_config_read_failure(mock_get_service_client, mock_ddb_client): + """Test update_runtime_config when reading config fails.""" + mock_get_service_client.return_value = mock_ddb_client + mock_ddb_client.get_item.side_effect = Exception("DynamoDB read error") + + runtime_result = { + "MCPRuntimeId": "runtime-abc123", + "MCPRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/runtime-abc123", + "MCPAgentCoreName": "gaab_mcp_test" + } + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_runtime_config("test-key", runtime_result) + + assert result["success"] is False + assert "error" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_runtime_config_write_failure(mock_get_service_client, mock_ddb_client, sample_runtime_config): + """Test update_runtime_config when writing config fails.""" + mock_get_service_client.return_value = mock_ddb_client + + serializer = TypeSerializer() + ddb_item = {"Item": {"key": {"S": "test-key"}, "config": serializer.serialize(sample_runtime_config)}} + mock_ddb_client.get_item.return_value = ddb_item + mock_ddb_client.put_item.side_effect = Exception("DynamoDB write error") + + runtime_result = { + "MCPRuntimeId": "runtime-abc123", + "MCPRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/runtime-abc123", + "MCPAgentCoreName": "gaab_mcp_test" + } + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_runtime_config("test-key", runtime_result) + + assert result["success"] is False + assert "error" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_update_runtime_config_missing_runtime_params(mock_get_service_client, mock_ddb_client): + """Test update_runtime_config with malformed original config.""" + mock_get_service_client.return_value = mock_ddb_client + + # Config without proper structure + serializer = TypeSerializer() + malformed_config = {"UseCaseType": "MCPServer"} + mock_ddb_client.get_item.return_value = { + "Item": {"key": {"S": "test-key"}, "config": serializer.serialize(malformed_config)} + } + + runtime_result = { + "MCPRuntimeId": "runtime-abc123", + "MCPRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/runtime-abc123", + "MCPAgentCoreName": "gaab_mcp_test" + } + + manager = MCPConfigManager(table_name="test-table") + result = manager.update_runtime_config("test-key", runtime_result) + + assert result["success"] is False + assert "error" in result + + +@patch("utils.mcp_config_manager.get_service_client") +def test_runtime_update_workflow_integration(mock_get_service_client, mock_ddb_client, sample_runtime_config): + """Test runtime update workflow: read config -> validate -> update with runtime info.""" + mock_get_service_client.return_value = mock_ddb_client + + serializer = TypeSerializer() + ddb_item = {"Item": {"key": {"S": "test-key"}, "config": serializer.serialize(sample_runtime_config)}} + mock_ddb_client.get_item.return_value = ddb_item + mock_ddb_client.put_item.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}} + + manager = MCPConfigManager(table_name="test-table") + + # Read and validate runtime config + validated_config = manager.get_mcp_runtime_config("test-key") + assert validated_config["use_case_name"] == "test-use-case" + assert validated_config["ecr_uri"] == "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest" + + # Update with runtime info + runtime_result = { + "MCPRuntimeId": "runtime-abc123", + "MCPRuntimeArn": "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/runtime-abc123", + "MCPAgentCoreName": "gaab_mcp_test" + } + + update_result = manager.update_runtime_config("test-key", runtime_result) + + # Verify success + assert isinstance(update_result, dict) + assert "success" in update_result + assert update_result["success"] is True + + # Verify both get and put were called + assert mock_ddb_client.get_item.call_count == 2 # get_mcp_runtime_config + update_runtime_config + mock_ddb_client.put_item.assert_called_once() diff --git a/source/lambda/custom-resource/test/utils/test_mcp_factory.py b/source/lambda/custom-resource/test/utils/test_mcp_factory.py new file mode 100644 index 00000000..75f0293e --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_mcp_factory.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock + +from utils.mcp_factory import MCPGatewayFactory, MCPTargetCreator + + +class MockTargetCreator(MCPTargetCreator): + """Mock target creator for testing.""" + + def validate_configuration(self) -> bool: + return True + + def create_target_configuration(self) -> dict: + return {"mock": "config"} + + +class TestMCPGatewayFactory: + + def setup_method(self): + # Clear the registry before each test + MCPGatewayFactory.target_creators = {} + + def test_register_target_creator(self): + """Test registering a target creator.""" + MCPGatewayFactory.register_target_creator("mock", MockTargetCreator) + + assert "mock" in MCPGatewayFactory.target_creators + assert MCPGatewayFactory.target_creators["mock"] == MockTargetCreator + + def test_create_target_creator_success(self): + """Test successful target creator creation.""" + MCPGatewayFactory.register_target_creator("mock", MockTargetCreator) + + target_config = { + "TargetName": "test-target", + "TargetType": "mock", + "SchemaUri": "test-schema.json" + } + + creator = MCPGatewayFactory.create_target_creator(target_config, "test-bucket") + + assert isinstance(creator, MockTargetCreator) + assert creator.target_name == "test-target" + assert creator.target_type == "mock" + + def test_create_target_creator_missing_type(self): + """Test target creator creation with missing target type.""" + target_config = { + "TargetName": "test-target", + "SchemaUri": "test-schema.json" + } + + with pytest.raises(ValueError) as exc_info: + MCPGatewayFactory.create_target_creator(target_config, "test-bucket") + + assert "Target type is required" in str(exc_info.value) + + def test_create_target_creator_unsupported_type(self): + """Test target creator creation with unsupported target type.""" + MCPGatewayFactory.register_target_creator("mock", MockTargetCreator) + + target_config = { + "TargetName": "test-target", + "TargetType": "unsupported", + "SchemaUri": "test-schema.json" + } + + with pytest.raises(ValueError) as exc_info: + MCPGatewayFactory.create_target_creator(target_config, "test-bucket") + + assert "Unsupported target type: unsupported" in str(exc_info.value) + assert "Available types: ['mock']" in str(exc_info.value) + + def test_get_supported_target_types(self): + """Test getting supported target types.""" + MCPGatewayFactory.register_target_creator("mock1", MockTargetCreator) + MCPGatewayFactory.register_target_creator("mock2", MockTargetCreator) + + supported_types = MCPGatewayFactory.get_supported_target_types() + + assert "mock1" in supported_types + assert "mock2" in supported_types + assert len(supported_types) == 2 + + +class TestMCPTargetCreator: + + def test_get_target_info_with_description(self): + """Test getting target info with description.""" + target_config = { + "TargetName": "test-target", + "TargetType": "mock", + "SchemaUri": "test-schema.json", + "TargetDescription": "Test target description" + } + + creator = MockTargetCreator(target_config, "test-bucket") + target_info = creator.get_target_info() + + assert target_info["name"] == "test-target" + assert target_info["type"] == "mock" + assert target_info["schema_uri"] == "test-schema.json" + assert target_info["description"] == "Test target description" + + def test_get_target_info_without_description(self): + """Test getting target info without description.""" + target_config = { + "TargetName": "test-target", + "TargetType": "mock", + "SchemaUri": "test-schema.json" + } + + creator = MockTargetCreator(target_config, "test-bucket") + target_info = creator.get_target_info() + + assert target_info["name"] == "test-target" + assert target_info["type"] == "mock" + assert target_info["schema_uri"] == "test-schema.json" + assert "description" not in target_info + + +def test_register_default_creators(): + """Test that default creators are registered lazily when needed.""" + # Clear registry first + MCPGatewayFactory.target_creators = {} + + # Check that default creators are registered when we ask for supported types + supported_types = MCPGatewayFactory.get_supported_target_types() + assert "lambda" in supported_types + assert "openApiSchema" in supported_types + assert "smithyModel" in supported_types \ No newline at end of file diff --git a/source/lambda/custom-resource/test/utils/test_metrics.py b/source/lambda/custom-resource/test/utils/test_metrics.py index 0e308a03..3adda023 100644 --- a/source/lambda/custom-resource/test/utils/test_metrics.py +++ b/source/lambda/custom-resource/test/utils/test_metrics.py @@ -18,7 +18,7 @@ def test_when_env_variables_set(monkeypatch): envs = { "UNIT_TEST_ENV": "yes", - "POWERTOOLS_SERVICE_NAME": "ANONYMOUS-CW-METRICS", + "POWERTOOLS_SERVICE_NAME": "CW-METRICS", "SOLUTION_ID": "SO0001", "SOLUTION_VERSION": "v99.99.99", } @@ -30,7 +30,7 @@ def test_when_env_variables_set(monkeypatch): def test_when_solution_version_not_set(monkeypatch): envs = { "UNIT_TEST_ENV": "yes", - "POWERTOOLS_SERVICE_NAME": "ANONYMOUS-CW-METRICS", + "POWERTOOLS_SERVICE_NAME": "CW-METRICS", "SOLUTION_ID": "SO0001", } monkeypatch.setattr(os, "environ", envs) @@ -42,7 +42,7 @@ def test_when_solution_version_not_set(monkeypatch): def test_when_solution_id_not_set(monkeypatch): envs = { "UNIT_TEST_ENV": "yes", - "POWERTOOLS_SERVICE_NAME": "ANONYMOUS-CW-METRICS", + "POWERTOOLS_SERVICE_NAME": "CW-METRICS", "SOLUTION_VERSION": "v99.99.99", } monkeypatch.setattr(os, "environ", envs) diff --git a/source/lambda/custom-resource/test/utils/test_metrics_payload.py b/source/lambda/custom-resource/test/utils/test_metrics_payload.py index 6ae6cc47..3d2be8b7 100644 --- a/source/lambda/custom-resource/test/utils/test_metrics_payload.py +++ b/source/lambda/custom-resource/test/utils/test_metrics_payload.py @@ -30,7 +30,7 @@ def setup_metrics_environment(): def test_get_cloudwatch_metrics_queries(): - assert len(get_cloudwatch_metrics_queries()) == 12 + assert len(get_cloudwatch_metrics_queries()) == 30 @mock_aws diff --git a/source/lambda/custom-resource/test/utils/test_openapi_target_creator.py b/source/lambda/custom-resource/test/utils/test_openapi_target_creator.py new file mode 100644 index 00000000..42983bc4 --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_openapi_target_creator.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from utils.openapi_target_creator import OpenAPITargetCreator + + +class TestOpenAPITargetCreator: + + def test_initialization(self): + config = {"TargetName": "test-api", "TargetType": "openApiSchema", "SchemaUri": "schemas/openapi-schema.json"} + + creator = OpenAPITargetCreator(config, "test-bucket") + assert creator.target_name == "test-api" + assert creator.target_type == "openApiSchema" + assert creator.schema_uri == "schemas/openapi-schema.json" + + def test_validate_configuration_success(self): + config = {"TargetName": "test-api", "TargetType": "openApiSchema", "SchemaUri": "schemas/openapi-schema.json"} + + creator = OpenAPITargetCreator(config, "test-bucket") + assert creator.validate_configuration() is True + + def test_validate_configuration_missing_name(self): + config = {"TargetType": "openApiSchema", "SchemaUri": "schemas/openapi-schema.json"} + + creator = OpenAPITargetCreator(config, "test-bucket") + with pytest.raises(ValueError, match="TargetName and SchemaUri are required"): + creator.validate_configuration() + + def test_validate_configuration_missing_schema_uri(self): + config = {"TargetName": "test-api", "TargetType": "openApiSchema"} + + creator = OpenAPITargetCreator(config, "test-bucket") + with pytest.raises(ValueError, match="TargetName and SchemaUri are required"): + creator.validate_configuration() + + def test_create_target_configuration(self): + config = {"TargetName": "test-api", "TargetType": "openApiSchema", "SchemaUri": "schemas/openapi-schema.json"} + + creator = OpenAPITargetCreator(config, "test-bucket") + result = creator.create_target_configuration() + + expected = {"openApiSchema": {"s3": {"uri": "s3://test-bucket/schemas/openapi-schema.json"}}} + assert result == expected + + def test_build_oauth_credential_config(self): + config = { + "TargetName": "test-api", + "TargetType": "openApiSchema", + "SchemaUri": "schemas/openapi-schema.json", + "OutboundAuthParams": { + "OutboundAuthProviderType": "OAUTH", + "OutboundAuthProviderArn": "arn:aws:secretsmanager:us-east-1:123456789012:secret:oauth-secret", + "AdditionalConfigParams": { + "OAuthAdditionalConfig": { + "scopes": ["read", "write"], + "customParameters": [{"key": "audience", "value": "api.example.com"}], + } + }, + }, + } + + creator = OpenAPITargetCreator(config, "test-bucket") + result = creator.build_credential_provider_configurations() + + expected = [ + { + "credentialProviderType": "OAUTH", + "credentialProvider": { + "oauthCredentialProvider": { + "providerArn": "arn:aws:secretsmanager:us-east-1:123456789012:secret:oauth-secret", + "scopes": ["read", "write"], + "customParameters": {"audience": "api.example.com"}, + } + }, + } + ] + assert result == expected + + def test_build_api_key_credential_config(self): + config = { + "TargetName": "test-api", + "TargetType": "openApiSchema", + "SchemaUri": "schemas/openapi-schema.json", + "OutboundAuthParams": { + "OutboundAuthProviderType": "API_KEY", + "OutboundAuthProviderArn": "arn:aws:secretsmanager:us-east-1:123456789012:secret:api-key-secret", + "AdditionalConfigParams": { + "ApiKeyAdditionalConfig": { + "parameterName": "x-api-key", + "prefix": "Bearer", + "location": "header", + } + }, + }, + } + + creator = OpenAPITargetCreator(config, "test-bucket") + result = creator.build_credential_provider_configurations() + + expected = [ + { + "credentialProviderType": "API_KEY", + "credentialProvider": { + "apiKeyCredentialProvider": { + "providerArn": "arn:aws:secretsmanager:us-east-1:123456789012:secret:api-key-secret", + "credentialParameterName": "x-api-key", + "credentialPrefix": "Bearer", + "credentialLocation": "header", + } + }, + } + ] + assert result == expected + + def test_build_credential_config_missing_auth_params(self): + config = { + "TargetName": "test-api", + "TargetType": "openApiSchema", + "SchemaUri": "schemas/openapi-schema.json", + } + + creator = OpenAPITargetCreator(config, "test-bucket") + with pytest.raises( + ValueError, + match="OpenAPI targets require OutboundAuthParams with valid OutboundAuthProviderType and OutboundAuthProviderArn", + ): + creator.build_credential_provider_configurations() + + def test_convert_custom_parameters(self): + config = {"TargetName": "test-api", "TargetType": "openApiSchema", "SchemaUri": "schemas/openapi-schema.json"} + + creator = OpenAPITargetCreator(config, "test-bucket") + + custom_params_array = [{"key": "param1", "value": "value1"}, {"key": "param2", "value": "value2"}] + + result = creator.convert_custom_parameters(custom_params_array) + + expected = {"param1": "value1", "param2": "value2"} + assert result == expected + + def test_convert_custom_parameters_empty(self): + config = {"TargetName": "test-api", "TargetType": "openApiSchema", "SchemaUri": "schemas/openapi-schema.json"} + + creator = OpenAPITargetCreator(config, "test-bucket") + result = creator.convert_custom_parameters([]) + + assert result == {} \ No newline at end of file diff --git a/source/lambda/custom-resource/test/utils/test_policy_manager.py b/source/lambda/custom-resource/test/utils/test_policy_manager.py new file mode 100644 index 00000000..84edbaf0 --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_policy_manager.py @@ -0,0 +1,556 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import json +from unittest.mock import Mock, patch, MagicMock +from botocore.exceptions import ClientError +from utils.policy_manager import GatewayPolicyManager + + +@pytest.fixture +def policy_manager(): + """Fixture to create GatewayPolicyManager instance.""" + with patch("utils.policy_manager.get_service_client") as mock_get_client: + mock_iam_client = Mock() + mock_agentcore_client = Mock() + + # Create proper exception classes for IAM client + class NoSuchEntityException(Exception): + pass + + # Attach exceptions to the mock client + mock_iam_client.exceptions = Mock() + mock_iam_client.exceptions.NoSuchEntityException = NoSuchEntityException + + mock_get_client.return_value = mock_iam_client + + with patch.dict("os.environ", {"AWS_REGION": "us-east-1"}): + manager = GatewayPolicyManager("test-role", mock_agentcore_client) + manager.iam_client = mock_iam_client + yield manager, mock_iam_client, mock_agentcore_client + + +@patch("utils.policy_manager.get_service_client") +@patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) +def test_initialization_success(mock_get_client): + """Test successful initialization.""" + mock_iam_client = Mock() + mock_agentcore_client = Mock() + mock_get_client.return_value = mock_iam_client + + manager = GatewayPolicyManager("my-role", mock_agentcore_client) + + assert manager.role_name == "my-role" + assert manager.agentcore_client == mock_agentcore_client + assert manager.iam_client == mock_iam_client + mock_get_client.assert_called_once_with("iam", region_name="us-east-1") + + +@patch("utils.policy_manager.get_service_client") +@patch.dict("os.environ", {"AWS_REGION": "us-west-2"}) +def test_initialization_with_retry(mock_get_client): + """Test initialization with client retry on first failure.""" + mock_iam_client = Mock() + mock_agentcore_client = Mock() + mock_get_client.side_effect = [Exception("First attempt failed"), mock_iam_client] + + manager = GatewayPolicyManager("my-role", mock_agentcore_client) + + assert manager.iam_client == mock_iam_client + assert mock_get_client.call_count == 2 + + +def test_add_lambda_policy_success(policy_manager): + """Test successfully adding a Lambda policy.""" + manager, mock_client, _ = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException( + "Policy not found" + ) + + manager.add_lambda_policy("my-target", "arn:aws:lambda:us-east-1:123456789012:function:my-function") + + # Verify put_role_policy was called + mock_client.put_role_policy.assert_called_once() + call_args = mock_client.put_role_policy.call_args + + assert call_args[1]["RoleName"] == "test-role" + assert call_args[1]["PolicyName"] == "my-target-lambda-access-policy" + + policy_doc = json.loads(call_args[1]["PolicyDocument"]) + assert policy_doc["Version"] == "2012-10-17" + assert len(policy_doc["Statement"]) == 1 + assert policy_doc["Statement"][0]["Effect"] == "Allow" + assert policy_doc["Statement"][0]["Action"] == ["lambda:InvokeFunction"] + assert policy_doc["Statement"][0]["Resource"] == ["arn:aws:lambda:us-east-1:123456789012:function:my-function"] + + +def test_add_lambda_policy_duplicate_skipped(policy_manager): + """Test that duplicate policy is skipped.""" + manager, mock_client, _ = policy_manager + + existing_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["lambda:InvokeFunction"], + "Resource": ["arn:aws:lambda:us-east-1:123456789012:function:my-function"] + } + ] + } + + mock_client.get_role_policy.return_value = { + "PolicyDocument": existing_policy + } + + manager.add_lambda_policy("my-target", "arn:aws:lambda:us-east-1:123456789012:function:my-function") + + # Verify put_role_policy was NOT called + mock_client.put_role_policy.assert_not_called() + + +def test_add_lambda_policy_failure(policy_manager): + """Test Lambda policy addition failure.""" + manager, mock_client, _ = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException( + "Policy not found" + ) + mock_client.put_role_policy.side_effect = ClientError( + {"Error": {"Code": "AccessDenied", "Message": "Access denied"}}, + "put_role_policy" + ) + + with pytest.raises(RuntimeError, match="Failed to add Lambda policy for my-target"): + manager.add_lambda_policy("my-target", "arn:aws:lambda:us-east-1:123456789012:function:my-function") + + +def test_add_openapi_policy_success(policy_manager): + """Test successfully adding an OpenAPI policy.""" + manager, mock_client, _ = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException( + "Policy not found" + ) + + provider_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/default/credential-provider/my-provider" + secret_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:my-secret" + + manager.add_openapi_policy( + "my-target", + provider_arn, + "bedrock-agentcore:GetResourceOauth2Token", + secret_arn + ) + + # Verify put_role_policy was called + mock_client.put_role_policy.assert_called_once() + call_args = mock_client.put_role_policy.call_args + + assert call_args[1]["RoleName"] == "test-role" + assert call_args[1]["PolicyName"] == "my-target-my-provider-access-policy" + + policy_doc = json.loads(call_args[1]["PolicyDocument"]) + assert policy_doc["Version"] == "2012-10-17" + assert len(policy_doc["Statement"]) == 2 + + # Check auth statement + auth_statement = policy_doc["Statement"][0] + assert auth_statement["Effect"] == "Allow" + assert auth_statement["Action"] == ["bedrock-agentcore:GetResourceOauth2Token"] + assert len(auth_statement["Resource"]) == 2 + + # Check secrets manager statement + secrets_statement = policy_doc["Statement"][1] + assert secrets_statement["Effect"] == "Allow" + assert secrets_statement["Action"] == ["secretsmanager:GetSecretValue"] + assert secrets_statement["Resource"] == [secret_arn] + + +def test_add_openapi_policy_duplicate_skipped(policy_manager): + """Test that duplicate OpenAPI policy is skipped.""" + manager, mock_client, _ = policy_manager + + provider_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/default/credential-provider/my-provider" + secret_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:my-secret" + + existing_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["bedrock-agentcore:GetResourceOauth2Token"], + "Resource": [ + "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/default", + provider_arn + ] + }, + { + "Effect": "Allow", + "Action": ["secretsmanager:GetSecretValue"], + "Resource": [secret_arn] + } + ] + } + + mock_client.get_role_policy.return_value = { + "PolicyDocument": existing_policy + } + + manager.add_openapi_policy( + "my-target", + provider_arn, + "bedrock-agentcore:GetResourceOauth2Token", + secret_arn + ) + + # Verify put_role_policy was NOT called + mock_client.put_role_policy.assert_not_called() + + +def test_add_openapi_policy_failure(policy_manager): + """Test OpenAPI policy addition failure.""" + manager, mock_client, _ = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException( + "Policy not found" + ) + mock_client.put_role_policy.side_effect = ClientError( + {"Error": {"Code": "AccessDenied", "Message": "Access denied"}}, + "put_role_policy" + ) + + provider_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/default/credential-provider/my-provider" + secret_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:my-secret" + + with pytest.raises(RuntimeError, match="Failed to add OpenAPI policy for my-target"): + manager.add_openapi_policy( + "my-target", + provider_arn, + "bedrock-agentcore:GetResourceOauth2Token", + secret_arn + ) + + +def test_is_duplicate_policy_identical(policy_manager): + """Test duplicate detection with identical policy.""" + manager, mock_client, _ = policy_manager + + policy_doc = { + "Version": "2012-10-17", + "Statement": [{"Effect": "Allow", "Action": ["lambda:InvokeFunction"], "Resource": ["arn:aws:lambda:*"]}] + } + + mock_client.get_role_policy.return_value = { + "PolicyDocument": policy_doc + } + + result = manager.is_duplicate_policy(policy_doc, "test-policy") + + assert result is True + + +def test_is_duplicate_policy_different(policy_manager): + """Test duplicate detection with different policy.""" + manager, mock_client, _ = policy_manager + + existing_policy = { + "Version": "2012-10-17", + "Statement": [{"Effect": "Allow", "Action": ["lambda:InvokeFunction"], "Resource": ["arn:aws:lambda:*"]}] + } + + new_policy = { + "Version": "2012-10-17", + "Statement": [{"Effect": "Allow", "Action": ["s3:GetObject"], "Resource": ["arn:aws:s3:::*"]}] + } + + mock_client.get_role_policy.return_value = { + "PolicyDocument": existing_policy + } + + result = manager.is_duplicate_policy(new_policy, "test-policy") + + assert result is False + + +def test_is_duplicate_policy_not_found(policy_manager): + """Test duplicate detection when policy doesn't exist.""" + manager, mock_client, _ = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException( + "Policy not found" + ) + + policy_doc = {"Version": "2012-10-17", "Statement": []} + + result = manager.is_duplicate_policy(policy_doc, "test-policy") + + assert result is False + + +def test_is_duplicate_policy_url_encoded_string(policy_manager): + """Test duplicate detection with URL-encoded policy document string.""" + manager, mock_client, _ = policy_manager + + policy_doc = { + "Version": "2012-10-17", + "Statement": [{"Effect": "Allow", "Action": ["lambda:InvokeFunction"], "Resource": ["arn:aws:lambda:*"]}] + } + + # Simulate URL-encoded JSON string (older boto3 versions) + import urllib.parse + encoded_policy = urllib.parse.quote(json.dumps(policy_doc)) + + mock_client.get_role_policy.return_value = { + "PolicyDocument": encoded_policy + } + + result = manager.is_duplicate_policy(policy_doc, "test-policy") + + assert result is True + + +def test_is_duplicate_policy_error_handling(policy_manager): + """Test duplicate detection with unexpected error.""" + manager, mock_client, _ = policy_manager + mock_client.get_role_policy.side_effect = ClientError( + {"Error": {"Code": "ServiceUnavailable", "Message": "Service unavailable"}}, + "get_role_policy" + ) + + policy_doc = {"Version": "2012-10-17", "Statement": []} + + result = manager.is_duplicate_policy(policy_doc, "test-policy") + + assert result is False + + +def test_destroy_all_custom_policies_success(policy_manager): + """Test successfully destroying all custom policies.""" + manager, mock_client, _ = policy_manager + + mock_client.list_role_policies.return_value = { + "PolicyNames": ["policy1", "policy2", "policy3"] + } + + manager.destroy_all_custom_policies() + + # Verify all policies were deleted + assert mock_client.delete_role_policy.call_count == 3 + mock_client.delete_role_policy.assert_any_call(RoleName="test-role", PolicyName="policy1") + mock_client.delete_role_policy.assert_any_call(RoleName="test-role", PolicyName="policy2") + mock_client.delete_role_policy.assert_any_call(RoleName="test-role", PolicyName="policy3") + + +def test_destroy_all_custom_policies_empty(policy_manager): + """Test destroying policies when none exist.""" + manager, mock_client, _ = policy_manager + + mock_client.list_role_policies.return_value = { + "PolicyNames": [] + } + + manager.destroy_all_custom_policies() + + # Verify no delete calls were made + mock_client.delete_role_policy.assert_not_called() + + +def test_destroy_all_custom_policies_partial_failure(policy_manager): + """Test destroying policies with partial failures.""" + manager, mock_client, _ = policy_manager + + mock_client.list_role_policies.return_value = { + "PolicyNames": ["policy1", "policy2", "policy3"] + } + + # Make policy2 deletion fail + def delete_side_effect(RoleName, PolicyName): + if PolicyName == "policy2": + raise ClientError( + {"Error": {"Code": "AccessDenied", "Message": "Access denied"}}, + "delete_role_policy" + ) + + mock_client.delete_role_policy.side_effect = delete_side_effect + + # Should not raise, just log and continue + manager.destroy_all_custom_policies() + + # Verify all policies were attempted + assert mock_client.delete_role_policy.call_count == 3 + + +def test_destroy_all_custom_policies_already_deleted(policy_manager): + """Test destroying policies when some are already deleted.""" + manager, mock_client, _ = policy_manager + + mock_client.list_role_policies.return_value = { + "PolicyNames": ["policy1", "policy2"] + } + + # Make policy1 already deleted + def delete_side_effect(RoleName, PolicyName): + if PolicyName == "policy1": + raise mock_client.exceptions.NoSuchEntityException("Policy already deleted") + + mock_client.delete_role_policy.side_effect = delete_side_effect + + manager.destroy_all_custom_policies() + + # Verify both policies were attempted + assert mock_client.delete_role_policy.call_count == 2 + + +def test_destroy_all_custom_policies_list_failure(policy_manager): + """Test destroying policies when listing fails.""" + manager, mock_client, _ = policy_manager + + mock_client.list_role_policies.side_effect = ClientError( + {"Error": {"Code": "AccessDenied", "Message": "Access denied"}}, + "list_role_policies" + ) + + # Should not raise, just log warning + manager.destroy_all_custom_policies() + + # Verify no delete calls were made + mock_client.delete_role_policy.assert_not_called() + + +def test_gateway_policy_factory_lambda(policy_manager): + """Test gateway_policy_factory with Lambda target.""" + manager, mock_client, _ = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException("Policy not found") + + target = { + "TargetName": "my-lambda-target", + "LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:my-function" + } + + manager.gateway_policy_factory("lambda", target) + + # Verify Lambda policy was created + mock_client.put_role_policy.assert_called_once() + call_args = mock_client.put_role_policy.call_args + assert call_args[1]["PolicyName"] == "my-lambda-target-lambda-access-policy" + + +def test_gateway_policy_factory_openapi(policy_manager): + """Test gateway_policy_factory with OpenAPI target.""" + manager, mock_client, mock_agentcore = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException("Policy not found") + + provider_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/default/credential-provider/my-provider" + secret_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:my-secret" + + mock_agentcore.get_oauth2_credential_provider.return_value = { + "clientSecretArn": {"secretArn": secret_arn} + } + + target = { + "TargetName": "my-openapi-target", + "OutboundAuthParams": { + "OutboundAuthProviderType": "OAUTH", + "OutboundAuthProviderArn": provider_arn + } + } + + manager.gateway_policy_factory("openApiSchema", target) + + # Verify OpenAPI policy was created + mock_client.put_role_policy.assert_called_once() + mock_agentcore.get_oauth2_credential_provider.assert_called_once_with(name="my-provider") + + +def test_gateway_policy_factory_missing_target_name(policy_manager): + """Test gateway_policy_factory with missing TargetName.""" + manager, _, _ = policy_manager + + target = {"LambdaArn": "arn:aws:lambda:us-east-1:123456789012:function:my-function"} + + with pytest.raises(ValueError, match="TargetName is required"): + manager.gateway_policy_factory("lambda", target) + + +def test_add_openapi_policy_for_target_oauth(policy_manager): + """Test _add_openapi_policy_for_target with OAuth authentication.""" + manager, mock_client, mock_agentcore = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException("Policy not found") + + provider_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/default/credential-provider/oauth-provider" + secret_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:oauth-secret" + + mock_agentcore.get_oauth2_credential_provider.return_value = { + "clientSecretArn": {"secretArn": secret_arn} + } + + outbound_auth_params = { + "OutboundAuthProviderType": "OAUTH", + "OutboundAuthProviderArn": provider_arn + } + + manager._add_openapi_policy_for_target("test-target", outbound_auth_params) + + # Verify correct AgentCore call + mock_agentcore.get_oauth2_credential_provider.assert_called_once_with(name="oauth-provider") + + # Verify IAM policy was created with correct action + mock_client.put_role_policy.assert_called_once() + call_args = mock_client.put_role_policy.call_args + policy_doc = json.loads(call_args[1]["PolicyDocument"]) + assert policy_doc["Statement"][0]["Action"] == ["bedrock-agentcore:GetResourceOauth2Token"] + + +def test_add_openapi_policy_for_target_api_key(policy_manager): + """Test _add_openapi_policy_for_target with API Key authentication.""" + manager, mock_client, mock_agentcore = policy_manager + mock_client.get_role_policy.side_effect = mock_client.exceptions.NoSuchEntityException("Policy not found") + + provider_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/default/credential-provider/apikey-provider" + secret_arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:apikey-secret" + + mock_agentcore.get_api_key_credential_provider.return_value = { + "apiKeySecretArn": {"secretArn": secret_arn} + } + + outbound_auth_params = { + "OutboundAuthProviderType": "API_KEY", + "OutboundAuthProviderArn": provider_arn + } + + manager._add_openapi_policy_for_target("test-target", outbound_auth_params) + + # Verify correct AgentCore call + mock_agentcore.get_api_key_credential_provider.assert_called_once_with(name="apikey-provider") + + # Verify IAM policy was created with correct action + mock_client.put_role_policy.assert_called_once() + call_args = mock_client.put_role_policy.call_args + policy_doc = json.loads(call_args[1]["PolicyDocument"]) + assert policy_doc["Statement"][0]["Action"] == ["bedrock-agentcore:GetResourceApiKey"] + + +def test_add_openapi_policy_for_target_missing_provider_arn(policy_manager): + """Test _add_openapi_policy_for_target with missing provider ARN.""" + manager, _, _ = policy_manager + + outbound_auth_params = {"OutboundAuthProviderType": "OAUTH"} + + with pytest.raises(ValueError, match="OutboundAuthProviderArn is required"): + manager._add_openapi_policy_for_target("test-target", outbound_auth_params) + + +def test_add_openapi_policy_for_target_agentcore_failure(policy_manager): + """Test _add_openapi_policy_for_target when AgentCore call fails.""" + manager, _, mock_agentcore = policy_manager + + provider_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/default/credential-provider/oauth-provider" + + mock_agentcore.get_oauth2_credential_provider.side_effect = Exception("AgentCore error") + + outbound_auth_params = { + "OutboundAuthProviderType": "OAUTH", + "OutboundAuthProviderArn": provider_arn + } + + with pytest.raises(Exception, match="AgentCore error"): + manager._add_openapi_policy_for_target("test-target", outbound_auth_params) diff --git a/source/lambda/custom-resource/test/utils/test_runtime_mcp.py b/source/lambda/custom-resource/test/utils/test_runtime_mcp.py new file mode 100644 index 00000000..f4f4b6b3 --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_runtime_mcp.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch +from utils.runtime_mcp import RuntimeMCP + + +@pytest.fixture +def runtime_mcp_factory(): + """Factory fixture to create RuntimeMCP instances.""" + + @patch("utils.agentcore_mcp.get_service_client") + @patch.dict("os.environ", {"AWS_REGION": "us-east-1"}) + def _create_runtime(mock_get_client, runtime_id=None): + mock_client = Mock() + mock_get_client.return_value = mock_client + + config = { + "ecr_uri": "123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server:latest", + "environment_variables": {"CUSTOM_VAR": "custom_value"}, + "use_case_description": "Test runtime description", + } + + runtime = RuntimeMCP( + config=config, + cognito_user_pool_id="us-east-1_ABC123", + runtime_name="test-runtime", + execution_role_arn="arn:aws:iam::123456789012:role/test-role", + table_name="test-table", + config_key="test-key", + runtime_id=runtime_id, + ) + + return runtime, mock_client + + return _create_runtime + + +def test_initialization(runtime_mcp_factory): + """Test RuntimeMCP initialization.""" + runtime, _ = runtime_mcp_factory() + + assert runtime.runtime_name == "test-runtime" + assert runtime.execution_role_arn == "arn:aws:iam::123456789012:role/test-role" + assert runtime.table_name == "test-table" + assert runtime.config_key == "test-key" + assert runtime.runtime_id is None + assert runtime.runtime_arn is None + + +def test_mcp_image_uri_valid(runtime_mcp_factory): + """Test mcp_image_uri property with valid ECR URI.""" + runtime, _ = runtime_mcp_factory() + + assert runtime.mcp_image_uri == "123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server:latest" + + +def test_mcp_image_uri_missing(runtime_mcp_factory): + """Test mcp_image_uri property when ECR URI is missing.""" + runtime, _ = runtime_mcp_factory() + runtime.config = {} + + with pytest.raises(ValueError, match="EcrUri not found in MCP runtime configuration"): + _ = runtime.mcp_image_uri + + +def test_mcp_image_uri_no_tag(runtime_mcp_factory): + """Test mcp_image_uri property when ECR URI has no tag.""" + runtime, _ = runtime_mcp_factory() + runtime.config["ecr_uri"] = "123456789012.dkr.ecr.us-east-1.amazonaws.com/mcp-server" + + with pytest.raises(ValueError, match="Invalid ECR URI format.*must include a tag"): + _ = runtime.mcp_image_uri + + +def test_environment_variables(runtime_mcp_factory): + """Test environment_variables property.""" + runtime, _ = runtime_mcp_factory() + + env_vars = runtime.environment_variables + + assert env_vars["USE_CASE_CONFIG_TABLE_NAME"] == "test-table" + assert env_vars["USE_CASE_CONFIG_RECORD_KEY"] == "test-key" + assert env_vars["CUSTOM_VAR"] == "custom_value" + + +def test_environment_variables_no_custom(runtime_mcp_factory): + """Test environment_variables property without custom variables.""" + runtime, _ = runtime_mcp_factory() + runtime.config["environment_variables"] = {} + + env_vars = runtime.environment_variables + + assert env_vars["USE_CASE_CONFIG_TABLE_NAME"] == "test-table" + assert env_vars["USE_CASE_CONFIG_RECORD_KEY"] == "test-key" + assert "CUSTOM_VAR" not in env_vars + + +def test_base_runtime_params(runtime_mcp_factory): + """Test base_runtime_params property.""" + runtime, _ = runtime_mcp_factory() + + params = runtime.base_runtime_params + + assert params["agentRuntimeArtifact"]["containerConfiguration"]["containerUri"] == runtime.mcp_image_uri + assert params["roleArn"] == "arn:aws:iam::123456789012:role/test-role" + assert params["networkConfiguration"]["networkMode"] == "PUBLIC" + assert params["protocolConfiguration"]["serverProtocol"] == "MCP" + assert params["description"] == "Test runtime description" + assert "environmentVariables" in params + + +def test_create_runtime_params(runtime_mcp_factory): + """Test create_runtime_params property.""" + runtime, _ = runtime_mcp_factory() + + params = runtime.create_runtime_params + + assert params["agentRuntimeName"] == "test-runtime" + assert "agentRuntimeArtifact" in params + assert "roleArn" in params + + +def test_update_runtime_params(runtime_mcp_factory): + """Test update_runtime_params property.""" + runtime, _ = runtime_mcp_factory(runtime_id="runtime-123") + + params = runtime.update_runtime_params + + assert params["agentRuntimeId"] == "runtime-123" + assert "agentRuntimeArtifact" in params + assert "roleArn" in params + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_create_success(mock_retry, runtime_mcp_factory): + """Test successful runtime creation.""" + runtime, _ = runtime_mcp_factory() + + mock_retry.return_value = { + "agentRuntimeId": "runtime-abc123", + "agentRuntimeArn": "arn:aws:bedrock:us-east-1:123456789012:agent-runtime/runtime-abc123", + } + + runtime.create() + + assert runtime.runtime_id == "runtime-abc123" + assert runtime.runtime_arn == "arn:aws:bedrock:us-east-1:123456789012:agent-runtime/runtime-abc123" + mock_retry.assert_called_once() + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_create_missing_response_fields(mock_retry, runtime_mcp_factory): + """Test runtime creation with missing response fields.""" + runtime, _ = runtime_mcp_factory() + + mock_retry.return_value = {} + + with pytest.raises( + RuntimeError, match="Failed to create MCP runtime.*Runtime creation response missing required fields" + ): + runtime.create() + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_create_failure(mock_retry, runtime_mcp_factory): + """Test runtime creation failure.""" + runtime, _ = runtime_mcp_factory() + + mock_retry.side_effect = Exception("API Error") + + with pytest.raises(RuntimeError, match="Failed to create MCP runtime"): + runtime.create() + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_update_success(mock_retry, runtime_mcp_factory): + """Test successful runtime update.""" + runtime, _ = runtime_mcp_factory(runtime_id="runtime-123") + + mock_retry.return_value = { + "agentRuntimeArn": "arn:aws:bedrock:us-east-1:123456789012:agent-runtime/runtime-123", + "authorizerConfiguration": {"customJWTAuthorizer": {"allowedClients": ["-"]}}, + } + + runtime.update() + + assert runtime.runtime_arn == "arn:aws:bedrock:us-east-1:123456789012:agent-runtime/runtime-123" + mock_retry.assert_called() + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_update_missing_runtime_id(mock_retry, runtime_mcp_factory): + """Test runtime update without runtime ID.""" + runtime, _ = runtime_mcp_factory() + + with pytest.raises(RuntimeError, match="Failed to update MCP runtime.*Runtime ID is required for update operation"): + runtime.update() + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_update_failure(mock_retry, runtime_mcp_factory): + """Test runtime update failure.""" + runtime, _ = runtime_mcp_factory(runtime_id="runtime-123") + + mock_retry.side_effect = Exception("Update failed") + + with pytest.raises(RuntimeError, match="Failed to update MCP runtime"): + runtime.update() + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_delete_success(mock_retry, runtime_mcp_factory): + """Test successful runtime deletion.""" + runtime, _ = runtime_mcp_factory(runtime_id="runtime-123") + + runtime.delete() + + mock_retry.assert_called_once_with(runtime.agentcore_client.delete_agent_runtime, agentRuntimeId="runtime-123") + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_delete_missing_runtime_id(mock_retry, runtime_mcp_factory): + """Test runtime deletion with unknown runtime ID.""" + runtime, _ = runtime_mcp_factory(runtime_id="unknown") + + # Should not raise an exception, just log an error + runtime.delete() + + # Verify that retry_with_backoff was not called + mock_retry.assert_not_called() + + +@patch("utils.runtime_mcp.retry_with_backoff") +def test_delete_failure(mock_retry, runtime_mcp_factory): + """Test runtime deletion failure.""" + runtime, _ = runtime_mcp_factory(runtime_id="runtime-123") + + mock_retry.side_effect = Exception("Delete failed") + + with pytest.raises(RuntimeError, match="Failed to delete MCP runtime"): + runtime.delete() + + +def test_to_dict(runtime_mcp_factory): + """Test to_dict method.""" + runtime, _ = runtime_mcp_factory(runtime_id="runtime-123") + runtime.runtime_arn = "arn:aws:bedrock:us-east-1:123456789012:agent-runtime/runtime-123" + + result = runtime.to_dict() + + assert result["MCPRuntimeId"] == "runtime-123" + assert result["MCPRuntimeArn"] == "arn:aws:bedrock:us-east-1:123456789012:agent-runtime/runtime-123" + assert result["MCPAgentCoreName"] == "test-runtime" diff --git a/source/lambda/custom-resource/test/utils/test_smithy_target_creator.py b/source/lambda/custom-resource/test/utils/test_smithy_target_creator.py new file mode 100644 index 00000000..8152f26d --- /dev/null +++ b/source/lambda/custom-resource/test/utils/test_smithy_target_creator.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from utils.smithy_target_creator import SmithyTargetCreator + + +class TestSmithyTargetCreator: + + def test_initialization(self): + config = {"TargetName": "test-smithy", "TargetType": "smithyModel", "SchemaUri": "schemas/smithy-model.json"} + + creator = SmithyTargetCreator(config, "test-bucket") + assert creator.target_name == "test-smithy" + assert creator.target_type == "smithyModel" + assert creator.schema_uri == "schemas/smithy-model.json" + + def test_validate_configuration_success(self): + config = {"TargetName": "test-smithy", "TargetType": "smithyModel", "SchemaUri": "schemas/smithy-model.json"} + + creator = SmithyTargetCreator(config, "test-bucket") + assert creator.validate_configuration() is True + + def test_validate_configuration_missing_name(self): + config = {"TargetType": "smithyModel", "SchemaUri": "schemas/smithy-model.json"} + + creator = SmithyTargetCreator(config, "test-bucket") + with pytest.raises(ValueError, match="TargetName and SchemaUri are required"): + creator.validate_configuration() + + def test_validate_configuration_missing_schema_uri(self): + config = {"TargetName": "test-smithy", "TargetType": "smithyModel"} + + creator = SmithyTargetCreator(config, "test-bucket") + with pytest.raises(ValueError, match="TargetName and SchemaUri are required"): + creator.validate_configuration() + + def test_create_target_configuration(self): + config = {"TargetName": "test-smithy", "TargetType": "smithyModel", "SchemaUri": "schemas/smithy-model.json"} + + creator = SmithyTargetCreator(config, "test-bucket") + result = creator.create_target_configuration() + + expected = {"smithyModel": {"s3": {"uri": "s3://test-bucket/schemas/smithy-model.json"}}} + assert result == expected + + def test_build_credential_provider_configurations(self): + config = {"TargetName": "test-smithy", "TargetType": "smithyModel", "SchemaUri": "schemas/smithy-model.json"} + + creator = SmithyTargetCreator(config, "test-bucket") + result = creator.build_credential_provider_configurations() + + expected = [{"credentialProviderType": "GATEWAY_IAM_ROLE"}] + assert result == expected \ No newline at end of file diff --git a/source/lambda/custom-resource/utils/agent_core_utils.py b/source/lambda/custom-resource/utils/agent_core_utils.py new file mode 100644 index 00000000..dc1c33b5 --- /dev/null +++ b/source/lambda/custom-resource/utils/agent_core_utils.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from aws_lambda_powertools import Logger +from botocore.exceptions import ClientError, EndpointConnectionError, NoCredentialsError +from helper import get_service_client + +logger = Logger(utc=True) + + +def format_error_message(operation: str, error_code: str, error_message: str, context: dict = None) -> str: + """ + Format a descriptive error message with operation context. + + Args: + operation: The operation that failed (e.g., "create_agent_runtime") + error_code: AWS error code + error_message: AWS error message + context: Additional context information + + Returns: + str: Formatted error message with context + """ + base_message = f"Failed to {operation}: {error_code} - {error_message}" + + if context: + context_str = ", ".join([f"{k}={v}" for k, v in context.items() if v is not None]) + if context_str: + base_message += f" (Context: {context_str})" + + return base_message + + +def handle_client_error(e: ClientError, operation: str, context: dict = None) -> None: + """ + Handle ClientError with detailed logging and context-aware error messages. + + Args: + e: ClientError exception + operation: The operation that failed + context: Additional context information + + Raises: + ClientError: Re-raises the original exception with enhanced logging + """ + error_code = e.response["Error"]["Code"] + error_message = e.response["Error"]["Message"] + request_id = e.response.get("ResponseMetadata", {}).get("RequestId", "unknown") + + detailed_message = format_error_message(operation, error_code, error_message, context) + + logger.error( + f"AWS Service Error in {operation}", + extra={ + "error_code": error_code, + "error_message": error_message, + "request_id": request_id, + "operation": operation, + "context": context or {}, + "detailed_message": detailed_message, + }, + ) + + raise e + + +def validate_event_properties(event): + """Validate CloudFormation event properties.""" + if "ResourceProperties" not in event: + raise ValueError("Missing ResourceProperties in CloudFormation event") + if "RequestType" not in event: + raise ValueError("Missing RequestType in CloudFormation event") + + +def initialize_bedrock_client(): + """Initialize bedrock-agentcore client with error handling.""" + try: + bedrock_agentcore_client = get_service_client("bedrock-agentcore-control") + logger.info("Initialized bedrock-agentcore-control client") + return bedrock_agentcore_client + except NoCredentialsError: + error_msg = "AWS credentials not found. Ensure Lambda execution role has proper permissions." + logger.error(error_msg) + raise ValueError(error_msg) + except EndpointConnectionError as e: + error_msg = f"Cannot connect to bedrock-agentcore service: {str(e)}" + logger.error(error_msg) + raise ValueError(error_msg) diff --git a/source/lambda/custom-resource/utils/agentcore_mcp.py b/source/lambda/custom-resource/utils/agentcore_mcp.py new file mode 100644 index 00000000..f7b16f54 --- /dev/null +++ b/source/lambda/custom-resource/utils/agentcore_mcp.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from abc import ABC, abstractmethod +from typing import Dict, Any +from aws_lambda_powertools import Logger, Tracer +import os +from helper import get_service_client + +logger = Logger() +tracer = Tracer() + +AWS_REGION = os.environ.get("AWS_REGION") + + +class AgentcoreMCP(ABC): + """ + Abstract base class for MCP resource management through AWS Bedrock AgentCore. + + This class provides a common interface for managing MCP resources (gateways and runtimes) + with standardized lifecycle operations and error handling. + """ + + def __init__(self, config: Dict[str, Any], cognito_user_pool_id): + """ + Initialize the MCP resource manager. + + Args: + config: Configuration dictionary from MCPConfigManager + agentcore_client: AWS Bedrock AgentCore client + resource_id: Optional resource identifier (gateway_id or runtime_id) + """ + self.config = config + self.cognito_user_pool_id = cognito_user_pool_id + self.allowed_clients = ["-"] + + try: + self.agentcore_client = get_service_client("bedrock-agentcore-control", region_name=AWS_REGION) + except Exception as error: + logger.warning(f"Failed to initialize bedrock-agentcore-control client: {error}") + self.agentcore_client = get_service_client("bedrock-agentcore-control", region_name=AWS_REGION) + + @abstractmethod + def create(self): + """ + Create the MCP resource. + + Returns: + Dict containing creation result with resource details + + Raises: + RuntimeError: If creation fails + """ + pass + + @abstractmethod + def update(self): + """ + Update the MCP resource. + + Returns: + Dict containing update result with resource details + + Raises: + RuntimeError: If update fails + """ + pass + + @abstractmethod + def delete(self): + """ + Delete the MCP resource. + + Returns: + Dict containing deletion result + + Raises: + RuntimeError: If deletion fails + """ + pass + + @property + def base_auth_config(self): + if self.cognito_user_pool_id: + discovery_url = f"https://cognito-idp.{AWS_REGION}.amazonaws.com/{self.cognito_user_pool_id}/.well-known/openid-configuration" + return { + "authorizerConfiguration": { + "customJWTAuthorizer": { + "discoveryUrl": discovery_url, + "allowedClients": self.allowed_clients + } + }, + } + + + @property + def iam_client(self): + try: + return get_service_client("iam", region_name=AWS_REGION) + except Exception as error: + logger.warning(f"Failed to initialize iam client: {error}") + return get_service_client("iam", region_name=AWS_REGION) \ No newline at end of file diff --git a/source/lambda/custom-resource/utils/auth_manager.py b/source/lambda/custom-resource/utils/auth_manager.py new file mode 100644 index 00000000..0f56f5bc --- /dev/null +++ b/source/lambda/custom-resource/utils/auth_manager.py @@ -0,0 +1,159 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import re +import logging +from typing import Dict + +from utils.constants import EntityType + +from helper import get_service_client + +from aws_lambda_powertools import Logger, Tracer +from utils.data import MCPServerData + +logger = Logger(utc=True) +tracer = Tracer() + +class AuthManager: + """Manages authentication metadata and permissions for AgentCore components.""" + + def __init__(self, client_id: str, use_case_id: str, bedrock_client=None): + self.client_id = client_id + self.use_case_id = use_case_id + self.bedrock = bedrock_client or get_service_client('bedrock-agentcore-control') + self.logger = logging.getLogger(__name__) + + + @classmethod + @tracer.capture_method + def extract_values_regex(cls, arn: str) -> tuple[str, str]: + pattern = r"arn:aws:bedrock-agentcore:[^:]+:[^:]+:(\w+)/(.+)" + match = re.match(pattern, arn) + if match: + return match.group(1), match.group(2) + raise ValueError("Invalid ARN format") + + @tracer.capture_method + def _get_resource_tags(self, agentcore_arn: str) -> Dict[str, str]: + """Get resource tags as a dictionary.""" + response = self.bedrock.list_tags_for_resource(resourceArn=agentcore_arn) + return response.get('tags', {}) + + @tracer.capture_method + def _update_allowed_clients(self, mcp_server: MCPServerData, add: bool) -> None: + """Add client ID to agentcore resource.""" + if mcp_server.type == EntityType.GATEWAY.value: + self._update_gateway_permissions(mcp_server.agentcore_id, add) + elif mcp_server.type == EntityType.RUNTIME.value: + self._update_runtime_permissions(mcp_server.agentcore_id, add) + else: + logger.error(f"Invalid agentcore type: {mcp_server.type}") + raise ValueError("Invalid ARN. Type must be gateway or runtime.") + + + @tracer.capture_method + def _update_gateway_permissions(self, agentcore_id: str, add: bool) -> None: + """Update gateway with new client permissions.""" + response = self.bedrock.get_gateway(gatewayIdentifier=agentcore_id) + + allowed_clients = response['authorizerConfiguration']['customJWTAuthorizer'].get('allowedClients', []) + if add and self.client_id not in allowed_clients: + allowed_clients.append(self.client_id) + elif not add and self.client_id in allowed_clients: + allowed_clients.remove(self.client_id) + else: + logger.info("Permission already exists.") + + response['authorizerConfiguration']['customJWTAuthorizer']['allowedClients'] = allowed_clients + params = { + "gatewayIdentifier": agentcore_id, + "name": response['name'], + "description": response.get('description'), + "roleArn": response['roleArn'], + "protocolType": response['protocolType'], + "protocolConfiguration": response.get('protocolConfiguration'), + "authorizerType": response['authorizerType'], + "authorizerConfiguration": response['authorizerConfiguration'], + "kmsKeyArn": response.get('kmsKeyArn'), + "exceptionLevel": response.get('exceptionLevel') + } + return self.bedrock.update_gateway( + **{k: v for k, v in params.items() if v is not None} + ) + + @tracer.capture_method + def _update_runtime_permissions(self, agentcore_id: str, add: bool) -> None: + """Update agent runtime with new client permissions.""" + response = self.bedrock.get_agent_runtime(agentRuntimeId=agentcore_id) + allowed_clients = response['authorizerConfiguration']['customJWTAuthorizer'].get('allowedClients', []) + if add and self.client_id not in allowed_clients: + allowed_clients.append(self.client_id) + else: + logger.info("Permission already exists.") + + if not add and self.client_id in allowed_clients: + allowed_clients.remove(self.client_id) + else: + logger.info("Permission has already been removed.") + + + response['authorizerConfiguration']['customJWTAuthorizer']['allowedClients'] = allowed_clients + + params = { + "agentRuntimeId":agentcore_id, + "description":response.get('description'), + "agentRuntimeArtifact":response['agentRuntimeArtifact'], + "roleArn":response['roleArn'], + "networkConfiguration":response['networkConfiguration'], + "protocolConfiguration":response.get('protocolConfiguration'), + "environmentVariables": response.get('environmentVariables'), + "authorizerConfiguration": response['authorizerConfiguration'], + "requestHeaderConfiguration":response.get('requestHeaderConfiguration') + } + + return self.bedrock.update_agent_runtime( + **{k: v for k, v in params.items() if v is not None} + ) + + @tracer.capture_method + def add_permission(self, mcp_server: MCPServerData): + tags = self._get_resource_tags(mcp_server.agentcore_arn) + client_tag = tags.get(self.client_id, '') + if client_tag: + # Update existing client tag + use_case_ids = client_tag.split(':') + if self.use_case_id not in use_case_ids: + use_case_ids.append(self.use_case_id) + self.bedrock.tag_resource( + resourceArn=mcp_server.agentcore_arn, + tags={self.client_id: ':'.join(use_case_ids)} + ) + else: + # Add new client tag and update authorizer configuration + self.bedrock.tag_resource( + resourceArn=mcp_server.agentcore_arn, + tags={self.client_id: self.use_case_id} + ) + self._update_allowed_clients(mcp_server, True) + + @tracer.capture_method + def remove_permission(self, mcp_server: MCPServerData): + tags = self._get_resource_tags(mcp_server.agentcore_arn) + client_tag = tags.get(self.client_id, '') + if client_tag: + # Update existing client tag + use_case_ids = client_tag.split(':') + if self.use_case_id in use_case_ids: + use_case_ids.remove(self.use_case_id) + if use_case_ids: + self.bedrock.tag_resource( + resourceArn=mcp_server.agentcore_arn, + tags={self.client_id: ':'.join(use_case_ids)} + ) + else: + self.bedrock.untag_resource( + resourceArn=mcp_server.agentcore_arn, + tagKeys=[self.client_id] + ) + self._update_allowed_clients(mcp_server, False) \ No newline at end of file diff --git a/source/lambda/custom-resource/utils/constants.py b/source/lambda/custom-resource/utils/constants.py index 27c865fe..6e10be06 100644 --- a/source/lambda/custom-resource/utils/constants.py +++ b/source/lambda/custom-resource/utils/constants.py @@ -17,6 +17,7 @@ class CloudWatchNamespaces(str, Enum): USE_CASE_DEPLOYMENTS = "Solution/UseCaseDeployments" COLD_STARTS = "Solution/ColdStarts" FEEDBACK_MANAGEMENT = "Solution/FeedbackManagement" + FILE_HANDLING = "Solution/FileHandling" class CloudWatchMetrics(str, Enum): @@ -78,11 +79,23 @@ class CloudWatchMetrics(str, Enum): HARMFUL_FEEDBACK_COUNT = "HarmfulFeedbackCount" OTHER_NEGATIVE_FEEDBACK_COUNT = "OtherNegativeFeedbackCount" + # File Handling Metrics + FILES_UPLOADED = "FilesUploaded" + FILE_DELETE = "FileDelete" + FILE_DOWNLOAD = "FileDownload" + FILE_SIZE = "FileSize" + FILES_UPLOADED_WITH_EXTENSION = "FilesExtUploaded" + + +class EntityType(Enum): + RUNTIME = "runtime" + GATEWAY = "gateway" + METRICS_ENDPOINT = "https://metrics.awssolutionsbuilder.com/generic" PUBLISH_METRICS_PERIOD_IN_SECONDS = ( 60 * 60 * 3 -) # 3 hours. This is expected to match the runtime schedule defined by ANONYMOUS_METRICS_SCHEDULE +) # 3 hours. This is expected to match the runtime schedule defined by METRICS_SCHEDULE SSM_CONFIG_KEY = "SSM_CONFIG_KEY" USE_CASE_TYPE = "UseCaseType" @@ -124,3 +137,6 @@ class CloudWatchMetrics(str, Enum): "%Y-%m-%d %H:%M:%S.%f" # This is the required format for the metrics API. Any changes should be taken with care ) DEFAULT_API_GATEWAY_STAGE = "prod" +MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR = "MULTIMODAL_DATA_BUCKET" +MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR = "MULTIMODAL_METADATA_TABLE_NAME" +AGENTCORE_RUNTIME_IDLE_TIMEOUT_SECONDS = 3600 # 1hr diff --git a/source/lambda/custom-resource/utils/data.py b/source/lambda/custom-resource/utils/data.py index 2a4fa5a2..1527af5e 100644 --- a/source/lambda/custom-resource/utils/data.py +++ b/source/lambda/custom-resource/utils/data.py @@ -7,10 +7,11 @@ from datetime import datetime, timezone from decimal import Decimal from uuid import UUID - +from urllib.parse import unquote +import re import urllib3 from aws_lambda_powertools import Logger, Tracer -from utils.constants import METRICS_TIMESTAMP_FORMAT +from utils.constants import METRICS_TIMESTAMP_FORMAT, EntityType logger = Logger(utc=True) tracer = Tracer() @@ -18,6 +19,49 @@ http = urllib3.PoolManager() UUID_VERSION = 4 +# URL parsing constants +RUNTIME_URL_PATTERN = r"runtime/(.+?)/invocations" +GATEWAY_URL_PATTERN = r"https://([^.]+)\.gateway\.bedrock-agentcore" +RUNTIME_ARN_PATTERN = r"arn:[^:]+:bedrock-agentcore:[^:]+:[^:]+:runtime/[^/]+" +GATEWAY_ARN_PATTERN = r"https://([^.]+)\.gateway\.bedrock-agentcore\.([-\w]+)" + + +class AgentCoreUrlParser: + """Utility class for parsing AgentCore URLs and constructing ARNs.""" + + @staticmethod + def extract_runtime_id(url: str) -> str: + decoded_url = unquote(url) + match = re.search(RUNTIME_URL_PATTERN, decoded_url) + if match: + return match.group(1) + raise ValueError(f"Runtime ID could not be extracted from URL: {url}") + + @staticmethod + def extract_gateway_id(url: str) -> str: + match = re.search(GATEWAY_URL_PATTERN, url) + if match: + return match.group(1) + raise ValueError(f"Gateway ID could not be extracted from URL: {url}") + + @staticmethod + def extract_runtime_arn(url: str) -> str: + decoded_url = unquote(url) + match = re.search(RUNTIME_ARN_PATTERN, decoded_url) + if match: + return match.group(0) + raise ValueError(f"ARN could not be extracted from runtime URL: {url}") + + @staticmethod + def construct_gateway_arn(url: str, account_id: str) -> str: + match = re.search(GATEWAY_ARN_PATTERN, url) + if not match: + raise ValueError("Invalid gateway URL format") + + gateway_id = match.group(1) + region = match.group(2) + return f"arn:aws:bedrock-agentcore:{region}:{account_id}:gateway/{gateway_id}" + class DecimalEncoder(json.JSONEncoder): def default(self, o): @@ -33,13 +77,15 @@ class BuilderMetrics: data: dict timestamp: datetime uuid: UUID + account_id: str - def __init__(self, uuid: UUID, solution_id: str, version: str, data: dict = None): + def __init__(self, uuid: UUID, solution_id: str, version: str, data: dict = None, account_id: str = None): self.uuid = uuid self.solution_id = solution_id self.version = version self.data = data if data else {} self.timestamp = datetime.now(timezone.utc).strftime(METRICS_TIMESTAMP_FORMAT) + self.account_id = account_id if account_id else "unknown" def __post_init__(self): if not isinstance(self.solution_id, str): @@ -56,3 +102,39 @@ def __post_init__(self): UUID(self.uuid, version=UUID_VERSION) except ValueError: raise TypeError(f"Expected {self.uuid} to be a UUID") + + +@dataclass +class MCPServerData: + type: EntityType + url: str + use_case_id: str + use_case_name: str + agentcore_id: str + agentcore_arn: str + + def __init__(self, type: EntityType, url: str, use_case_id: str, use_case_name: str, account_id: str = None): + self.type = type + self.url = url + self.use_case_id = use_case_id + self.use_case_name = use_case_name + self.agentcore_id = self._extract_id() + self.agentcore_arn = self._construct_arn(account_id) + + def _extract_id(self) -> str: + if self.type == EntityType.RUNTIME.value: + return AgentCoreUrlParser.extract_runtime_id(self.url) + elif self.type == EntityType.GATEWAY.value: + return AgentCoreUrlParser.extract_gateway_id(self.url) + else: + raise ValueError(f"Invalid type {self.type}") + + def _construct_arn(self, account_id: str | None) -> str: + if self.type == EntityType.RUNTIME.value: + return AgentCoreUrlParser.extract_runtime_arn(self.url) + elif self.type == EntityType.GATEWAY.value: + if not account_id: + raise ValueError("Account ID is required to construct ARN for gateway") + return AgentCoreUrlParser.construct_gateway_arn(self.url, account_id) + else: + raise ValueError(f"Invalid type {self.type}") diff --git a/source/lambda/custom-resource/utils/gateway_mcp.py b/source/lambda/custom-resource/utils/gateway_mcp.py new file mode 100644 index 00000000..aa9b0959 --- /dev/null +++ b/source/lambda/custom-resource/utils/gateway_mcp.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict, Any +from aws_lambda_powertools import Logger, Tracer +from utils.agentcore_mcp import AgentcoreMCP +from utils.mcp_factory import MCPGatewayFactory +from utils.policy_manager import GatewayPolicyManager +from utils.lambda_target_creator import LambdaTargetCreator +from utils.smithy_target_creator import SmithyTargetCreator +from utils.openapi_target_creator import OpenAPITargetCreator +import uuid +import time +from operations.shared import retry_with_backoff + +logger = Logger() +tracer = Tracer() + +# Register target creators for the factory +MCPGatewayFactory.register_target_creator("lambda", LambdaTargetCreator) +MCPGatewayFactory.register_target_creator("smithyModel", SmithyTargetCreator) +MCPGatewayFactory.register_target_creator("openApiSchema", OpenAPITargetCreator) + + +class GatewayMCP(AgentcoreMCP): + """ + MCP Gateway resource manager. + + Handles the lifecycle of MCP gateways including creation, updates, and deletion + of gateways and their associated targets. + """ + + def __init__( + self, + config: Dict[str, Any], + cognito_user_pool_id, + gateway_role_arn, + gateway_name, + schema_bucket_name, + gateway_id = None + ): + """ + Initialize the Gateway MCP manager. + + Args: + config: Gateway configuration from MCPConfigManager + agentcore_client: AWS Bedrock AgentCore client + gateway_id: Optional gateway identifier for updates/deletes + """ + super().__init__(config, cognito_user_pool_id) + self.gateway_role_arn = gateway_role_arn + self.gateway_name = gateway_name + self.schema_bucket_name = schema_bucket_name + self.gateway_id = gateway_id + self.gateway_arn = None + self.gateway_url = None + self.targets = [] + # Extract role name from ARN (format: arn:aws:iam::account:role/role-name) + role_name = self.gateway_role_arn.split("/")[-1] + self.policy_manager = GatewayPolicyManager(role_name, self.agentcore_client) + + @property + def update_gateway_params(self): + gateway_params = self.base_gateway_params + gateway_params["gatewayIdentifier"] = self.gateway_id + return gateway_params + + @property + def create_gateway_params(self): + gateway_params = self.base_gateway_params + gateway_params["clientToken"] = str(uuid.uuid4()) + return gateway_params + + @property + def base_gateway_params(self): + return { + "name": self.gateway_name, + "roleArn": self.gateway_role_arn, + "protocolType": "MCP", + "exceptionLevel": "DEBUG", + **( + {} + if not self.config.get("use_case_description") + else {"description": self.config.get("use_case_description")} + ), + **self.gateway_auth_config, + } + + @property + def gateway_auth_config(self): + auth_config = self.base_auth_config + auth_config["authorizerType"] = "CUSTOM_JWT" + return auth_config + + @tracer.capture_method + def create(self): + """ + Create MCP gateway and its targets. + + Raises: + RuntimeError: If gateway creation fails + """ + + try: + response = retry_with_backoff(self.agentcore_client.create_gateway, **self.create_gateway_params) + + self.gateway_id = response.get("gatewayId") + self.gateway_arn = response.get("gatewayArn") + self.gateway_url = response.get("gatewayUrl") + logger.info(f"Gateway created successfully - ID: {self.gateway_id}") + + # Wait for gateway to become ACTIVE before creating targets + self._wait_for_gateway_active() + + self.create_targets() + + except Exception as e: + logger.error(f"Error in creating a gateway: {str(e)}") + raise + + def create_targets(self): + target_params = self.config.get("target_params", []) + + for target in target_params: + target_name = target.get("TargetName") + target_type = target.get("TargetType") + target_creator = MCPGatewayFactory.create_target_creator(target, self.schema_bucket_name) + + try: + response = retry_with_backoff( + self.agentcore_client.create_gateway_target, + **{ + "gatewayIdentifier": self.gateway_id, + "name": target_name, + "clientToken": str(uuid.uuid4()), + **( + {} + if not target.get("TargetDescription") + else {"description": target.get("TargetDescription")} + ), + "targetConfiguration": {"mcp": target_creator.create_target_configuration()}, + "credentialProviderConfigurations": target_creator.build_credential_provider_configurations(), + }, + ) + + target_id = response.get("targetId") + + self.targets.append( + { + "targetId": target_id, + "targetArn": response.get("targetArn"), + "targetName": target_name, + "targetType": target_type, + "status": response.get("status", "ACTIVE"), + } + ) + logger.info(f"Created ${target_name} with id: ${target_id}") + + # Add IAM policies based on target type + self.policy_manager.gateway_policy_factory(target_type, target) + + except Exception as error: + error_msg = f"Failed to create target {target_name}: {str(error)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + logger.info(f"Created {len(self.targets)}/{len(target_params)} target{'s' if len(self.targets) > 1 else ''}") + + def delete_targets(self): + + response = self.agentcore_client.list_gateway_targets(gatewayIdentifier=self.gateway_id) + target_params = response["items"] + + for target in target_params: + target_id = target.get("targetId") + target_name = target.get("name") + + try: + response = retry_with_backoff( + self.agentcore_client.delete_gateway_target, + **{ + "gatewayIdentifier": self.gateway_id, + "targetId": target_id, + }, + ) + + logger.info(f"Deleted Target ID: {target_id}") + + except Exception as error: + error_msg = f"Failed to delete target {target_name}: {str(error)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + # Wait for all targets to be fully deleted + if target_params: + self._wait_for_targets_cleared() + + @tracer.capture_method + def update(self): + """ + Update MCP gateway. + + Raises: + RuntimeError: If gateway update fails + """ + + try: + if not self.gateway_id: + raise ValueError("Gateway ID is required for update operation") + + response = self.agentcore_client.get_gateway(gatewayIdentifier=self.gateway_id) + self.gateway_arn = response["gatewayArn"] + self.gateway_url = response["gatewayUrl"] + self.allowed_clients = response["authorizerConfiguration"]["customJWTAuthorizer"]["allowedClients"] + if response.get("description") != self.config.get("use_case_description"): + response = retry_with_backoff(self.agentcore_client.update_gateway, **self.update_gateway_params) + # Wait for gateway to become ACTIVE before creating targets + self._wait_for_gateway_active() + logger.info(f"Gateway Description Updated successfully - ID: {self.gateway_id}") + + logger.info(f"Updating MCP gateway Targets: {self.gateway_id}") + self.delete_targets() + self.create_targets() + logger.info(f"Gateway updated successfully - ID: {self.gateway_id}") + + + except Exception as e: + error_msg = f"Failed to update MCP gateway {self.gateway_id}: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + @tracer.capture_method + def delete(self): + """ + Delete MCP gateway and its targets. + + Raises: + RuntimeError: If gateway deletion fails + """ + + try: + if self.gateway_id == 'unknown': + logger.warning("No gateway ID provided - gateway was never created, skipping deletion") + else: + logger.info(f"Deleting MCP gateway: {self.gateway_id}") + self.delete_targets() + self.agentcore_client.delete_gateway(gatewayIdentifier=self.gateway_id) + + # Clean up all custom IAM policies + self.policy_manager.destroy_all_custom_policies() + except Exception as e: + error_msg = f"Failed to delete MCP gateway {self.gateway_id}: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + def to_dict(self): + return { + "GatewayId": self.gateway_id, + "GatewayArn": self.gateway_arn, + "GatewayUrl": self.gateway_url, + "GatewayName": self.gateway_name, + "Targets": self.targets, + "TargetCount": len(self.targets) + } + + + def _wait_for_gateway_active(self, max_wait_time=300, poll_interval=5): + """ + Wait for gateway to reach READY status. + + Args: + max_wait_time: Maximum time to wait in seconds (default: 300) + poll_interval: Time between status checks in seconds (default: 5) + + Raises: + RuntimeError: If gateway enters a terminal failure state + TimeoutError: If gateway doesn't become READY within max_wait_time + """ + start_time = time.time() + + while time.time() - start_time < max_wait_time: + response = self.agentcore_client.get_gateway(gatewayIdentifier=self.gateway_id) + status = response.get("status") + + logger.info(f"Gateway status: {status}") + + if status == "READY": + logger.info(f"Gateway {self.gateway_id} is now READY") + return True + elif status in ["FAILED", "DELETING", "DELETED"]: + raise RuntimeError(f"Gateway entered terminal state: {status}") + + time.sleep(poll_interval) + + raise TimeoutError(f"Gateway did not become READY within {max_wait_time} seconds") + + def _wait_for_targets_cleared(self, max_wait_time=120, poll_interval=10): + """ + Wait until gateway has no targets. + + Args: + max_wait_time: Maximum time to wait in seconds (default: 120) + poll_interval: Time between status checks in seconds (default: 10) + + Raises: + TimeoutError: If targets still present after max_wait_time + """ + start_time = time.time() + + while time.time() - start_time < max_wait_time: + + response = self.agentcore_client.list_gateway_targets(gatewayIdentifier=self.gateway_id) + targets = response.get("items", []) + + logger.info(f"Waiting for {len(targets)} targets to delete") + time.sleep(poll_interval) + + if not targets: + logger.info("All targets deleted") + return True + + raise TimeoutError("Targets still present after timeout") \ No newline at end of file diff --git a/source/lambda/custom-resource/utils/lambda_target_creator.py b/source/lambda/custom-resource/utils/lambda_target_creator.py new file mode 100644 index 00000000..ffa2ad5f --- /dev/null +++ b/source/lambda/custom-resource/utils/lambda_target_creator.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import os +import re +import json +import boto3 +from typing import Dict, Any, List +from aws_lambda_powertools import Logger, Tracer +from utils.mcp_factory import MCPTargetCreator + +logger = Logger() +tracer = Tracer() + +LAMBDA_ARN_PATTERN = re.compile(r"^arn:aws:lambda:[a-z0-9-]+:\d{12}:function:[a-zA-Z0-9-_]+(?::[a-zA-Z0-9-_]+)?$") + +class LambdaTargetCreator(MCPTargetCreator): + + def __init__(self, target_config: Dict[str, Any], schema_bucket_name: str): + super().__init__(target_config, schema_bucket_name) + self.lambda_arn = target_config.get("LambdaArn") + + def validate_configuration(self) -> bool: + if not self.target_name or not self.lambda_arn: + raise ValueError("TargetName and LambdaArn are required") + return True + + @tracer.capture_method + def create_target_configuration(self) -> Dict[str, Any]: + try: + self.validate_configuration() + + lambda_config = {"lambda": {"lambdaArn": self.lambda_arn, "toolSchema": self.s3_block}} + + return lambda_config + + except Exception as e: + error_msg = f"Failed to create Lambda target configuration: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + def build_credential_provider_configurations(self) -> List[Dict[str, Any]]: + return [{"credentialProviderType": "GATEWAY_IAM_ROLE"}] diff --git a/source/lambda/custom-resource/utils/mcp_config_manager.py b/source/lambda/custom-resource/utils/mcp_config_manager.py new file mode 100644 index 00000000..f7466504 --- /dev/null +++ b/source/lambda/custom-resource/utils/mcp_config_manager.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import os +from typing import Dict, Any, Optional +from aws_lambda_powertools import Logger, Tracer +from boto3.dynamodb.types import TypeDeserializer, TypeSerializer +from helper import get_service_client + +logger = Logger() +tracer = Tracer() + + +class MCPConfigManager: + + def __init__(self, table_name: str = None): + self.ddb_client = get_service_client("dynamodb") + self.deserializer = TypeDeserializer() + self.serializer = TypeSerializer() + self.table_name = table_name or os.environ.get("USE_CASE_CONFIG_TABLE_NAME") + if not self.table_name: + raise ValueError( + "Table name must be provided or USE_CASE_CONFIG_TABLE_NAME environment variable must be set" + ) + + @tracer.capture_method + def read_mcp_config(self, config_key: str) -> Dict[str, Any]: + try: + logger.info(f"Attempting to read config from table: {self.table_name}, key: {config_key}") + response = self.ddb_client.get_item(TableName=self.table_name, Key={"key": {"S": config_key}}) + + if "Item" not in response: + logger.warn(f"Configuration not found for key: {config_key} in table: {self.table_name}") + raise ValueError(f"Configuration not found for key: {config_key}") + + deserialized_response = { + key: self.deserializer.deserialize(value) for key, value in response.get("Item", {}).items() + } + config = deserialized_response.get("config", {}) + + logger.info(f"Successfully read configuration for key: {config_key}") + return config + + except Exception as error: + logger.error( + f"Error reading configuration - Table: {self.table_name}, Key: {config_key}, Error: {str(error)}" + ) + if isinstance(error, ValueError): + raise + raise RuntimeError(f"Failed to read configuration: {str(error)}") + + @tracer.capture_method + def validate_mcp_gateway_config(self, config: Dict[str, Any]) -> Dict[str, Any]: + use_case_type = config.get("UseCaseType") + if use_case_type != "MCPServer": + raise ValueError(f"Invalid UseCaseType: {use_case_type}. Expected 'MCPServer'") + + mcp_params = config.get("MCPParams") + if not mcp_params: + raise ValueError("MCPParams not found in configuration") + + # Extract Gateway parameters + gateway_params = mcp_params.get("GatewayParams") + if not gateway_params: + raise ValueError("GatewayParams not found in MCPParams") + + # Extract Target parameters + target_params = gateway_params.get("TargetParams") + if not target_params: + raise ValueError("TargetParams not found in GatewayParams") + + if not isinstance(target_params, list) or len(target_params) == 0: + raise ValueError("TargetParams must be a non-empty array") + + # Validate each target + for target_index, target in enumerate(target_params): + self.validate_target_params(target, target_index) + + return { + "use_case_name": config.get("UseCaseName"), + "use_case_description": config.get("UseCaseDescription"), + "gateway_params": gateway_params, + "target_params": target_params, + } + + def validate_target_params(self, target: Dict[str, Any], target_index: int) -> None: + required_fields = ["TargetName", "TargetType", "SchemaUri"] + + for field in required_fields: + if field not in target: + raise ValueError(f"Required field '{field}' missing in target {target_index}") + + target_type = target.get("TargetType") + if target_type not in ["lambda", "openApiSchema", "smithyModel"]: + raise ValueError(f"Invalid TargetType: {target_type}. Must be one of: lambda, openapi, smithyModel") + + if target_type == "lambda" and "LambdaArn" not in target: + raise ValueError(f"LambdaArn required for lambda target {target_index}") + + @tracer.capture_method + def get_mcp_gateway_config(self, config_key: str) -> Dict[str, Any]: + config = self.read_mcp_config(config_key) + return self.validate_mcp_gateway_config(config) + + def validate_runtime_params(self, runtime_params: Dict[str, Any]) -> None: + required_fields = ["EcrUri"] + + for field in required_fields: + if field not in runtime_params: + raise ValueError(f"Required field '{field}' missing in RuntimeParams") + + ecr_uri = runtime_params.get("EcrUri") + if not ecr_uri or not isinstance(ecr_uri, str): + raise ValueError("EcrUri must be a non-empty string") + + environment_variables = runtime_params.get("EnvironmentVariables") + if environment_variables is not None and not isinstance(environment_variables, dict): + raise ValueError("EnvironmentVariables must be a dictionary") + + # Validate environment variable names and values + for env_key, env_value in (environment_variables or {}).items(): + if not isinstance(env_key, str) or not env_key: + raise ValueError("Environment variable names must be non-empty strings") + if not isinstance(env_value, str): + raise ValueError(f"Environment variable '{env_key}' value must be a string") + + @tracer.capture_method + def validate_mcp_runtime_config(self, config: Dict[str, Any]) -> Dict[str, Any]: + use_case_type = config.get("UseCaseType") + if use_case_type != "MCPServer": + raise ValueError(f"Invalid UseCaseType: {use_case_type}. Expected 'MCPServer'") + + mcp_params = config.get("MCPParams") + if not mcp_params: + raise ValueError("MCPParams not found in configuration") + + runtime_params = mcp_params.get("RuntimeParams") + if not runtime_params: + raise ValueError("RuntimeParams not found in MCPParams for runtime deployment") + + # Validate runtime parameters + self.validate_runtime_params(runtime_params) + + return { + "use_case_name": config.get("UseCaseName"), + "use_case_description": config.get("UseCaseDescription"), + "runtime_params": runtime_params, + "ecr_uri": runtime_params.get("EcrUri"), + "environment_variables": runtime_params.get("EnvironmentVariables", {}), + } + + @tracer.capture_method + def get_mcp_runtime_config(self, config_key: str) -> Dict[str, Any]: + config = self.read_mcp_config(config_key) + return self.validate_mcp_runtime_config(config) + + @tracer.capture_method + def write_config(self, config_key: str, item: Dict[str, Any]) -> Dict[str, Any]: + try: + logger.info(f"Writing config to table: {self.table_name}, key: {config_key}") + + # Prepare the item for DynamoDB + ddb_item = {"key": {"S": config_key}, "config": self.serializer.serialize(item)} + + response = self.ddb_client.put_item(TableName=self.table_name, Item=ddb_item) + + logger.info(f"Successfully wrote configuration for key: {config_key}") + return {"success": True, "response": response} + + except Exception as error: + logger.error( + f"Error writing configuration - Table: {self.table_name}, Key: {config_key}, Error: {str(error)}" + ) + raise RuntimeError(f"Failed to write configuration: {str(error)}") + + @tracer.capture_method + def update_gateway_config(self, config_key: str, gateway_result: Dict[str, Any]) -> Dict[str, Any]: + """ + Update the MCP configuration in DynamoDB with new gateway and target information + """ + try: + logger.info(f"Updating MCP config for key: {config_key}") + + # Read the original config from database to get the full structure + original_config = self.read_mcp_config(config_key) + + # Update the original config's GatewayParams with new information + gateway_params = original_config["MCPParams"]["GatewayParams"] + + # Add missing gateway-level information + gateway_params["GatewayId"] = gateway_result.get("GatewayId") + gateway_params["GatewayArn"] = gateway_result.get("GatewayArn") + gateway_params["GatewayUrl"] = gateway_result.get("GatewayUrl") + gateway_params["GatewayName"] = gateway_result.get("GatewayName") + + targets = gateway_result.get("Targets", []) + + # Update TargetParams with TargetId information + if "TargetParams" in gateway_params and isinstance(gateway_params["TargetParams"], list): + target_params = gateway_params["TargetParams"] + + # Create a mapping of target names to target IDs from the result + target_id_map = {} + for target in targets: + target_name = target.get("targetName") + target_id = target.get("targetId") + if target_name and target_id: + target_id_map[target_name] = target_id + + # Update each target with its ID + for target_param in target_params: + target_name = target_param.get("TargetName") + if target_name and target_name in target_id_map: + target_param["TargetId"] = target_id_map[target_name] + logger.info(f"Updated target '{target_name}' with ID: {target_id_map[target_name]}") + + # Write the updated configuration back to the database + result = self.write_config(config_key, original_config) + + logger.info(f"Successfully updated database record for key: {config_key}") + return result + + except Exception as error: + logger.error(f"Error updating MCP configuration: {str(error)}") + # Don't raise here as the gateway was created successfully + # Just log the error and continue + logger.warning("Gateway created successfully but failed to update database record") + return {"success": False, "error": str(error)} + + @tracer.capture_method + def update_runtime_config(self, config_key: str, runtime_result: Dict[str, Any]) -> Dict[str, Any]: + """ + Update the MCP configuration in DynamoDB with runtime information (RuntimeId, RuntimeArn) + """ + try: + logger.info(f"Updating MCP runtime config for key: {config_key}") + + # Read the original config from database to get the full structure + original_config = self.read_mcp_config(config_key) + + # Update the original config's RuntimeParams with new information + runtime_params = original_config["MCPParams"]["RuntimeParams"] + + # Add runtime-level information (generated by AWS) + runtime_params["RuntimeId"] = runtime_result.get("MCPRuntimeId") + runtime_params["RuntimeArn"] = runtime_result.get("MCPRuntimeArn") + runtime_params["RuntimeName"] = runtime_result.get("MCPAgentCoreName") + runtime_params["RuntimeUrl"] = runtime_result.get("MCPRuntimeUrl") + + # EcrUri and EnvironmentVariables should already be in the config from the original request + # but we log them for verification + logger.info(f"Runtime config - EcrUri: {runtime_params.get('EcrUri')}, " + f"EnvVars count: {len(runtime_params.get('EnvironmentVariables', {}))}") + + # Write the updated configuration back to the database + result = self.write_config(config_key, original_config) + + logger.info(f"Successfully updated runtime database record for key: {config_key}") + return result + + except Exception as error: + logger.error(f"Error updating MCP runtime configuration: {str(error)}") + # Don't raise here as the runtime was created successfully + # Just log the error and continue + logger.warning("Runtime created successfully but failed to update database record") + return {"success": False, "error": str(error)} diff --git a/source/lambda/custom-resource/utils/mcp_factory.py b/source/lambda/custom-resource/utils/mcp_factory.py new file mode 100644 index 00000000..12887946 --- /dev/null +++ b/source/lambda/custom-resource/utils/mcp_factory.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Factory pattern implementation for MCP Gateway target creators +""" + +from abc import ABC, abstractmethod +from typing import Dict, Any, List +from aws_lambda_powertools import Logger + +logger = Logger() + + +class MCPTargetCreator(ABC): + + def __init__(self, target_config: Dict[str, Any], schema_bucket_name: str): + self.target_config = target_config + self.target_name = target_config.get("TargetName") + self.target_type = target_config.get("TargetType") + self.schema_uri = target_config.get("SchemaUri") + + # Only create s3_block if schema_uri is provided + if self.schema_uri: + self.s3_block = {"s3": {"uri": f"s3://{schema_bucket_name}/{self.schema_uri.lstrip('/')}"}} + else: + self.s3_block = None + + @abstractmethod + def validate_configuration(self) -> bool: + pass + + @abstractmethod + def create_target_configuration(self) -> Dict[str, Any]: + pass + + def get_target_info(self) -> Dict[str, Any]: + target_info = { + "name": self.target_name, + "type": self.target_type, + "schema_uri": self.schema_uri, + } + + description = self.target_config.get("TargetDescription") + if description: + target_info["description"] = description + + return target_info + + +class MCPGatewayFactory: + + # Registry of target creators by type + target_creators = {} + + @classmethod + def register_target_creator(cls, target_type: str, creator_class): + cls.target_creators[target_type] = creator_class + + @classmethod + def _ensure_default_creators_registered(cls): + """Ensure default creators are registered, avoiding circular imports.""" + if not cls.target_creators: + # Import here to avoid circular imports + from utils.lambda_target_creator import LambdaTargetCreator + from utils.openapi_target_creator import OpenAPITargetCreator + from utils.smithy_target_creator import SmithyTargetCreator + + cls.register_target_creator("lambda", LambdaTargetCreator) + cls.register_target_creator("openApiSchema", OpenAPITargetCreator) + cls.register_target_creator("smithyModel", SmithyTargetCreator) + + @classmethod + def create_target_creator(cls, target_config: Dict[str, Any], schema_bucket_name: str) -> MCPTargetCreator: + # Ensure default creators are registered + cls._ensure_default_creators_registered() + + target_type = target_config.get("TargetType") + + if not target_type: + raise ValueError("Target type is required") + + if target_type not in cls.target_creators: + available_types = list(cls.target_creators.keys()) + raise ValueError(f"Unsupported target type: {target_type}. Available types: {available_types}") + + creator_class = cls.target_creators[target_type] + + return creator_class(target_config, schema_bucket_name) + + @classmethod + def get_supported_target_types(cls) -> List[str]: + # Ensure default creators are registered + cls._ensure_default_creators_registered() + return list(cls.target_creators.keys()) + + +def register_default_creators(): + from utils.lambda_target_creator import LambdaTargetCreator + from utils.openapi_target_creator import OpenAPITargetCreator + from utils.smithy_target_creator import SmithyTargetCreator + + # Default creators are registered when needed to avoid circular imports + MCPGatewayFactory.register_target_creator("lambda", LambdaTargetCreator) + MCPGatewayFactory.register_target_creator("openApiSchema", OpenAPITargetCreator) + MCPGatewayFactory.register_target_creator("smithyModel", SmithyTargetCreator) diff --git a/source/lambda/custom-resource/utils/metrics.py b/source/lambda/custom-resource/utils/metrics.py index a742b690..8707664a 100644 --- a/source/lambda/custom-resource/utils/metrics.py +++ b/source/lambda/custom-resource/utils/metrics.py @@ -39,6 +39,7 @@ def push_builder_metrics(builder_metrics: BuilderMetrics): { "Solution": builder_metrics.solution_id, "Version": builder_metrics.version, + "AccountId": builder_metrics.account_id, "TimeStamp": datetime.now(timezone.utc).strftime(METRICS_TIMESTAMP_FORMAT), "Data": builder_metrics.data, "UUID": builder_metrics.uuid, diff --git a/source/lambda/custom-resource/utils/metrics_payload.py b/source/lambda/custom-resource/utils/metrics_payload.py index ae8facd4..995cbd49 100644 --- a/source/lambda/custom-resource/utils/metrics_payload.py +++ b/source/lambda/custom-resource/utils/metrics_payload.py @@ -103,6 +103,83 @@ def get_cloudwatch_metrics_queries(): ) ] + # Multimodal metrics queries + multimodal_metrics_queries = [ + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED.value})", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED.value}) FROM "{CloudWatchNamespaces.FILE_HANDLING.value}" WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILE_DELETE.value})", + f"""SELECT COUNT({CloudWatchMetrics.FILE_DELETE.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILE_DOWNLOAD.value})", + f"""SELECT COUNT({CloudWatchMetrics.FILE_DOWNLOAD.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement'""", + ), + ( + f"AVG({CloudWatchMetrics.FILE_SIZE.value})", + f"""SELECT AVG({CloudWatchMetrics.FILE_SIZE.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement'""", + ), + # File extensions + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_gif)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'gif'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_jpg)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'jpg'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_jpeg)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'jpeg'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_png)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'png'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_webp)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'webp'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_pdf)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'pdf'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_csv)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'csv'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_doc)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'doc'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_docx)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'docx'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_xls)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'xls'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_xlsx)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'xlsx'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_html)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'html'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_txt)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'txt'""", + ), + ( + f"COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}_md)", + f"""SELECT COUNT({CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION.value}) FROM SCHEMA("{CloudWatchNamespaces.FILE_HANDLING.value}", UseCaseId, service, FileExtension) WHERE UseCaseId = '{USE_CASE_UUID}' AND service = 'FilesManagement' AND FileExtension = 'md'""", + ), + ] + queries = [] formatted_queries = [] queries += langchain_metrics_queries if METRICS_SERVICE_NAME else [] @@ -112,6 +189,7 @@ def get_cloudwatch_metrics_queries(): queries += cognito_usecase_metrics_queries if USER_POOL_ID and USER_CLIENT_ID else [] queries += bedrock_metrics_queries if METRICS_SERVICE_NAME else [] queries += feedback_metrics_queries if FEEDBACK_ENABLED and USE_CASE_UUID else [] + queries += multimodal_metrics_queries if USE_CASE_UUID else [] for query_label_pair in queries: formatted_queries.append( diff --git a/source/lambda/custom-resource/utils/metrics_schema.py b/source/lambda/custom-resource/utils/metrics_schema.py index 4feea6f3..dc178884 100644 --- a/source/lambda/custom-resource/utils/metrics_schema.py +++ b/source/lambda/custom-resource/utils/metrics_schema.py @@ -25,6 +25,10 @@ class MetricsSchema: "MaxPromptTemplateLength": (float, None), } + multimodal_params_schema = { + "MultimodalEnabled": (bool, None), + } + bedrock_agent_params_schema = {"EnableTrace": (bool, None)} agent_params_schema = {"AgentType": (str, None), "BedrockAgentParams": (Dict, bedrock_agent_params_schema)} @@ -36,6 +40,7 @@ class MetricsSchema: "ModelProvider": (str, None), "BedrockLlmParams": (Dict, bedrock_llm_params_schema), "PromptParams": (Dict, prompt_params_schema), + "MultimodalParams": (Dict, multimodal_params_schema), } knowledge_base_params_schema = {"KnowledgeBaseType": (str, None)} @@ -44,6 +49,38 @@ class MetricsSchema: feedback_params_schema = {"FeedbackEnabled": (bool, None)} + mcp_gateway_params_schema = { + "TargetCount": (int, None), + "TargetParams": (list, None), + } + + mcp_runtime_params_schema = {} + + mcp_params_schema = { + "MCPType": (str, None), + "GatewayParams": (Dict, mcp_gateway_params_schema), + "RuntimeParams": (Dict, mcp_runtime_params_schema), + } + + memory_config_schema = { + "LongTermEnabled": (bool, None), + } + + agent_builder_params_schema = { + "MemoryConfig": (Dict, memory_config_schema), + "BuiltInToolsCount": (int, None), + "BuiltInTools": (list, None), + "MCPServersCount": (int, None), + "MCPServers": (list, None), + } + + workflow_params_schema = { + "OrchestrationPattern": (str, None), + "MemoryConfig": (Dict, memory_config_schema), + "AgentsCount": (int, None), + "Agents": (list, None), + } + metrics_schema = { "NEW_KENDRA_INDEX_CREATED": (str, None), "VPC_ENABLED": (str, None), @@ -53,11 +90,15 @@ class MetricsSchema: "UseCaseType": (str, None), "KnowledgeBaseType": (str, None), "DeployUI": (bool, None), + "ProvisionedConcurrencyValue": (int, None), "LlmParams": (Dict, llm_params_schema), "AgentParams": (Dict, agent_params_schema), "KnowledgeBaseParams": (Dict, knowledge_base_params_schema), "AuthenticationParams": (Dict, authentication_params_schema), "FeedbackParams": (Dict, feedback_params_schema), + "MCPParams": (Dict, mcp_params_schema), + "AgentBuilderParams": (Dict, agent_builder_params_schema), + "WorkflowParams": (Dict, workflow_params_schema), } def __init__(self, data): diff --git a/source/lambda/custom-resource/utils/openapi_target_creator.py b/source/lambda/custom-resource/utils/openapi_target_creator.py new file mode 100644 index 00000000..6e98eefc --- /dev/null +++ b/source/lambda/custom-resource/utils/openapi_target_creator.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import re +from typing import Dict, Any, List +from aws_lambda_powertools import Logger, Tracer +from utils.mcp_factory import MCPTargetCreator + +logger = Logger() +tracer = Tracer() + + +class OpenAPITargetCreator(MCPTargetCreator): + + def __init__(self, target_config: Dict[str, Any], schema_bucket_name: str): + super().__init__(target_config, schema_bucket_name) + + def validate_configuration(self) -> bool: + if not self.target_name or not self.schema_uri: + raise ValueError("TargetName and SchemaUri are required") + return True + + @tracer.capture_method + def create_target_configuration(self) -> Dict[str, Any]: + try: + self.validate_configuration() + + openapi_config = { + "openApiSchema": self.s3_block, + } + + return openapi_config + + except Exception as e: + error_msg = f"Failed to create OpenAPI target configuration: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + def build_credential_provider_configurations(self) -> List[Dict[str, Any]]: + return self.build_openapi_credential_config() + + def build_openapi_credential_config(self) -> List[Dict[str, Any]]: + outbound_auth_params = self.target_config.get("OutboundAuthParams", {}) + auth_type = outbound_auth_params.get("OutboundAuthProviderType") + provider_arn = outbound_auth_params.get("OutboundAuthProviderArn") + + if auth_type == "OAUTH" and provider_arn: + return self.build_oauth_config(outbound_auth_params, provider_arn) + elif auth_type == "API_KEY" and provider_arn: + return self.build_api_key_config(outbound_auth_params, provider_arn) + + raise ValueError( + "OpenAPI targets require OutboundAuthParams with valid OutboundAuthProviderType and OutboundAuthProviderArn" + ) + + def build_oauth_config(self, outbound_auth_params: Dict[str, Any], provider_arn: str) -> List[Dict[str, Any]]: + additional_config = outbound_auth_params.get("AdditionalConfigParams", {}) + oauth_config_params = additional_config.get("OAuthAdditionalConfig", {}) + + custom_parameters = self.convert_custom_parameters(oauth_config_params.get("customParameters", [])) + oauth_provider_config = { + "providerArn": provider_arn, + } + + scopes = oauth_config_params.get("scopes", []) + oauth_provider_config["scopes"] = scopes + + # Add custom parameters if not empty + if custom_parameters: + oauth_provider_config["customParameters"] = custom_parameters + + return [ + { + "credentialProviderType": "OAUTH", + "credentialProvider": {"oauthCredentialProvider": oauth_provider_config}, + } + ] + + def build_api_key_config(self, outbound_auth_params: Dict[str, Any], provider_arn: str) -> List[Dict[str, Any]]: + additional_config = outbound_auth_params.get("AdditionalConfigParams", {}) + api_key_config = additional_config.get("ApiKeyAdditionalConfig", {}) + + api_key_provider_config = { + "providerArn": provider_arn, + **{ + key: value + for key, value in { + "credentialParameterName": api_key_config.get("parameterName"), + "credentialPrefix": api_key_config.get("prefix"), + "credentialLocation": api_key_config.get("location"), + }.items() + if value + }, + } + + return [ + { + "credentialProviderType": "API_KEY", + "credentialProvider": {"apiKeyCredentialProvider": api_key_provider_config}, + } + ] + + def convert_custom_parameters(self, custom_parameters_array: List[Dict[str, Any]]) -> Dict[str, str]: + custom_parameters = {} + if custom_parameters_array: + for param in custom_parameters_array: + if isinstance(param, dict) and "key" in param and "value" in param: + custom_parameters[param["key"]] = param["value"] + return custom_parameters diff --git a/source/lambda/custom-resource/utils/policy_manager.py b/source/lambda/custom-resource/utils/policy_manager.py new file mode 100644 index 00000000..d16e4e25 --- /dev/null +++ b/source/lambda/custom-resource/utils/policy_manager.py @@ -0,0 +1,306 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Policy Manager for MCP Gateway IAM role policies. + +Handles creation, validation, and deletion of IAM inline policies +for MCP Gateway roles based on target configurations. +""" +import os +import json +import urllib.parse +from typing import Dict, Any, List +from aws_lambda_powertools import Logger +from helper import get_service_client + +logger = Logger() +AWS_REGION = os.environ.get("AWS_REGION") + + +class GatewayPolicyManager: + """ + Manages IAM inline policies for MCP Gateway roles. + + Responsibilities: + - Create Lambda invoke policies + - Create OpenAPI authentication policies + - Check for duplicate policies + - Delete custom policies on cleanup + + Note: GatewayPolicyManager only handles IAM inline policies. CDK-managed policies + are attached as managed policies (not inline), so they won't appear in + list_role_policies() and are automatically safe from deletion. + """ + + def __init__(self, role_name: str, agentcore_client): + """ + Initialize the Policy Manager. + + Args: + role_name: Name of the IAM role to manage policies for + """ + + self.role_name = role_name + self.agentcore_client = agentcore_client + + try: + self.iam_client = get_service_client("iam", region_name=AWS_REGION) + except Exception as error: + logger.warning(f"Failed to initialize IAM client: {error}") + self.iam_client = get_service_client("iam", region_name=AWS_REGION) + + def gateway_policy_factory(self, target_type, target): + """ + Factory method to add IAM policies based on target type. + + Args: + target_type: Type of the target (lambda, openApiSchema, etc.) + target: Target configuration dictionary + """ + target_name = target.get("TargetName") + if not target_name: + raise ValueError("TargetName is required in target configuration") + + # Add IAM policies based on target type + if target_type == "lambda": + self.add_lambda_policy(target_name, target["LambdaArn"]) + elif target_type == "openApiSchema": + self._add_openapi_policy_for_target( + target_name=target_name, + outbound_auth_params=target.get("OutboundAuthParams", {}) + ) + + def add_lambda_policy(self, target_name: str, lambda_arn: str) -> None: + """ + Add a policy to allow Lambda function invocation. + + Args: + target_name: Name of the target + lambda_arn: ARN of the Lambda function + + Raises: + RuntimeError: If policy creation fails + """ + try: + policy_name = f"{target_name}-lambda-access-policy" + + policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["lambda:InvokeFunction"], + "Resource": [lambda_arn] + } + ] + } + + if self.is_duplicate_policy(policy_document, policy_name): + logger.info(f"Policy {policy_name} already exists, skipping") + return + + self.iam_client.put_role_policy( + RoleName=self.role_name, + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + ) + + logger.info(f"Successfully added Lambda policy {policy_name} to role {self.role_name}") + + except Exception as e: + error_msg = f"Failed to add Lambda policy for {target_name}: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + def add_openapi_policy( + self, + target_name: str, + provider_arn: str, + auth_policy_action: str, + secret_arn: str + ) -> None: + """ + Add a policy to allow OpenAPI target authentication. + + Args: + target_name: Name of the target + provider_arn: ARN of the credential provider + auth_policy_action: IAM action for the auth type (GetResourceOauth2Token or GetResourceApiKey) + secret_arn: ARN of the secret in Secrets Manager + + Raises: + RuntimeError: If policy creation fails + """ + try: + provider_name = provider_arn.split("/")[-1] + policy_name = f"{target_name}-{provider_name}-access-policy" + + policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [auth_policy_action], + "Resource": [ + "/".join(provider_arn.split("/")[0:2]), # Token vault ARN + provider_arn # Credential provider ARN + ] + }, + { + "Effect": "Allow", + "Action": ["secretsmanager:GetSecretValue"], + "Resource": [secret_arn] + } + ] + } + + if self.is_duplicate_policy(policy_document, policy_name): + logger.info(f"Policy {policy_name} already exists, skipping") + return + + self.iam_client.put_role_policy( + RoleName=self.role_name, + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + ) + + logger.info(f"Successfully added OpenAPI policy {policy_name} to role {self.role_name}") + + except Exception as e: + error_msg = f"Failed to add OpenAPI policy for {target_name}: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + def is_duplicate_policy(self, policy_document: Dict[str, Any], policy_name: str) -> bool: + """ + Check if a policy with the same name and content already exists. + + Args: + policy_document: The policy document to compare + policy_name: Name of the policy + + Returns: + True if policy exists with identical content, False otherwise + """ + try: + existing_policy = self.iam_client.get_role_policy( + RoleName=self.role_name, + PolicyName=policy_name + ) + + # Boto3 typically returns PolicyDocument as a dict, but handle both cases + policy_doc = existing_policy['PolicyDocument'] + + if isinstance(policy_doc, dict): + existing_doc = policy_doc + elif isinstance(policy_doc, str): + # Fallback: handle URL-encoded JSON string (older boto3 versions or edge cases) + existing_doc = json.loads(urllib.parse.unquote(policy_doc)) + else: + logger.warning(f"Unexpected PolicyDocument type: {type(policy_doc)}") + return False + + # Compare with new policy_document + if existing_doc == policy_document: + logger.debug(f"Policy {policy_name} already exists with identical permissions") + return True + else: + logger.info(f"Policy {policy_name} exists but differs, will update") + return False + + except self.iam_client.exceptions.NoSuchEntityException: + logger.debug(f"Policy {policy_name} does not exist") + return False + except Exception as e: + logger.warning(f"Error checking for duplicate policy {policy_name}: {e}") + return False + + def destroy_all_custom_policies(self) -> None: + """ + Delete all inline policies from the role. + + This is called during stack deletion to clean up policies created + by the custom resource. CDK-managed policies are safe because they + are managed policies (not inline) and won't appear in this list. + """ + try: + # List all inline policies on the role + response = self.iam_client.list_role_policies(RoleName=self.role_name) + policy_names = response.get("PolicyNames", []) + + if not policy_names: + logger.info(f"No inline policies to delete for role {self.role_name}") + return + + logger.info(f"Deleting {len(policy_names)} inline policies from role {self.role_name}") + + for policy_name in policy_names: + try: + self.iam_client.delete_role_policy( + RoleName=self.role_name, + PolicyName=policy_name + ) + logger.info(f"Deleted policy: {policy_name}") + except self.iam_client.exceptions.NoSuchEntityException: + logger.warning(f"Policy {policy_name} already deleted") + except Exception as e: + logger.error(f"Failed to delete policy {policy_name}: {e}") + # Continue deleting other policies even if one fails + + logger.info(f"Completed policy cleanup for role {self.role_name}") + + except Exception as e: + error_msg = f"Failed to destroy policies for role {self.role_name}: {str(e)}" + logger.error(error_msg) + # Don't raise - we want stack deletion to proceed even if policy cleanup fails + logger.warning("Continuing with stack deletion despite policy cleanup failure") + + + + def _add_openapi_policy_for_target( + self, + target_name: str, + outbound_auth_params: Dict[str, Any] + ) -> None: + """ + Add IAM policy for OpenAPI target by fetching credential provider details. + + This method handles AgentCore operations to get secret ARNs, then delegates + to GatewayPolicyManager for IAM policy creation. + + Args: + target_name: Name of the target + outbound_auth_params: Authentication parameters containing provider ARN and type + """ + auth_type = outbound_auth_params.get("OutboundAuthProviderType") + provider_arn = outbound_auth_params.get("OutboundAuthProviderArn") + if not provider_arn: + raise ValueError("OutboundAuthProviderArn is required") + provider_name = provider_arn.split("/")[-1] + + try: + # Fetch credential provider details from AgentCore + if auth_type == "OAUTH": + auth_policy_action = "bedrock-agentcore:GetResourceOauth2Token" + response = self.agentcore_client.get_oauth2_credential_provider(name=provider_name) + secret_arn = response["clientSecretArn"]["secretArn"] + else: # API_KEY + auth_policy_action = "bedrock-agentcore:GetResourceApiKey" + response = self.agentcore_client.get_api_key_credential_provider(name=provider_name) + secret_arn = response["apiKeySecretArn"]["secretArn"] + + # Delegate IAM policy creation to GatewayPolicyManager + self.add_openapi_policy( + target_name=target_name, + provider_arn=provider_arn, + auth_policy_action=auth_policy_action, + secret_arn=secret_arn + ) + + except Exception as e: + logger.error(f"Failed to add OpenAPI policy for target {target_name}: {e}") + raise + + diff --git a/source/lambda/custom-resource/utils/runtime_mcp.py b/source/lambda/custom-resource/utils/runtime_mcp.py new file mode 100644 index 00000000..afe9fd9e --- /dev/null +++ b/source/lambda/custom-resource/utils/runtime_mcp.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import os +from typing import Dict, Any +from aws_lambda_powertools import Logger, Tracer +from utils.agentcore_mcp import AgentcoreMCP +from operations.shared import retry_with_backoff + +logger = Logger() +tracer = Tracer() +AWS_REGION = os.environ.get("AWS_REGION") + + +class RuntimeMCP(AgentcoreMCP): + """ + MCP Runtime resource manager. + + Handles the lifecycle of MCP runtimes including creation, updates, and deletion + of containerized MCP server runtimes. + """ + + def __init__( + self, + config: Dict[str, Any], + cognito_user_pool_id, + runtime_name, + execution_role_arn, + table_name, + config_key, + runtime_id=None, + ): + """ + Initialize the Runtime MCP manager. + + Args: + config: Runtime configuration from MCPConfigManager + cognito_user_pool_id: Cognito user pool ID for authentication + runtime_name: Name of the runtime + execution_role_arn: IAM role ARN for runtime execution + table_name: DynamoDB table name for config storage + config_key: Config record key in DynamoDB + allowed_clients: List of allowed client IDs + runtime_id: Optional runtime identifier for updates/deletes + """ + super().__init__(config, cognito_user_pool_id) + self.runtime_name = runtime_name + self.execution_role_arn = execution_role_arn + self.table_name = table_name + self.config_key = config_key + self.runtime_id = runtime_id + self.runtime_arn = None + + @property + def mcp_image_uri(self): + """Get and validate ECR image URI from config.""" + ecr_uri = self.config.get("ecr_uri") + if not ecr_uri: + raise ValueError("EcrUri not found in MCP runtime configuration") + + # Validate ECR URI format - must include a tag + ecr_uri = ecr_uri.strip() + if ":" not in ecr_uri.split("/")[-1]: + raise ValueError( + f"Invalid ECR URI format: '{ecr_uri}'. ECR URI must include a tag (e.g., :latest, :v1.0.0)" + ) + return ecr_uri + + @property + def environment_variables(self): + """Build runtime environment variables.""" + custom_env_vars = self.config.get("environment_variables", {}) + + # Base environment variables + env_vars = { + "USE_CASE_CONFIG_TABLE_NAME": self.table_name, + "USE_CASE_CONFIG_RECORD_KEY": self.config_key, + } + + # Add custom environment variables + env_vars.update(custom_env_vars) + + return env_vars + + @property + def base_runtime_params(self): + """Build base runtime parameters shared between create and update.""" + return { + "agentRuntimeArtifact": {"containerConfiguration": {"containerUri": self.mcp_image_uri}}, + "roleArn": self.execution_role_arn, + "networkConfiguration": {"networkMode": "PUBLIC"}, + "protocolConfiguration": {"serverProtocol": "MCP"}, + "environmentVariables": self.environment_variables, + **( + {} + if not self.config.get("use_case_description") + else {"description": self.config.get("use_case_description")} + ), + **self.base_auth_config, + } + + @property + def create_runtime_params(self): + """Build parameters for runtime creation.""" + params = self.base_runtime_params + params["agentRuntimeName"] = self.runtime_name + return params + + @property + def update_runtime_params(self): + """Build parameters for runtime update.""" + params = self.base_runtime_params + params["agentRuntimeId"] = self.runtime_id + return params + + @property + def runtime_url(self): + if self.runtime_arn: + encoded_arn = self.runtime_arn.replace(":", "%3A").replace("/", "%2F") + return f"https://bedrock-agentcore.{AWS_REGION}.amazonaws.com/runtimes/{encoded_arn}/invocations?qualifier=DEFAULT" + + @tracer.capture_method + def create(self): + """ + Create MCP runtime. + + Raises: + RuntimeError: If runtime creation fails + """ + try: + logger.info(f"Creating MCP runtime: {self.runtime_name}") + + response = retry_with_backoff(self.agentcore_client.create_agent_runtime, **self.create_runtime_params) + + self.runtime_id = response.get("agentRuntimeId") + self.runtime_arn = response.get("agentRuntimeArn") + + if not self.runtime_id or not self.runtime_arn: + raise ValueError( + "Runtime creation response missing required fields (agentRuntimeId or agentRuntimeArn)" + ) + + logger.info(f"Runtime created successfully - ID: {self.runtime_id}, ARN: {self.runtime_arn}") + + except Exception as e: + error_msg = f"Failed to create MCP runtime: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + @tracer.capture_method + def update(self): + """ + Update MCP runtime. + + Raises: + RuntimeError: If runtime update fails + """ + try: + if not self.runtime_id: + raise ValueError("Runtime ID is required for update operation") + response = retry_with_backoff(self.agentcore_client.get_agent_runtime, agentRuntimeId=self.runtime_id) + self.allowed_clients = response["authorizerConfiguration"]["customJWTAuthorizer"]["allowedClients"] + logger.info(f"Updating MCP runtime: {self.runtime_id}") + + response = retry_with_backoff(self.agentcore_client.update_agent_runtime, **self.update_runtime_params) + + self.runtime_arn = response.get("agentRuntimeArn") + + logger.info(f"Runtime updated successfully - ID: {self.runtime_id}") + + except Exception as e: + error_msg = f"Failed to update MCP runtime {self.runtime_id}: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + @tracer.capture_method + def delete(self): + """ + Delete MCP runtime. + + Raises: + RuntimeError: If runtime deletion fails + """ + try: + if self.runtime_id == "unknown": + logger.error("Runtime ID is required for delete operation") + else: + logger.info(f"Deleting MCP runtime: {self.runtime_id}") + retry_with_backoff(self.agentcore_client.delete_agent_runtime, agentRuntimeId=self.runtime_id) + logger.info(f"Runtime deleted successfully - ID: {self.runtime_id}") + + except Exception as e: + error_msg = f"Failed to delete MCP runtime {self.runtime_id}: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + def to_dict(self): + """Convert runtime details to dictionary for CloudFormation response.""" + return { + "MCPRuntimeId": self.runtime_id, + "MCPRuntimeArn": self.runtime_arn, + "MCPAgentCoreName": self.runtime_name, + "MCPRuntimeUrl": self.runtime_url, + } diff --git a/source/lambda/custom-resource/utils/smithy_target_creator.py b/source/lambda/custom-resource/utils/smithy_target_creator.py new file mode 100644 index 00000000..3aec54e9 --- /dev/null +++ b/source/lambda/custom-resource/utils/smithy_target_creator.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import re +from typing import Dict, Any, List +from aws_lambda_powertools import Logger, Tracer +from utils.mcp_factory import MCPTargetCreator + +logger = Logger() +tracer = Tracer() + + +class SmithyTargetCreator(MCPTargetCreator): + + def __init__(self, target_config: Dict[str, Any], schema_bucket_name: str): + super().__init__(target_config, schema_bucket_name) + + def validate_configuration(self) -> bool: + if not self.target_name or not self.schema_uri: + raise ValueError("TargetName and SchemaUri are required") + return True + + @tracer.capture_method + def create_target_configuration(self) -> Dict[str, Any]: + try: + self.validate_configuration() + + smithy_config = {"smithyModel": self.s3_block} + + return smithy_config + + except Exception as e: + error_msg = f"Failed to create Smithy target configuration: {str(e)}" + logger.error(error_msg) + raise RuntimeError(error_msg) + + def build_credential_provider_configurations(self) -> List[Dict[str, Any]]: + return [{"credentialProviderType": "GATEWAY_IAM_ROLE"}] diff --git a/source/lambda/ext-idp-group-mapper/poetry.lock b/source/lambda/ext-idp-group-mapper/poetry.lock index ba36f371..0b1c9aeb 100644 --- a/source/lambda/ext-idp-group-mapper/poetry.lock +++ b/source/lambda/ext-idp-group-mapper/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "aws-lambda-powertools" @@ -47,27 +47,27 @@ wrapt = "*" [[package]] name = "boto3" -version = "1.40.15" +version = "1.40.53" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "boto3-1.40.15-py3-none-any.whl", hash = "sha256:52b8aa78c9906c4e49dcec6817c041df33c9825073bf66e7df8fc00afbe47b4b"}, - {file = "boto3-1.40.15.tar.gz", hash = "sha256:271b379ce5ad35ca82f1009e917528a182eed0e2de197ccffb0c51acadec5c79"}, + {file = "boto3-1.40.53-py3-none-any.whl", hash = "sha256:65ded2738de259bd9030feb4772ec7b53d5b661befa88ce836117c3df8265309"}, + {file = "boto3-1.40.53.tar.gz", hash = "sha256:3f8cf56034cfde20dd0abca01349f64ab65734d90c3fbf7357e8a84cb64a62ee"}, ] [package.dependencies] -botocore = ">=1.40.15,<1.41.0" +botocore = ">=1.40.53,<1.41.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.13.0,<0.14.0" +s3transfer = ">=0.14.0,<0.15.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-layer" -version = "3.0.7" +version = "4.0.0" description = "Layer for AWS Boto3 python SDK" optional = false python-versions = "^3.13" @@ -76,8 +76,8 @@ files = [] develop = true [package.dependencies] -boto3 = "1.40.15" -botocore = "1.40.15" +boto3 = "1.40.53" +botocore = "1.40.53" urllib3 = "2.5.0" [package.source] @@ -86,14 +86,14 @@ url = "../layers/aws_boto3" [[package]] name = "botocore" -version = "1.40.15" +version = "1.40.53" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "botocore-1.40.15-py3-none-any.whl", hash = "sha256:b364e039d2b67e509cfb089cb39b295251e48a60cc68fd591defbe10b44d83f9"}, - {file = "botocore-1.40.15.tar.gz", hash = "sha256:4960800e4c5a7b43db22550979c22f5a324cbaf75ef494bbb2cf400ef1e6aca7"}, + {file = "botocore-1.40.53-py3-none-any.whl", hash = "sha256:840322b0af4be7a6e2effddb4eb388053c25af0618f627f37d8b03cc1edbc928"}, + {file = "botocore-1.40.53.tar.gz", hash = "sha256:4ebb9e6648c4896d3f0cdda5ff30b5de9a83aeb591be89a16f98cc5ee3cd371c"}, ] [package.dependencies] @@ -462,7 +462,7 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "custom-boto3-init" -version = "3.0.7" +version = "4.0.0" description = "Initialize boto config for AWS Python SDK with custom configuration" optional = false python-versions = "^3.13" @@ -910,14 +910,14 @@ tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asy [[package]] name = "s3transfer" -version = "0.13.1" +version = "0.14.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724"}, - {file = "s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf"}, + {file = "s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456"}, + {file = "s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125"}, ] [package.dependencies] diff --git a/source/lambda/ext-idp-group-mapper/pyproject.toml b/source/lambda/ext-idp-group-mapper/pyproject.toml index d4632667..e8a65701 100644 --- a/source/lambda/ext-idp-group-mapper/pyproject.toml +++ b/source/lambda/ext-idp-group-mapper/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "ext-idp-group-mapper" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Lambda implementation to change Cognito user groups to External Identity Provider groups in the JWT token" packages = [ diff --git a/source/lambda/feedback-management/package-lock.json b/source/lambda/feedback-management/package-lock.json index 0b930f39..30cbd776 100644 --- a/source/lambda/feedback-management/package-lock.json +++ b/source/lambda/feedback-management/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/feedback-management", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/feedback-management", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "dependencies": { "jsonpath-plus": "^10.3.0", @@ -26,7 +26,7 @@ "eslint": "^9.16.0", "jest": "^29.7.0", "lodash": "^4.17.21", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" @@ -870,9 +870,9 @@ } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "license": "MIT", "dependencies": { @@ -4166,9 +4166,9 @@ "license": "MIT" }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -4841,11 +4841,10 @@ } }, "node_modules/prettier": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz", - "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, - "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" }, diff --git a/source/lambda/feedback-management/package.json b/source/lambda/feedback-management/package.json index 9758b578..a760523b 100644 --- a/source/lambda/feedback-management/package.json +++ b/source/lambda/feedback-management/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/feedback-management", - "version": "3.0.7", + "version": "4.0.0", "description": "This lambda supports backs the API to process chat response feedbacks", "main": "index.ts", "scripts": { @@ -13,7 +13,7 @@ "code-linter-js": "./node_modules/eslint/bin/eslint.js lambda --ext .js", "code-linter-ts": "./node_modules/eslint/bin/eslint.js bin lib --ext .ts", "code-linter": "npm run code-linter-ts && npm run code-linter-js", - "code-formatter": "./node_modules/prettier/bin-prettier.js --config .prettierrc.yml '**/*.ts' '**/*.js' --write" + "code-formatter": "prettier --config ../../../.prettierrc.yml --ignore-path ../../../.prettierignore --write '**/*.{js,ts,json,css,md}'" }, "author": { "name": "Amazon Web Services", @@ -33,7 +33,7 @@ "eslint": "^9.16.0", "jest": "^29.7.0", "lodash": "^4.17.21", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" diff --git a/source/lambda/feedback-management/services/conversation-retrieval-service.ts b/source/lambda/feedback-management/services/conversation-retrieval-service.ts index 2ef0d2a8..4e87195d 100644 --- a/source/lambda/feedback-management/services/conversation-retrieval-service.ts +++ b/source/lambda/feedback-management/services/conversation-retrieval-service.ts @@ -3,6 +3,7 @@ import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; import { unmarshall } from '@aws-sdk/util-dynamodb'; +import { AWSClientManager } from 'aws-sdk-lib'; import { logger, tracer } from '../power-tools-init'; export interface ConversationMessage { @@ -26,10 +27,7 @@ export class ConversationRetrievalService { private readonly dynamoDBClient: DynamoDBClient; constructor(region?: string) { - this.dynamoDBClient = new DynamoDBClient({ - region: region || process.env.AWS_REGION, - maxAttempts: 3 - }); + this.dynamoDBClient = AWSClientManager.getServiceClient('dynamodb', tracer); } /** diff --git a/source/lambda/feedback-management/services/dynamodb-use-case-retriever.ts b/source/lambda/feedback-management/services/dynamodb-use-case-retriever.ts index 60ae30bf..f023a19c 100644 --- a/source/lambda/feedback-management/services/dynamodb-use-case-retriever.ts +++ b/source/lambda/feedback-management/services/dynamodb-use-case-retriever.ts @@ -3,6 +3,7 @@ import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; import { unmarshall } from '@aws-sdk/util-dynamodb'; +import { AWSClientManager } from 'aws-sdk-lib'; import { logger, tracer } from '../power-tools-init'; import { TIME_5_MINS } from '../utils/constants'; import { CacheManager } from '../utils/cache-manager'; @@ -15,10 +16,7 @@ export class UseCaseRetriever { private static readonly CACHE_TTL = TIME_5_MINS; constructor(region?: string) { - this.dynamoDBClient = new DynamoDBClient({ - region: region || process.env.AWS_REGION, - maxAttempts: 3 - }); + this.dynamoDBClient = AWSClientManager.getServiceClient('dynamodb', tracer); if (!process.env.USE_CASE_CONFIG_TABLE_NAME) { throw new Error('USE_CASE_CONFIG_TABLE_NAME is required'); diff --git a/source/lambda/feedback-management/services/feedback-storage-service.ts b/source/lambda/feedback-management/services/feedback-storage-service.ts index 49694ba6..8e8e8570 100644 --- a/source/lambda/feedback-management/services/feedback-storage-service.ts +++ b/source/lambda/feedback-management/services/feedback-storage-service.ts @@ -3,6 +3,7 @@ import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3'; import { v4 as uuidv4 } from 'uuid'; +import { AWSClientManager } from 'aws-sdk-lib'; import { logger, metrics, tracer } from '../power-tools-init'; import { FeedbackRequest, EnrichedFeedback } from '../model/data-model'; import { ConfigMappingService } from './config-mapping-service'; @@ -29,9 +30,7 @@ export class FeedbackStorageService { private readonly MESSAGE_CACHE_TTL = 3600000; // 1 hour in milliseconds constructor(options?: FeedbackStorageOptions) { - this.s3Client = new S3Client({ - region: options?.region || process.env.AWS_REGION - }); + this.s3Client = AWSClientManager.getServiceClient('s3', tracer); this.bucketName = options?.bucket || process.env.FEEDBACK_BUCKET_NAME || ''; this.configMappingService = new ConfigMappingService(); this.cacheManager = CacheManager.getInstance(); diff --git a/source/lambda/feedback-management/test/index.test.ts b/source/lambda/feedback-management/test/index.test.ts index e4c20a24..093642ab 100644 --- a/source/lambda/feedback-management/test/index.test.ts +++ b/source/lambda/feedback-management/test/index.test.ts @@ -1,6 +1,10 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + import { mockClient } from 'aws-sdk-client-mock'; import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3'; @@ -32,7 +36,8 @@ jest.mock('../utils/cache-manager', () => { jest.mock('@aws-lambda-powertools/tracer', () => ({ Tracer: jest.fn().mockImplementation(() => ({ - getRootXrayTraceId: jest.fn().mockReturnValue('fake-trace-id') + getRootXrayTraceId: jest.fn().mockReturnValue('fake-trace-id'), + captureAWSv3Client: jest.fn((client) => client) })) })); diff --git a/source/lambda/feedback-management/test/services/conversation-retrieval-service.test.ts b/source/lambda/feedback-management/test/services/conversation-retrieval-service.test.ts index d9b8387f..2e925a48 100644 --- a/source/lambda/feedback-management/test/services/conversation-retrieval-service.test.ts +++ b/source/lambda/feedback-management/test/services/conversation-retrieval-service.test.ts @@ -1,6 +1,10 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; import { mockClient } from 'aws-sdk-client-mock'; import { ConversationRetrievalService } from '../../services/conversation-retrieval-service'; diff --git a/source/lambda/feedback-management/test/services/dynamodb-use-case-retriever.test.ts b/source/lambda/feedback-management/test/services/dynamodb-use-case-retriever.test.ts index b5fa9769..9983261e 100644 --- a/source/lambda/feedback-management/test/services/dynamodb-use-case-retriever.test.ts +++ b/source/lambda/feedback-management/test/services/dynamodb-use-case-retriever.test.ts @@ -1,6 +1,10 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; import { UseCaseRetriever } from '../../services/dynamodb-use-case-retriever'; import { mockClient } from 'aws-sdk-client-mock'; diff --git a/source/lambda/feedback-management/test/services/feedback-storage-service.test.ts b/source/lambda/feedback-management/test/services/feedback-storage-service.test.ts index 0ebdee00..b3f61076 100644 --- a/source/lambda/feedback-management/test/services/feedback-storage-service.test.ts +++ b/source/lambda/feedback-management/test/services/feedback-storage-service.test.ts @@ -1,6 +1,10 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3'; import { FeedbackStorageService } from '../../services/feedback-storage-service'; import { mockClient } from 'aws-sdk-client-mock'; diff --git a/source/lambda/feedback-management/tsconfig.json b/source/lambda/feedback-management/tsconfig.json index a3cfd760..d95f6a60 100644 --- a/source/lambda/feedback-management/tsconfig.json +++ b/source/lambda/feedback-management/tsconfig.json @@ -30,6 +30,9 @@ "moduleResolution": "node", "rootDir": ".", "paths": { + "aws-sdk-lib": [ + "../layers/aws-sdk-lib/dist" + ], "aws-node-user-agent-config": [ "../layers/aws-node-user-agent-config/dist" ], @@ -87,6 +90,6 @@ "@smithy/types": [ "../layers/aws-sdk-lib/node_modules/@smithy/types" ] - }, + } } } \ No newline at end of file diff --git a/source/lambda/feedback-management/utils/http-response-formatters.ts b/source/lambda/feedback-management/utils/http-response-formatters.ts index 2049fb83..d874da8a 100644 --- a/source/lambda/feedback-management/utils/http-response-formatters.ts +++ b/source/lambda/feedback-management/utils/http-response-formatters.ts @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 /** - * Utility function to convert any success response into a Http 200/201 response with the + * Utility function to convert any success response into a Http 200/201 response with * proper formatting and headers. * * @param {any} body Response message. This will be stringified and inserted into 'body' diff --git a/source/lambda/files-management/index.ts b/source/lambda/files-management/index.ts new file mode 100644 index 00000000..88cd7014 --- /dev/null +++ b/source/lambda/files-management/index.ts @@ -0,0 +1,115 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda'; +import { injectLambdaContext } from '@aws-lambda-powertools/logger/middleware'; +import { logMetrics } from '@aws-lambda-powertools/metrics/middleware'; +import { captureLambdaHandler } from '@aws-lambda-powertools/tracer/middleware'; +import { logger, tracer, metrics } from './power-tools-init'; +import { FileOperationTypes, AMZN_TRACE_ID_HEADER } from './utils/constants'; +import { checkEnv, extractUserId, setupMetricsDimensions } from './utils/utils'; +import { FileValidator } from './validators/file-validator'; +import { FileRequestFactory } from './models/files-factory'; +import { formatResponse, formatError } from './utils/http-response-formatters'; +import { FileUploadCommand, FileDeleteCommand, FileGetCommand, FileCommand } from './models/file-command'; +import middy from '@middy/core'; + +// Route mapping for file operations +export const fileRouteMap = new Map([ + ['POST:/files/{useCaseId}', FileOperationTypes.UPLOAD], + ['DELETE:/files/{useCaseId}', FileOperationTypes.DELETE], + ['GET:/files/{useCaseId}', FileOperationTypes.DOWNLOAD] +]); + +export const fileCommandsRegistry = new Map([ + [FileOperationTypes.UPLOAD, new FileUploadCommand()], + [FileOperationTypes.DELETE, new FileDeleteCommand()], + [FileOperationTypes.DOWNLOAD, new FileGetCommand()] +]); + +/** + * Gets the file operation type from the API Gateway event + * @param event - API Gateway event + * @returns The operation type string + */ +export const getFileOperation = (event: APIGatewayProxyEvent): FileOperationTypes => { + const routeKey = `${event.httpMethod}:${event.resource}`; + const operation = fileRouteMap.get(routeKey); + + if (!operation) { + logger.error(`Unsupported operation - HTTP method: ${event.httpMethod}, resource: ${event.resource}`); + throw new Error(`Unsupported operation: ${event.httpMethod} ${event.resource}`); + } + + if (!Object.values(FileOperationTypes).includes(operation)) { + logger.error(`Invalid operation type: ${operation}`); + throw new Error(`Unsupported operation: ${operation}`); + } + + return operation; +}; + +/** + * Main Lambda handler for file operations + */ +export const filesHandler = async (event: APIGatewayProxyEvent): Promise => { + tracer.getSegment(); + + let useCaseId: string; + const rootTraceId = tracer.getRootXrayTraceId(); + const errorMessage = `Internal Error - Please contact support and quote the following trace id: ${rootTraceId}`; + + try { + checkEnv(); + + logger.info( + `Processing file operation request - httpMethod: ${event.httpMethod}, path: ${event.path}, resource: ${event.resource}, traceId: ${rootTraceId}` + ); + + const fileOperation = getFileOperation(event); + const userId = extractUserId(event); + + // Parse request and extract useCaseId and useCaseRecordKey for metrics and validation + const request = FileRequestFactory.createRequest(event, fileOperation); + useCaseId = (request).useCaseId; + + // Validate that multimodality is enabled for this use case + const fileValidator = new FileValidator(); + await fileValidator.validateMultimodalCapability(useCaseId); + + setupMetricsDimensions(useCaseId); + + const command = fileCommandsRegistry.get(fileOperation); + if (!command) { + logger.error(`No command found for operation: ${fileOperation}, traceId: ${rootTraceId}`); + throw new Error(`Unsupported operation: ${fileOperation}`); + } + + const response = await command.execute(request, userId); + return formatResponse(response, 200, { [AMZN_TRACE_ID_HEADER]: rootTraceId as string }); + } catch (error) { + if (error instanceof Error) { + logger.error( + `File operation failed: ${error.message}, traceId: ${rootTraceId}, useCaseId: ${useCaseId!}, errorStack: ${error.stack}` + ); + } else { + logger.error(`File operation failed with unknown error, traceId: ${rootTraceId}, useCaseId: ${useCaseId!}`); + } + + return formatError({ + message: errorMessage, + extraHeaders: { [AMZN_TRACE_ID_HEADER]: rootTraceId as string } + }); + } finally { + metrics.publishStoredMetrics(); + } +}; + +/** + * Middy-wrapped handler with powertools middleware + */ +export const handler = middy(filesHandler).use([ + captureLambdaHandler(tracer), + injectLambdaContext(logger), + logMetrics(metrics) +]); diff --git a/source/lambda/files-management/jest.config.js b/source/lambda/files-management/jest.config.js new file mode 100644 index 00000000..318ba48d --- /dev/null +++ b/source/lambda/files-management/jest.config.js @@ -0,0 +1,26 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +module.exports = { + modulePaths: [ + '/../layers/', + '/../layers/aws-sdk-lib/node_modules/', + '/../layers/aws-node-user-agent-config/', + '/../layers/aws-node-user-agent-config/node_modules/' + ], + testMatch: ['**/*.test.ts'], + modulePathIgnorePatterns: ['/dist/'], + collectCoverage: true, + collectCoverageFrom: ['**/*.ts', '!**/test/*.ts', '!dist/'], + coverageReporters: ['text', ['lcov', { projectRoot: '../../../' }]], + preset: 'ts-jest', + testEnvironment: 'node', + // Limit concurrency to prevent worker exhaustion + maxWorkers: 2, + // Set reasonable timeouts + testTimeout: 30000, + // Force exit to prevent hanging processes + forceExit: true, + // Detect open handles to identify resource leaks + detectOpenHandles: true +}; diff --git a/source/lambda/files-management/models/file-command.ts b/source/lambda/files-management/models/file-command.ts new file mode 100644 index 00000000..65c27589 --- /dev/null +++ b/source/lambda/files-management/models/file-command.ts @@ -0,0 +1,249 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { MetricUnit } from '@aws-lambda-powertools/metrics'; +import { logger, tracer, metrics } from '../power-tools-init'; +import { CloudWatchMetrics, FileStatus } from '../utils/constants'; +import { + FileUploadRequest, + FileDeleteRequest, + FileGetRequest, + FileUploadResponse, + FileDeleteResponse, + FileGetResponse, + MultimodalUploadParams, + PresignedPostResponse +} from './types'; +import { S3PresignedUrlService } from '../services/s3-presigned-url-service'; +import { MetadataService } from '../services/ddb-metadata-service'; +import { validateFileUploadRequest, validateFileDeleteRequest } from '../validators/request-validators'; +import { extractFileInfo } from '../utils/utils'; + +/** + * Abstract base class for file management commands + */ +export abstract class FileCommand { + protected s3Service: S3PresignedUrlService; + protected metadataService: MetadataService; + + constructor() { + this.s3Service = new S3PresignedUrlService(); + this.metadataService = new MetadataService(); + } + + /** + * Execute method that must be implemented by concrete commands + */ + abstract execute(operation: any, userId: string): Promise; +} + +/** + * Command to handle file upload operations + */ +export class FileUploadCommand extends FileCommand { + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###fileUploadCommand' }) + public async execute(request: FileUploadRequest, userId: string): Promise { + logger.info( + `Processing file upload request - useCaseId: ${request.useCaseId}, fileCount: ${request.fileNames.length}, userId: ${userId}` + ); + + validateFileUploadRequest(request); + + try { + logger.info(`Processing ${request.fileNames.length} files`); + + // Process each file individually in parallel + const fileResults = await Promise.allSettled( + request.fileNames.map(async (fileName) => { + try { + const fileInfo = extractFileInfo(fileName); + + const params: MultimodalUploadParams = { + fileName: fileInfo.fileName, + userId, + contentType: fileInfo.contentType, + fileExtension: fileInfo.fileExtension, + useCaseId: request.useCaseId, + conversationId: request.conversationId, + messageId: request.messageId + }; + const s3Result = await this.s3Service.createFileUploadPresignedPost(params); + + // if generating presigned url succeeds, create metadata + await this.metadataService.createFileMetadata( + s3Result.fileKey, + s3Result.fileName, + s3Result.fileExtension, + s3Result.fileContentType, + s3Result.fileUuid + ); + + return { + uploadUrl: s3Result.uploadUrl, + formFields: s3Result.formFields, + fileName: s3Result.fileName, + expiresIn: s3Result.expiresIn, + createdAt: s3Result.createdAt, + error: s3Result.error || null + } as PresignedPostResponse; + } catch (error) { + logger.warn(`Failed to process file ${fileName}: ${(error as Error).message}`); + return { + uploadUrl: '', + formFields: {}, + fileName, + expiresIn: 0, + createdAt: new Date().toISOString(), + error: (error as Error).message + } as PresignedPostResponse; + } + }) + ); + + // Convert Promise.allSettled results to uploads array + const uploads = fileResults.map((result, index) => { + if (result.status === 'fulfilled') { + return result.value; + } else { + logger.error(`Unexpected rejection for file ${request.fileNames[index]}: ${result.reason}`); + return { + uploadUrl: '', + formFields: {}, + fileName: request.fileNames[index], + expiresIn: 0, + createdAt: new Date().toISOString(), + error: 'Failed due to unexpected error.' + } as PresignedPostResponse; + } + }); + + const successCount = uploads.filter((upload) => !upload.error).length; + const failureCount = uploads.filter((upload) => upload.error).length; + + metrics.addMetric(CloudWatchMetrics.FILE_UPLOAD_TRIGGERED, MetricUnit.Count, successCount); + if (failureCount > 0) { + metrics.addMetric(CloudWatchMetrics.FILE_UPLOAD_FAILURE, MetricUnit.Count, failureCount); + } + + logger.info( + `File upload processing completed - useCaseId: ${request.useCaseId}, userId: ${userId}, conversationId: ${request.conversationId}, successful: ${successCount}, failed: ${failureCount}` + ); + + return { uploads } as FileUploadResponse; + } catch (error) { + metrics.addMetric(CloudWatchMetrics.FILE_UPLOAD_FAILURE, MetricUnit.Count, 1); + logger.error( + `Failed to process file upload request for useCaseId: ${request.useCaseId}, userId: ${userId}, error: ${(error as Error).message}` + ); + throw error; + } + } +} + +/** + * Command to handle file deletion operations + */ +export class FileDeleteCommand extends FileCommand { + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###fileDeleteCommand' }) + public async execute(request: FileDeleteRequest, userId: string): Promise { + logger.info( + `Processing file delete request - useCaseId: ${request.useCaseId}, fileCount: ${request.fileNames.length}, userId: ${userId}` + ); + + validateFileDeleteRequest(request); + + // Construct fileKey with userId from the request context + const fileKey = `${request.useCaseId}/${userId}/${request.conversationId}/${request.messageId}`; + + try { + const fileKeys = request.fileNames.map((fileName) => ({ + fileKey, + fileName + })); + + // Delete multiple files parallely - mark as deleted and update TTL + const deletionResults = await this.metadataService.deleteMultipleFiles(fileKeys); + + const allSuccessful = deletionResults.every((result) => result.success); + const successfulDeletions = deletionResults.filter((result) => result.success).length; + const failureCount = deletionResults.filter((result) => !result.success).length; + + if (successfulDeletions > 0) { + metrics.addMetric(CloudWatchMetrics.FILE_DELETE, MetricUnit.Count, successfulDeletions); + } + if (failureCount > 0) { + metrics.addMetric(CloudWatchMetrics.FILE_ACCESS_FAILURES, MetricUnit.Count, failureCount); + } + logger.info( + `File deletion completed for useCaseId: ${request.useCaseId}, userId: ${userId}, conversationId: ${request.conversationId} - successful: ${successfulDeletions}, failed: ${failureCount}` + ); + + return { + deletions: deletionResults, + allSuccessful, + failureCount + }; + } catch (error) { + metrics.addMetric(CloudWatchMetrics.FILE_ACCESS_FAILURES, MetricUnit.Count, 1); + + logger.error( + `Failed to process file delete request for useCaseId: ${request.useCaseId}, userId: ${userId}, conversationId: ${request.conversationId}, error: ${(error as Error).message}` + ); + throw error; + } + } +} + +/** + * Command to handle file download/get operations + */ +export class FileGetCommand extends FileCommand { + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###fileGetCommand' }) + public async execute(request: FileGetRequest, userId: string): Promise { + logger.info( + `Processing file get request - useCaseId: ${request.useCaseId}, fileName: ${request.fileName}, userId: ${userId}` + ); + + // Construct fileKey with userId from the request context + const fileKey = `${request.useCaseId}/${userId}/${request.conversationId}/${request.messageId}`; + + try { + // Get file metadata to verify file exists and is accessible + const metadata = await this.metadataService.getExistingMetadataRecord(fileKey, request.fileName); + let errorMsg; + + if (!metadata) { + errorMsg = `File not found.`; + throw new Error(errorMsg); + } + + if (metadata.status !== FileStatus.UPLOADED) { + errorMsg = `File status is '${metadata.status}'. File cannot be retrieved.`; + throw new Error(errorMsg); + } + + const s3Key = `${request.useCaseId}/${userId}/${request.conversationId}/${request.messageId}/${metadata.fileUuid}.${metadata.fileExtension}`; + + const downloadUrl = await this.s3Service.generateDownloadUrl( + s3Key, + metadata.fileName, + metadata.fileContentType + ); + + metrics.addMetric(CloudWatchMetrics.FILE_DOWNLOAD, MetricUnit.Count, 1); + logger.debug( + `Generated download URL for useCaseId: ${request.useCaseId}, userId: ${userId}, fileName: ${request.fileName}` + ); + + return { + downloadUrl + }; + } catch (error) { + metrics.addMetric(CloudWatchMetrics.FILE_ACCESS_FAILURES, MetricUnit.Count, 1); + logger.error( + `Failed to process file get request for useCaseId: ${request.useCaseId}, userId: ${userId}, fileName: ${request.fileName}, error: ${(error as Error).message}` + ); + throw error; + } + } +} diff --git a/source/lambda/files-management/models/files-factory.ts b/source/lambda/files-management/models/files-factory.ts new file mode 100644 index 00000000..aa172ff3 --- /dev/null +++ b/source/lambda/files-management/models/files-factory.ts @@ -0,0 +1,132 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayProxyEvent } from 'aws-lambda'; +import { FileOperationTypes } from '../utils/constants'; +import { FileUploadRequest, FileDeleteRequest, FileGetRequest } from './types'; +import { parseEventBody, extractUseCaseId } from '../utils/utils'; + +/** + * Factory class for creating file operation request objects from API Gateway events + */ +export class FileRequestFactory { + /** + * Validates common required fields for file operations and ensures they are non-empty + * @param data - Object containing the fields to validate + * @param errors - Array to collect validation errors + */ + private static validateCommonFields(data: any, errors: string[]): void { + if (!data.conversationId) { + errors.push('conversationId is required'); + } + + if (!data.messageId) { + errors.push('messageId is required'); + } + } + + /** + * Creates the appropriate file request object based on the operation type + * @param event - API Gateway event + * @param operation - File operation type + * @returns Typed file request object + */ + public static createRequest( + event: APIGatewayProxyEvent, + operation: string + ): FileUploadRequest | FileDeleteRequest | FileGetRequest { + switch (operation) { + case FileOperationTypes.UPLOAD: + return FileRequestFactory.createUploadRequest(event); + case FileOperationTypes.DELETE: + return FileRequestFactory.createDeleteRequest(event); + case FileOperationTypes.DOWNLOAD: + return FileRequestFactory.createGetRequest(event); + default: + throw new Error(`Unsupported file operation: ${operation}`); + } + } + + /** + * Creates a file request with fileNames array from API Gateway event + * @param event - API Gateway event + * @returns FileUploadRequest or FileDeleteRequest object + */ + private static parseUserRequest(event: APIGatewayProxyEvent): FileUploadRequest | FileDeleteRequest { + const body = parseEventBody(event); + const errors: string[] = []; + + if (!body.fileNames || !Array.isArray(body.fileNames) || body.fileNames.length === 0) { + errors.push('fileNames field is required and must be a non-empty array'); + } else { + // Only check for uniqueness if fileNames is a valid array + const uniqueFileNames = new Set(body.fileNames); + + if (uniqueFileNames.size !== body.fileNames.length) { + errors.push(`fileNames field must have unique file name values`); + } + } + + FileRequestFactory.validateCommonFields(body, errors); + + if (errors.length > 0) { + throw new Error(`Validation failed: ${errors.join(', ')}`); + } + + const useCaseId = extractUseCaseId(event); + + return { + fileNames: body.fileNames, + conversationId: body.conversationId, + messageId: body.messageId, + useCaseId: useCaseId + }; + } + + /** + * Creates a file upload request from API Gateway event + * @param event - API Gateway event + * @returns FileUploadRequest object + */ + private static createUploadRequest(event: APIGatewayProxyEvent): FileUploadRequest { + return FileRequestFactory.parseUserRequest(event) as FileUploadRequest; + } + + /** + * Creates a file delete request from API Gateway event + * @param event - API Gateway event + * @returns FileDeleteRequest object + */ + private static createDeleteRequest(event: APIGatewayProxyEvent): FileDeleteRequest { + return FileRequestFactory.parseUserRequest(event) as FileDeleteRequest; + } + + /** + * Creates a file get request from API Gateway event query parameters + * @param event - API Gateway event + * @returns FileGetRequest object + */ + private static createGetRequest(event: APIGatewayProxyEvent): FileGetRequest { + const queryParams = event.queryStringParameters || {}; + const errors: string[] = []; + + if (!queryParams.fileName) { + errors.push('fileName is required'); + } + + FileRequestFactory.validateCommonFields(queryParams, errors); + + if (errors.length > 0) { + throw new Error(`Validation failed: ${errors.join(', ')}`); + } + + const useCaseId = extractUseCaseId(event); + + return { + fileName: queryParams.fileName!, + conversationId: queryParams.conversationId!, + messageId: queryParams.messageId!, + useCaseId: useCaseId + }; + } +} diff --git a/source/lambda/files-management/models/types.ts b/source/lambda/files-management/models/types.ts new file mode 100644 index 00000000..fdbbbb6c --- /dev/null +++ b/source/lambda/files-management/models/types.ts @@ -0,0 +1,122 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export interface RetrySettings { + maxRetries: number; + backOffRate: number; + initialDelayMs: number; +} + +// File operation request/response types +export interface FileUploadRequest { + fileNames: string[]; + conversationId: string; + messageId: string; + useCaseId: string; +} + +export interface FileDeleteRequest { + fileNames: string[]; + conversationId: string; + messageId: string; + useCaseId: string; +} + +export interface FileGetRequest { + fileName: string; + conversationId: string; + messageId: string; + useCaseId: string; +} + +// Response types +export interface ExtendedPresignedPostResponse { + uploadUrl: string; + formFields: Record; + fileName: string; + fileKey: string; + fileUuid: string; + fileExtension: string; + fileContentType: string; + expiresIn: number; + createdAt: string; + error?: string | null; +} + +export interface PresignedPostResponse { + uploadUrl: string; + formFields: Record; + fileName: string; + expiresIn: number; + createdAt: string; + error?: string | null; +} + +export interface FileUploadResponse { + uploads: PresignedPostResponse[]; +} + +export interface FileDeletionResult { + success: boolean; + fileName: string; + error?: string; +} + +export interface FileDeleteResponse { + deletions: FileDeletionResult[]; + allSuccessful: boolean; + failureCount: number; +} + +export interface FileGetResponse { + downloadUrl: string; +} + +// DynamoDB types +export interface FileMetadata { + fileKey: string; + fileName: string; + fileUuid: string; // Unique identifier for S3 object + fileExtension: string; + fileContentType: string; // MIME type + createdAt: number; + updatedAt: number; + status: string; + fileSize?: number; // File size in bytes + uploadTimestamp?: number; + TTL: number; +} + +// S3 Management types +export interface MultimodalUploadParams { + fileName: string; + userId: string; + contentType: string; + fileExtension: string; + useCaseId: string; + conversationId: string; + messageId: string; +} + +export interface FileUploadInfo { + fileName: string; + contentType: string; + fileExtension: string; +} + +// Use case validation types +export interface LLMConfig { + key: string; + config?: { + LlmParams?: { + MultimodalParams?: { + MultimodalEnabled?: boolean; + }; + }; + }; +} + +export interface UseCaseConfig { + UseCaseId: string; + UseCaseConfigRecordKey: string; +} diff --git a/source/lambda/files-management/package-lock.json b/source/lambda/files-management/package-lock.json new file mode 100644 index 00000000..d5eeaeba --- /dev/null +++ b/source/lambda/files-management/package-lock.json @@ -0,0 +1,5322 @@ +{ + "name": "@amzn/files-handler", + "version": "4.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@amzn/files-handler", + "version": "4.0.0", + "license": "Apache-2.0", + "devDependencies": { + "@types/jest": "^29.5.14", + "@types/lodash": "^4.17.13", + "@types/node": "^22.10.1", + "@typescript-eslint/eslint-plugin": "^8.18.0", + "@typescript-eslint/parser": "^8.18.0", + "aws-sdk-client-mock": "^4.1.0", + "aws-sdk-client-mock-jest": "^4.1.0", + "eslint": "^9.16.0", + "jest": "^29.7.0", + "lodash": "^4.17.21", + "prettier": "^3.4.2", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", + "typescript": "^5.7.2" + } + }, + "../layers/aws-node-user-agent-config": { + "name": "@amzn/aws-node-user-agent-config", + "version": "4.0.0", + "extraneous": true, + "license": "Apache-2.0", + "dependencies": { + "@aws-lambda-powertools/logger": "^2.11.0", + "@aws-lambda-powertools/metrics": "^2.11.0", + "@aws-lambda-powertools/tracer": "^2.11.0", + "@middy/core": "^4.7.0", + "@types/lodash": "^4.17.13", + "lodash": "^4.17.21" + }, + "devDependencies": { + "@types/jest": "^29.5.14", + "@types/node": "^22.10.1", + "eslint": "^9.16.0", + "jest": "^29.7.0", + "prettier": "^3.6.2", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.4.tgz", + "integrity": "sha512-YsmSKC29MJwf0gF8Rjjrg5LQCmyh+j/nD8/eP7f+BeoQTKYqs9RoWbjGOdy0+1Ekr68RJZMUOPVQaQisnIo4Rw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.4.tgz", + "integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.4", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.4", + "@babel/types": "^7.28.4", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.3.tgz", + "integrity": "sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.28.3", + "@babel/types": "^7.28.2", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.4.tgz", + "integrity": "sha512-yZbBqeM6TkpP9du/I2pUZnJsRMGGvOuIrhjzC1AwHwW+6he4mni6Bp/m8ijn0iOuZuPI2BfkCoSRunpyjnrQKg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.28.4" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.4.tgz", + "integrity": "sha512-YEzuboP2qvQavAcjgQNVgsvHIDv6ZpwXvcvjmyySP2DIMuByS/6ioU5G9pYrWHM6T2YDfc7xga9iNzYOs12CFQ==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.4", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.4.tgz", + "integrity": "sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.0", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.0.tgz", + "integrity": "sha512-ENIdc4iLu0d93HeYirvKmrzshzofPw6VkZRKQGe9Nv46ZnWUzcF1xV01dcvEg/1wXUR61OmmlSfyeyO7EvjLxQ==", + "dev": true, + "dependencies": { + "@eslint/object-schema": "^2.1.6", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-array/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.3.1.tgz", + "integrity": "sha512-xR93k9WhrDYpXHORXpxVL5oHj3Era7wo6k/Wd8/IsQNnZUTzkGS29lyn3nAT05v6ltUuTFVCCYDEGfy2Or/sPA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.2.tgz", + "integrity": "sha512-78Md3/Rrxh83gCxoUc0EiciuOHsIITzLy53m3d9UyiW8y9Dj2D29FeETqyKA+BRK76tnTp6RXWb3pCay8Oyomg==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", + "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "9.36.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.36.0.tgz", + "integrity": "sha512-uhCbYtYynH30iZErszX78U+nR3pJU3RHGQ57NXy5QupD4SBVwDeU8TNBy+MjMngc1UyIW9noKqsRqfjQTBU2dw==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", + "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.5.tgz", + "integrity": "sha512-Z5kJ+wU3oA7MMIqVR9tyZRtjYPr4OC004Q4Rw7pgOKUOKkJfZ3O24nz3WYfGRpMDNmcOi3TwQOmgm7B7Tpii0w==", + "dev": true, + "dependencies": { + "@eslint/core": "^0.15.2", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@sinonjs/samsam": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-8.0.3.tgz", + "integrity": "sha512-hw6HbX+GyVZzmaYNh82Ecj1vdGZrqVIn/keDTg63IgAwiQPO+xCz99uG6Woqgb4tM0mUiFENKZ4cqd7IX94AXQ==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.1", + "type-detect": "^4.1.0" + } + }, + "node_modules/@sinonjs/samsam/node_modules/type-detect": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz", + "integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@sinonjs/text-encoding": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.3.tgz", + "integrity": "sha512-DE427ROAphMQzU4ENbliGYrBSYPXF+TtLg9S8vzeA+OF4ZKzoDdzfL8sxuMUGS/lgRhM6j1URSk9ghf7Xo1tyA==", + "dev": true + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", + "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", + "dev": true + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/chai": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", + "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", + "dev": true, + "dependencies": { + "@types/deep-eql": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", + "dev": true, + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true + }, + "node_modules/@types/lodash": { + "version": "4.17.20", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz", + "integrity": "sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==", + "dev": true + }, + "node_modules/@types/node": { + "version": "22.18.6", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.18.6.tgz", + "integrity": "sha512-r8uszLPpeIWbNKtvWRt/DbVi5zbqZyj1PTmhRMqBMvDnaz1QpmSKujUtJLrqGZeoM8v72MfYggDceY4K1itzWQ==", + "dev": true, + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/sinon": { + "version": "17.0.4", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-17.0.4.tgz", + "integrity": "sha512-RHnIrhfPO3+tJT0s7cFaXGZvsL4bbR3/k7z3P312qMS4JaS2Tk+KiwiLx1S0rQ56ERj00u1/BtdyVd0FY+Pdew==", + "dev": true, + "dependencies": { + "@types/sinonjs__fake-timers": "*" + } + }, + "node_modules/@types/sinonjs__fake-timers": { + "version": "8.1.5", + "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-8.1.5.tgz", + "integrity": "sha512-mQkU2jY8jJEF7YHjHvsQO8+3ughTL1mcnn96igfhONmR+fUPSKIkefQYpSe8bsly2Ep7oQbn/6VG5/9/0qcArQ==", + "dev": true + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true + }, + "node_modules/@types/yargs": { + "version": "17.0.33", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", + "integrity": "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", + "dev": true, + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.44.1.tgz", + "integrity": "sha512-molgphGqOBT7t4YKCSkbasmu1tb1MgrZ2szGzHbclF7PNmOkSTQVHy+2jXOSnxvR3+Xe1yySHFZoqMpz3TfQsw==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.44.1", + "@typescript-eslint/type-utils": "8.44.1", + "@typescript-eslint/utils": "8.44.1", + "@typescript-eslint/visitor-keys": "8.44.1", + "graphemer": "^1.4.0", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.44.1", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.44.1.tgz", + "integrity": "sha512-EHrrEsyhOhxYt8MTg4zTF+DJMuNBzWwgvvOYNj/zm1vnaD/IC5zCXFehZv94Piqa2cRFfXrTFxIvO95L7Qc/cw==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "8.44.1", + "@typescript-eslint/types": "8.44.1", + "@typescript-eslint/typescript-estree": "8.44.1", + "@typescript-eslint/visitor-keys": "8.44.1", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.44.1.tgz", + "integrity": "sha512-ycSa60eGg8GWAkVsKV4E6Nz33h+HjTXbsDT4FILyL8Obk5/mx4tbvCNsLf9zret3ipSumAOG89UcCs/KRaKYrA==", + "dev": true, + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.44.1", + "@typescript-eslint/types": "^8.44.1", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.44.1.tgz", + "integrity": "sha512-NdhWHgmynpSvyhchGLXh+w12OMT308Gm25JoRIyTZqEbApiBiQHD/8xgb6LqCWCFcxFtWwaVdFsLPQI3jvhywg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "8.44.1", + "@typescript-eslint/visitor-keys": "8.44.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.44.1.tgz", + "integrity": "sha512-B5OyACouEjuIvof3o86lRMvyDsFwZm+4fBOqFHccIctYgBjqR3qT39FBYGN87khcgf0ExpdCBeGKpKRhSFTjKQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.44.1.tgz", + "integrity": "sha512-KdEerZqHWXsRNKjF9NYswNISnFzXfXNDfPxoTh7tqohU/PRIbwTmsjGK6V9/RTYWau7NZvfo52lgVk+sJh0K3g==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "8.44.1", + "@typescript-eslint/typescript-estree": "8.44.1", + "@typescript-eslint/utils": "8.44.1", + "debug": "^4.3.4", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.44.1.tgz", + "integrity": "sha512-Lk7uj7y9uQUOEguiDIDLYLJOrYHQa7oBiURYVFqIpGxclAFQ78f6VUOM8lI2XEuNOKNB7XuvM2+2cMXAoq4ALQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.44.1.tgz", + "integrity": "sha512-qnQJ+mVa7szevdEyvfItbO5Vo+GfZ4/GZWWDRRLjrxYPkhM+6zYB2vRYwCsoJLzqFCdZT4mEqyJoyzkunsZ96A==", + "dev": true, + "dependencies": { + "@typescript-eslint/project-service": "8.44.1", + "@typescript-eslint/tsconfig-utils": "8.44.1", + "@typescript-eslint/types": "8.44.1", + "@typescript-eslint/visitor-keys": "8.44.1", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.44.1.tgz", + "integrity": "sha512-DpX5Fp6edTlocMCwA+mHY8Mra+pPjRZ0TfHkXI8QFelIKcbADQz1LUPNtzOFUriBB2UYqw4Pi9+xV4w9ZczHFg==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.44.1", + "@typescript-eslint/types": "8.44.1", + "@typescript-eslint/typescript-estree": "8.44.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.44.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.44.1.tgz", + "integrity": "sha512-576+u0QD+Jp3tZzvfRfxon0EA2lzcDt3lhUbsC6Lgzy9x2VR4E+JUiNyGHi5T8vk0TV+fpJ5GLG1JsJuWCaKhw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "8.44.1", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/aws-sdk-client-mock": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/aws-sdk-client-mock/-/aws-sdk-client-mock-4.1.0.tgz", + "integrity": "sha512-h/tOYTkXEsAcV3//6C1/7U4ifSpKyJvb6auveAepqqNJl6TdZaPFEtKjBQNf8UxQdDP850knB2i/whq4zlsxJw==", + "dev": true, + "dependencies": { + "@types/sinon": "^17.0.3", + "sinon": "^18.0.1", + "tslib": "^2.1.0" + } + }, + "node_modules/aws-sdk-client-mock-jest": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/aws-sdk-client-mock-jest/-/aws-sdk-client-mock-jest-4.1.0.tgz", + "integrity": "sha512-+g4a5Hp+MmPqqNnvwfLitByggrqf+xSbk1pm6fBYHNcon6+aQjL5iB+3YB6HuGPemY+/mUKN34iP62S14R61bA==", + "dev": true, + "dependencies": { + "@vitest/expect": ">1.6.0", + "expect": ">28.1.3", + "tslib": "^2.1.0" + }, + "peerDependencies": { + "aws-sdk-client-mock": "4.1.0", + "vitest": ">1.6.0" + }, + "peerDependenciesMeta": { + "vitest": { + "optional": true + } + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", + "dev": true, + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/baseline-browser-mapping": { + "version": "2.8.7", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.7.tgz", + "integrity": "sha512-bxxN2M3a4d1CRoQC//IqsR5XrLh0IJ8TCv2x6Y9N0nckNz/rTjZB3//GGscZziZOxmjP55rzxg/ze7usFI9FqQ==", + "dev": true, + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.26.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.26.2.tgz", + "integrity": "sha512-ECFzp6uFOSB+dcZ5BK/IBaGWssbSYBHvuMeMt3MMFyhI0Z8SqGgEkBLARgpRH3hutIgPVsALcMwbDrJqPxQ65A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "baseline-browser-mapping": "^2.8.3", + "caniuse-lite": "^1.0.30001741", + "electron-to-chromium": "^1.5.218", + "node-releases": "^2.0.21", + "update-browserslist-db": "^1.1.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001745", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001745.tgz", + "integrity": "sha512-ywt6i8FzvdgrrrGbr1jZVObnVv6adj+0if2/omv9cmR2oiZs30zL4DIyaptKcbOrBdOIc74QTMoJvSE2QHh5UQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "engines": { + "node": ">= 16" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", + "dev": true + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dedent": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz", + "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==", + "dev": true, + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.224", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.224.tgz", + "integrity": "sha512-kWAoUu/bwzvnhpdZSIc6KUyvkI1rbRXMT0Eq8pKReyOyaPZcctMli+EgvcN1PAvwVc7Tdo4Fxi2PsLNDU05mdg==", + "dev": true + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.36.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.36.0.tgz", + "integrity": "sha512-hB4FIzXovouYzwzECDcUkJ4OcfOEkXTv2zRY6B9bkwjx/cprAq0uvm1nl7zvQ0/TsUk0zQiN4uPfJpB9m+rPMQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.0", + "@eslint/config-helpers": "^0.3.1", + "@eslint/core": "^0.15.2", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.36.0", + "@eslint/plugin-kit": "^0.3.5", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/just-extend": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-6.2.0.tgz", + "integrity": "sha512-cYofQu2Xpom82S6qD778jBDpwvvy39s1l/hrYij2u9AMdQcGRpaBu6kY4mVhuno5kJVi1DAz4aiphA2WI1/OAw==", + "dev": true + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true + }, + "node_modules/nise": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/nise/-/nise-6.1.1.tgz", + "integrity": "sha512-aMSAzLVY7LyeM60gvBS423nBmIPP+Wy7St7hsb+8/fc1HmeoHJfLO8CKse4u3BtOZvQLJghYPI2i/1WZrEj5/g==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.1", + "@sinonjs/fake-timers": "^13.0.1", + "@sinonjs/text-encoding": "^0.7.3", + "just-extend": "^6.2.0", + "path-to-regexp": "^8.1.0" + } + }, + "node_modules/nise/node_modules/@sinonjs/fake-timers": { + "version": "13.0.5", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-13.0.5.tgz", + "integrity": "sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.1" + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true + }, + "node_modules/node-releases": { + "version": "2.0.21", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.21.tgz", + "integrity": "sha512-5b0pgg78U3hwXkCM8Z9b2FJdPZlr9Psr9V2gQPESdGHqbntyFJKFW4r5TeWGFzafGY3hzs1JC62VEQMbl1JFkw==", + "dev": true + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "node_modules/path-to-regexp": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz", + "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", + "dev": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", + "dev": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ] + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "dev": true, + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-cwd/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/sinon": { + "version": "18.0.1", + "resolved": "https://registry.npmjs.org/sinon/-/sinon-18.0.1.tgz", + "integrity": "sha512-a2N2TDY1uGviajJ6r4D1CyRAkzE9NNVlYOV1wX5xQDuAk0ONgzgRl0EjCQuRCPxOwp13ghsMwt9Gdldujs39qw==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.1", + "@sinonjs/fake-timers": "11.2.2", + "@sinonjs/samsam": "^8.0.0", + "diff": "^5.2.0", + "nise": "^6.0.0", + "supports-color": "^7" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/sinon" + } + }, + "node_modules/sinon/node_modules/@sinonjs/fake-timers": { + "version": "11.2.2", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-11.2.2.tgz", + "integrity": "sha512-G2piCSxQ7oWOxwGSAyFHfPIsyeJGXYtc6mFbnFA+kRXkiEnTl8c/8jul2S329iFBnDI9HGoeWWAZvuvOkZccgw==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", + "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", + "dev": true, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + "dev": true, + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/ts-jest": { + "version": "29.4.4", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.4.tgz", + "integrity": "sha512-ccVcRABct5ZELCT5U0+DZwkXMCcOCLi2doHRrKy1nK/s7J7bch6TzJMsrY09WxgUUIP/ITfmcDS8D2yl63rnXw==", + "dev": true, + "dependencies": { + "bs-logger": "^0.2.6", + "fast-json-stable-stringify": "^2.1.0", + "handlebars": "^4.7.8", + "json5": "^2.2.3", + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.7.2", + "type-fest": "^4.41.0", + "yargs-parser": "^21.1.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0 || ^30.0.0", + "@jest/types": "^29.0.0 || ^30.0.0", + "babel-jest": "^29.0.0 || ^30.0.0", + "jest": "^29.0.0 || ^30.0.0", + "jest-util": "^29.0.0 || ^30.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/transform": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jest-util": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/ts-node/node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz", + "integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true + }, + "node_modules/update-browserslist-db": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/source/lambda/files-management/package.json b/source/lambda/files-management/package.json new file mode 100644 index 00000000..be63ac01 --- /dev/null +++ b/source/lambda/files-management/package.json @@ -0,0 +1,40 @@ +{ + "name": "@amzn/files-handler", + "version": "4.0.0", + "description": "This lambda supports APIs that provide export functionality for use cases", + "main": "index.ts", + "scripts": { + "test": "jest --coverage --silent --verbose", + "test-debug": "jest --coverage", + "test:no-cov": "jest --no-coverage --verbose", + "test:watch": "jest --watchAll --verbose", + "build": "npx tsc", + "clean": "rm -rf node_modules", + "clean-dev": "rm -rf node_modules && npm i --omit=dev", + "code-linter-js": "./node_modules/eslint/bin/eslint.js lambda --ext .js", + "code-linter-ts": "./node_modules/eslint/bin/eslint.js bin lib --ext .ts", + "code-linter": "npm run code-linter-ts && npm run code-linter-js", + "code-formatter": "./node_modules/prettier/bin-prettier.js --config .prettierrc.yml '**/*.ts' '**/*.js' --write" + }, + "author": { + "name": "Amazon Web Services", + "url": "https://aws.amazon.com/solutions" + }, + "license": "Apache-2.0", + "devDependencies": { + "@types/jest": "^29.5.14", + "@types/lodash": "^4.17.13", + "@types/node": "^22.10.1", + "@typescript-eslint/eslint-plugin": "^8.18.0", + "@typescript-eslint/parser": "^8.18.0", + "aws-sdk-client-mock": "^4.1.0", + "aws-sdk-client-mock-jest": "^4.1.0", + "eslint": "^9.16.0", + "jest": "^29.7.0", + "lodash": "^4.17.21", + "prettier": "^3.4.2", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", + "typescript": "^5.7.2" + } +} diff --git a/source/lambda/files-management/power-tools-init.ts b/source/lambda/files-management/power-tools-init.ts new file mode 100644 index 00000000..60126f01 --- /dev/null +++ b/source/lambda/files-management/power-tools-init.ts @@ -0,0 +1,16 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Logger } from '@aws-lambda-powertools/logger'; +import { Metrics } from '@aws-lambda-powertools/metrics'; +import { Tracer } from '@aws-lambda-powertools/tracer'; +import { CloudWatchNamespace } from './utils/constants'; + +const serviceName = { serviceName: 'FilesManagement' }; + +export const tracer = new Tracer(serviceName); +export const logger = new Logger(serviceName); +export const metrics = new Metrics({ + namespace: CloudWatchNamespace.FILE_HANDLING, + serviceName: serviceName.serviceName +}); diff --git a/source/lambda/files-management/services/ddb-config-service.ts b/source/lambda/files-management/services/ddb-config-service.ts new file mode 100644 index 00000000..b4284a4f --- /dev/null +++ b/source/lambda/files-management/services/ddb-config-service.ts @@ -0,0 +1,112 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; +import { marshall, unmarshall } from '@aws-sdk/util-dynamodb'; +import { logger, tracer } from '../power-tools-init'; +import { USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, USE_CASES_TABLE_NAME_ENV_VAR } from '../utils/constants'; +import { AWSClientManager } from 'aws-sdk-lib'; +import { retryWithBackoff, getRetrySettings } from '../utils/utils'; + +/** + * Service for fetching configuration data from DynamoDB tables + */ +export class DdbConfigService { + private dynamoClient: DynamoDBClient; + private llmConfigTable: string; + private useCasesTable: string; + + constructor() { + this.dynamoClient = AWSClientManager.getServiceClient('dynamodb'); + this.llmConfigTable = process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]!; + this.useCasesTable = process.env[USE_CASES_TABLE_NAME_ENV_VAR]!; + } + + /** + * Fetches multimodal configuration for the given use case record key + * @param useCaseRecordKey - The use case record key to fetch config for + * @returns Promise - True if multimodal is enabled, false otherwise + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###fetchUseCaseMultimodalityConfig' }) + public async fetchUseCaseMultimodalityConfig(useCaseRecordKey: string): Promise { + try { + const operation = async () => { + const command = new GetItemCommand({ + TableName: this.llmConfigTable, + Key: marshall({ + key: useCaseRecordKey + }), + ProjectionExpression: 'config.LlmParams.MultimodalParams' + }); + + return await this.dynamoClient.send(command); + }; + + const ddbResult = await retryWithBackoff(operation, getRetrySettings()); + + if (!ddbResult.Item) { + const errorMsg = `Failed to get LLM config from table: ${this.llmConfigTable}, recordKey: ${useCaseRecordKey}`; + logger.error(errorMsg); + throw new Error('Failed due to unexpected error.'); + } + + const llmConfig = unmarshall(ddbResult.Item); + const multimodalEnabled = llmConfig.config?.LlmParams?.MultimodalParams?.MultimodalEnabled === true; + logger.debug( + `Fetched multimodal config for useCaseRecordKey: ${useCaseRecordKey}, multimodalEnabled: ${multimodalEnabled}` + ); + return multimodalEnabled; + } catch (error) { + logger.error( + `Failed to fetch multimodal config for useCaseRecordKey: ${useCaseRecordKey}, error: ${(error as Error).message}` + ); + throw new Error('Failed due to unexpected error.'); + } + } + + /** + * Fetches use case configuration record key from the use case config table + * @param useCaseId - The use case ID to fetch config for + * @returns Promise - The LLM config record key + * @throws Error if use case config is not found or invalid + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###fetchUseCaseConfigRecordKey' }) + public async fetchUseCaseConfigRecordKey(useCaseId: string): Promise { + try { + const operation = async () => { + const useCaseConfigCommand = new GetItemCommand({ + TableName: this.useCasesTable, + Key: marshall({ + UseCaseId: useCaseId + }), + ProjectionExpression: 'UseCaseConfigRecordKey' + }); + + return await this.dynamoClient.send(useCaseConfigCommand); + }; + + const useCaseConfigResult = await retryWithBackoff(operation, getRetrySettings()); + + if (!useCaseConfigResult.Item) { + logger.error(`Use case configuration not found for useCaseId: ${useCaseId}`); + throw new Error('Failed due to unexpected error.'); + } + + const useCaseConfig = unmarshall(useCaseConfigResult.Item); + const useCaseConfigRecordKey = useCaseConfig.UseCaseConfigRecordKey; + + if (!useCaseConfigRecordKey) { + logger.error(`UseCaseConfigRecordKey not found in use case config for useCaseId: ${useCaseId}`); + throw new Error('Failed due to unexpected error.'); + } + + logger.debug(`Retrieved use case config record key: ${useCaseConfigRecordKey} for useCaseId: ${useCaseId}`); + return useCaseConfigRecordKey; + } catch (error) { + logger.error( + `Failed to fetch use case config for useCaseId: ${useCaseId}, error: ${(error as Error).message}` + ); + throw new Error('Failed due to unexpected error.'); + } + } +} diff --git a/source/lambda/files-management/services/ddb-metadata-service.ts b/source/lambda/files-management/services/ddb-metadata-service.ts new file mode 100644 index 00000000..d22cefef --- /dev/null +++ b/source/lambda/files-management/services/ddb-metadata-service.ts @@ -0,0 +1,269 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { DynamoDBClient, PutItemCommand, GetItemCommand, UpdateItemCommand } from '@aws-sdk/client-dynamodb'; +import { S3Client, DeleteObjectCommand } from '@aws-sdk/client-s3'; +import { marshall, unmarshall } from '@aws-sdk/util-dynamodb'; +import { logger, tracer } from '../power-tools-init'; +import { AWSClientManager } from 'aws-sdk-lib'; +import { FileMetadata, FileDeletionResult } from '../models/types'; +import { + FileStatus, + MULTIMODAL_FILE_UPLOAD_CONSTRAINTS, + FILE_OPERATION_CONSTRAINTS, + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR +} from '../utils/constants'; +import { retryWithBackoff, getRetrySettings } from '../utils/utils'; + +/** + * Metadata service class for file metadata operations in DynamoDB + */ +export class MetadataService { + private dynamoClient: DynamoDBClient; + private s3Client: S3Client; + private tableName: string; + + constructor() { + this.dynamoClient = AWSClientManager.getServiceClient('dynamodb'); + this.s3Client = AWSClientManager.getServiceClient('s3'); + this.tableName = process.env[MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR]!; + } + + /** + * Creates initial file metadata record with "pending" status + * Allows creation for new files or overwriting files with "deleted"/"invalid" status + * Rejects requests for files with "pending"/"uploaded" status to prevent duplicate uploads + * @param fileKey - The file key (useCaseId/user-uuid/conversation-uuid/message-uuid) + * @param fileName - Original filename + * @param fileExtension - File extension + * @param contentType - MIME type + * @param fileUuid - Unique identifier for S3 object + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###createFileMetadata' }) + public async createFileMetadata( + fileKey: string, + fileName: string, + fileExtension: string, + contentType: string, + fileUuid: string + ): Promise { + const now = Date.now(); + const ttl = Math.floor(now / 1000) + MULTIMODAL_FILE_UPLOAD_CONSTRAINTS.TTL_SECONDS; + + const metadata: FileMetadata = { + fileKey, + fileName, + fileUuid, + fileExtension, + fileContentType: contentType, + createdAt: now, + updatedAt: now, + status: FileStatus.PENDING, + TTL: ttl + }; + + try { + const operation = async () => { + const command = new PutItemCommand({ + TableName: this.tableName, + Item: marshall(metadata), + ConditionExpression: ` + (attribute_not_exists(fileKey) AND attribute_not_exists(fileName)) + OR + (#status = :deletedStatus OR #status = :invalidStatus) + `, + ExpressionAttributeNames: { + '#status': 'status' + }, + ExpressionAttributeValues: marshall({ + ':deletedStatus': FileStatus.DELETED, + ':invalidStatus': FileStatus.INVALID + }) + }); + + return await this.dynamoClient.send(command); + }; + + await retryWithBackoff(operation, getRetrySettings()); + logger.info(`Created file metadata record - fileKey: ${fileKey}, fileName: ${fileName}`); + } catch (error) { + let errorMessage = 'Failed due to unexpected error.'; + if (error instanceof Error && error.name === 'ConditionalCheckFailedException') { + errorMessage = `File already exists with "pending"/"uploaded" status. Upload not allowed.`; + } + + logger.error(`Created file metadata record - fileKey: ${fileKey}, fileName: ${fileName}. Error: ${error}`); + throw new Error(errorMessage); + } + } + + /** + * Gets file metadata using fileKey and fileName + * @param fileKey - The file key + * @param fileName - The filename + * @returns Promise - File metadata or null if not found + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###getFileMetadata' }) + public async getExistingMetadataRecord(fileKey: string, fileName: string): Promise { + try { + const command = new GetItemCommand({ + TableName: this.tableName, + Key: marshall({ + fileKey, + fileName + }) + }); + + const result = await this.dynamoClient.send(command); + + if (!result.Item) { + return null; + } + + return unmarshall(result.Item) as FileMetadata; + } catch (error) { + logger.error( + `Failed to get file metadata - fileKey: ${fileKey}, fileName: ${fileName}, error: ${(error as Error).message}` + ); + throw new Error(`Failed due to unexpected error.`); + } + } + + /** + * Deletes multiple files using individual operations in parallel + * This approach gets and updates each record individually with retry logic + * Preserves: fileUuid, fileExtension while updating status to DELETED + * @param fileKeys - Array of {fileKey, fileName} pairs (max FILE_OPERATION_LIMITS.MAX_FILES_PER_DELETE_REQUEST number of files) + * @returns Promise - Array of deletion results + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###deleteMultipleFiles' }) + public async deleteMultipleFiles( + fileKeys: Array<{ fileKey: string; fileName: string }> + ): Promise { + logger.info(`Starting deletion of ${fileKeys.length} files`); + + const deletionPromises = fileKeys.map(({ fileKey, fileName }) => this.deleteIndividualFile(fileKey, fileName)); + + // Promise.allSettled waits for ALL promises to complete (either resolve or reject) + // Convert Promise.allSettled results to FileDeletionResult format + const results = await Promise.allSettled(deletionPromises); + const deletionResults: FileDeletionResult[] = results.map((result, index) => { + const fileName = fileKeys[index].fileName; + + if (result.status === 'fulfilled') { + return result.value; + } else { + logger.error(`Failed to delete file ${fileName}: ${result.reason}`); + return { + success: false, + fileName, + error: 'Failed due to unexpected error.' + }; + } + }); + + return deletionResults; + } + + /** + * Deletes an individual file by first deleting from S3, then updating metadata to DELETED status + * This order ensures that if S3 deletion fails, we can retry the entire operation + * @param fileKey - The file key + * @param fileName - The file name + * @returns Promise - Deletion result for this file + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###deleteIndividualFile' }) + private async deleteIndividualFile(fileKey: string, fileName: string): Promise { + try { + // Get the existing record to preserve existing fields + const existingRecord = await this.getExistingMetadataRecord(fileKey, fileName); + const now = Date.now(); + + if (!existingRecord) { + return { + success: false, + fileName, + error: 'File not found. Cannot perform deletion.' + }; + } + + const s3Key = `${fileKey}/${existingRecord.fileUuid}.${existingRecord.fileExtension}`; + const retrySettings = getRetrySettings(); + + // Delete from S3 first with retry + const s3DeleteOperation = async () => { + await this.deleteFileFromS3(s3Key, fileName); + }; + await retryWithBackoff(s3DeleteOperation, retrySettings); + + // Update the record to mark it as deleted while preserving existing fields + const ddbUpdateOperation = async () => { + const command = new UpdateItemCommand({ + TableName: this.tableName, + Key: marshall({ + fileKey, + fileName + }), + UpdateExpression: 'SET #status = :status, updatedAt = :updatedAt, #ttl = :ttl', + ExpressionAttributeNames: { + '#status': 'status', + '#ttl': 'TTL' + }, + ExpressionAttributeValues: marshall({ + ':status': FileStatus.DELETED, + ':updatedAt': now, + ':ttl': Math.floor(now / 1000) + FILE_OPERATION_CONSTRAINTS.DELETION_RECORD_TTL_SECONDS + }) + }); + + return await this.dynamoClient.send(command); + }; + + await retryWithBackoff(ddbUpdateOperation, retrySettings); + + logger.debug(`Successfully deleted file from S3 and marked as deleted in metadata: ${fileName}`); + return { + success: true, + fileName, + error: undefined + }; + } catch (error) { + let errorMessage = error instanceof Error ? error.message : String(error); + logger.error(`Failed to delete individual file ${fileName}: ${errorMessage}`); + + if (errorMessage.includes('ConditionalCheckFailedException')) { + errorMessage = 'File record was modified or deleted by another process and is unavailable.'; + } + + return { + success: false, + fileName, + error: errorMessage + }; + } + } + + /** + * Deletes a file from S3 + * @param s3Key - The S3 key of the file to delete + * @param fileName - The file name (for logging) + * @throws Error if S3 deletion fails (allows retry of entire operation) + */ + private async deleteFileFromS3(s3Key: string, fileName: string): Promise { + const bucketName = process.env[MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]!; + + const command = new DeleteObjectCommand({ + Bucket: bucketName, + Key: s3Key + }); + + try { + await this.s3Client.send(command); + logger.debug(`Successfully deleted file from S3: ${s3Key}`); + } catch (error) { + logger.error(`Failed to delete file from S3 (${s3Key}): ${(error as Error).message}`); + throw new Error('Failed to delete file. Please retry.'); + } + } +} diff --git a/source/lambda/files-management/services/s3-presigned-url-service.ts b/source/lambda/files-management/services/s3-presigned-url-service.ts new file mode 100644 index 00000000..d3010cc8 --- /dev/null +++ b/source/lambda/files-management/services/s3-presigned-url-service.ts @@ -0,0 +1,169 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { S3Client, GetObjectCommand } from '@aws-sdk/client-s3'; +import { createPresignedPost } from '@aws-sdk/s3-presigned-post'; +import { getSignedUrl } from '@aws-sdk/s3-request-presigner'; +import { logger, tracer } from '../power-tools-init'; +import { AWSClientManager } from 'aws-sdk-lib'; +import { MULTIMODAL_FILE_UPLOAD_CONSTRAINTS } from '../utils/constants'; +import { generateUUID, retryWithBackoff, getRetrySettings } from '../utils/utils'; +import { MultimodalUploadParams, ExtendedPresignedPostResponse } from '../models/types'; + +/** + * S3 presigned URL service class for multimodal file operations + * Handles S3 presigned POST generation for file uploads and presigned GET URLs for downloads + */ +export class S3PresignedUrlService { + private s3Client: S3Client; + + constructor() { + this.s3Client = AWSClientManager.getServiceClient('s3'); + } + + /** + * Creates a presigned POST for multimodal file upload to S3 + * Replicates createSchemaUploadPresignedPost logic for multimodal files + * @param params - Multimodal upload parameters + * @returns Promise - The presigned POST response + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###createFileUploadPresignedPost' }) + public async createFileUploadPresignedPost(params: MultimodalUploadParams): Promise { + const bucketName = process.env.MULTIMODAL_DATA_BUCKET!; + const keyPrefix = `${params.useCaseId}/${params.userId}/${params.conversationId}/${params.messageId}/`; + + // Generate unique s3Key for the file + const uuid = generateUUID(); + const generatedFileName = `${uuid}.${params.fileExtension}`; + const s3Key = `${keyPrefix}${generatedFileName}`; + + try { + // Create XML object tagging for metadata and apply source tag for file validation + const createTag = (tagKey: string, tagValue: string): string => { + return `${tagKey}${tagValue}`; + }; + + const createTagSet = (tags: string[]): string => { + return `${tags.join('')}`; + }; + + const useCaseIdTag = createTag('useCaseId', params.useCaseId); + const uploadedByTag = createTag('uploadedBy', params.userId); + const sourceTag = createTag('source', 'gaab'); + + const tags = [useCaseIdTag, uploadedByTag, sourceTag]; + const tagging = createTagSet(tags); + + const presignedPost = await createPresignedPost(this.s3Client, { + Bucket: bucketName, + Key: s3Key, + Conditions: [ + // Ensure key starts with expected prefix to prevent path traversal + ['starts-with', '$key', keyPrefix], + [ + 'content-length-range', + MULTIMODAL_FILE_UPLOAD_CONSTRAINTS.MIN_FILE_SIZE_BYTES, + MULTIMODAL_FILE_UPLOAD_CONSTRAINTS.MAX_FILE_SIZE_BYTES + ], + ['eq', '$x-amz-meta-userid', params.userId], + ['eq', '$x-amz-meta-filename', params.fileName], + ['eq', '$x-amz-meta-fileextension', params.fileExtension], + ['eq', '$x-amz-meta-usecaseid', params.useCaseId], + ['eq', '$x-amz-meta-conversationid', params.conversationId], + ['eq', '$x-amz-meta-messageid', params.messageId], + ['eq', '$x-amz-meta-source', 'gaab'], + // Enforce content type to prevent MIME type confusion attacks + ['eq', '$Content-Type', params.contentType], + ['eq', '$tagging', tagging] + ], + Fields: { + key: s3Key, + 'x-amz-meta-userid': params.userId, + 'x-amz-meta-filename': params.fileName, + 'x-amz-meta-fileextension': params.fileExtension, + 'x-amz-meta-usecaseid': params.useCaseId, + 'x-amz-meta-conversationid': params.conversationId, + 'x-amz-meta-messageid': params.messageId, + 'x-amz-meta-source': 'gaab', + 'Content-Type': params.contentType, + 'tagging': tagging + }, + Expires: MULTIMODAL_FILE_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS + }); + + // Construct fileKey for tracking: "useCaseId/user-uuid/conversation-uuid/message-uuid" + const fileKey = `${params.useCaseId}/${params.userId}/${params.conversationId}/${params.messageId}`; + + logger.info( + `Generated S3 presigned POST for multimodal file upload - s3Key: ${s3Key}, fileName: ${params.fileName}`, + { + useCaseId: params.useCaseId, + userId: params.userId + } + ); + + return { + uploadUrl: presignedPost.url, + formFields: presignedPost.fields, + fileName: params.fileName, + fileKey: fileKey, + fileUuid: uuid, + fileExtension: params.fileExtension, + fileContentType: params.contentType, + expiresIn: MULTIMODAL_FILE_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS, + createdAt: new Date().toISOString() + }; + } catch (error) { + logger.error( + `Failed to generate S3 presigned POST - bucketName: ${bucketName}, key: ${s3Key}, error: ${(error as Error).message}`, + { + useCaseId: params.useCaseId, + userId: params.userId, + errorStack: (error as Error).stack + } + ); + logger.error(`S3PresignedUrlService presigned POST generation failed: ${(error as Error).message}`); + throw new Error('Failed due to unexpected error.'); + } + } + + /** + * Creates a presigned download URL for a file in S3 + * @param s3Key - The complete S3 key for the file + * @param fileName - The original file name for download disposition + * @param contentType - The file content type + * @returns Promise - The presigned download URL + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###createFileDownloadPresignedUrl' }) + public async generateDownloadUrl(s3Key: string, fileName: string, contentType: string): Promise { + const bucketName = process.env.MULTIMODAL_DATA_BUCKET!; + + try { + const operation = async () => { + const getObjectCommand = new GetObjectCommand({ + Bucket: bucketName, + Key: s3Key, + // ResponseContentDisposition HTTP header tells the browser how to handle the file when it's downloaded + // i.e. download the file rather than display it inline and + // it also specifies what the name of the downloaded file should be + ResponseContentDisposition: `attachment; filename="${fileName}"`, + ResponseContentType: contentType + }); + + return await getSignedUrl(this.s3Client, getObjectCommand, { + expiresIn: MULTIMODAL_FILE_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS + }); + }; + + const downloadUrl = await retryWithBackoff(operation, getRetrySettings()); + + logger.info(`Generated presigned download URL for file: ${fileName}, s3Key: ${s3Key}`); + return downloadUrl; + } catch (error) { + logger.error( + `Failed to generate presigned download URL - bucketName: ${bucketName}, key: ${s3Key}, error: ${(error as Error).message}` + ); + throw new Error(`Failed due to unexpected error.`); + } + } +} diff --git a/source/lambda/files-management/test/commands/file-command.test.ts b/source/lambda/files-management/test/commands/file-command.test.ts new file mode 100644 index 00000000..0746c7ff --- /dev/null +++ b/source/lambda/files-management/test/commands/file-command.test.ts @@ -0,0 +1,1176 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { FileUploadCommand, FileDeleteCommand, FileGetCommand } from '../../models/file-command'; +import { S3PresignedUrlService } from '../../services/s3-presigned-url-service'; +import { MetadataService } from '../../services/ddb-metadata-service'; +import { FileValidator } from '../../validators/file-validator'; +import { FileStatus } from '../../utils/constants'; +import { FileUploadRequest, FileDeleteRequest, FileGetRequest, FileMetadata } from '../../models/types'; +import { validateFileUploadRequest, validateFileDeleteRequest } from '../../validators/request-validators'; +import { metrics } from '../../power-tools-init'; +import { CloudWatchMetrics } from '../../utils/constants'; + +// Mock services +jest.mock('../../services/s3-presigned-url-service'); +jest.mock('../../services/ddb-metadata-service'); +jest.mock('../../validators/file-validator'); + +// Mock validators, powertools +jest.mock('../../validators/request-validators', () => ({ + validateFileUploadRequest: jest.fn(), + validateFileDeleteRequest: jest.fn() +})); +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + debug: jest.fn() + }, + tracer: { + captureMethod: () => (target: any, propertyKey: string, descriptor: PropertyDescriptor) => descriptor + }, + metrics: { + addMetric: jest.fn() + } +})); + +describe('File Commands', () => { + let mockS3Service: jest.Mocked; + let mockMetadataService: jest.Mocked; + let mockFileValidator: jest.Mocked; + let mockMetrics: any; + + beforeEach(() => { + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0084/v1.0.0" }'; + process.env.AWS_REGION = 'us-east-1'; + + mockMetrics = { + addMetric: jest.fn() + }; + + mockS3Service = { + createFileUploadPresignedPost: jest.fn(), + generateDownloadUrl: jest.fn() + } as any; + + mockMetadataService = { + createFileMetadata: jest.fn(), + deleteMultipleFiles: jest.fn(), + getExistingMetadataRecord: jest.fn() + } as any; + + mockFileValidator = { + validateMultimodalEnabled: jest.fn() + } as any; + + (S3PresignedUrlService as jest.Mock).mockImplementation(() => mockS3Service); + (MetadataService as jest.Mock).mockImplementation(() => mockMetadataService); + (FileValidator as jest.Mock).mockImplementation(() => mockFileValidator); + }); + + afterEach(() => { + jest.clearAllMocks(); + }); + + describe('FileUploadCommand', () => { + let uploadCommand: FileUploadCommand; + + beforeEach(() => { + uploadCommand = new FileUploadCommand(); + }); + + it('should execute file upload successfully', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['test1.txt', 'test2.pdf'] + }; + + const mockS3Response1 = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'test1.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid1', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + const mockS3Response2 = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key2' }, + fileName: 'test2.pdf', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid2', + fileExtension: 'pdf', + fileContentType: 'application/pdf', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + mockS3Service.createFileUploadPresignedPost + .mockResolvedValueOnce(mockS3Response1) + .mockResolvedValueOnce(mockS3Response2); + mockMetadataService.createFileMetadata.mockResolvedValue(); + + const result = await uploadCommand.execute(request, 'user-123'); + + expect(validateFileUploadRequest).toHaveBeenCalledWith(request); + expect(mockS3Service.createFileUploadPresignedPost).toHaveBeenCalledTimes(2); + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledTimes(2); + + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledWith( + 'use-case-123/user-123/conv-123/msg-123', + 'test1.txt', + 'txt', + 'text/plain', + 'uuid1' + ); + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledWith( + 'use-case-123/user-123/conv-123/msg-123', + 'test2.pdf', + 'pdf', + 'application/pdf', + 'uuid2' + ); + + expect(result).toEqual({ + uploads: [ + { + uploadUrl: mockS3Response1.uploadUrl, + formFields: mockS3Response1.formFields, + fileName: mockS3Response1.fileName, + expiresIn: mockS3Response1.expiresIn, + createdAt: mockS3Response1.createdAt, + error: null + }, + { + uploadUrl: mockS3Response2.uploadUrl, + formFields: mockS3Response2.formFields, + fileName: mockS3Response2.fileName, + expiresIn: mockS3Response2.expiresIn, + createdAt: mockS3Response2.createdAt, + error: null + } + ] + }); + expect(result.uploads).toHaveLength(2); + }); + + it('should handle individual file processing correctly', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['test.txt'] + }; + + const mockS3Response = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'test.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid1', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + mockS3Service.createFileUploadPresignedPost.mockResolvedValue(mockS3Response); + mockMetadataService.createFileMetadata.mockResolvedValue(); + + const result = await uploadCommand.execute(request, 'user-123'); + + expect(mockS3Service.createFileUploadPresignedPost).toHaveBeenCalledTimes(1); + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledTimes(1); + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledWith( + 'use-case-123/user-123/conv-123/msg-123', + 'test.txt', + 'txt', + 'text/plain', + 'uuid1' + ); + expect(result.uploads).toHaveLength(1); + expect(result.uploads[0]).toEqual({ + uploadUrl: mockS3Response.uploadUrl, + formFields: mockS3Response.formFields, + fileName: mockS3Response.fileName, + expiresIn: mockS3Response.expiresIn, + createdAt: mockS3Response.createdAt, + error: null + }); + }); + + it('should handle S3 service failures gracefully', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['test.txt'] + }; + + const error = new Error('S3 service error'); + mockS3Service.createFileUploadPresignedPost.mockRejectedValue(error); + + const result = await uploadCommand.execute(request, 'user-123'); + + expect(result.uploads).toHaveLength(1); + expect(result.uploads[0]).toEqual({ + uploadUrl: '', + formFields: {}, + fileName: 'test.txt', + expiresIn: 0, + createdAt: expect.any(String), + error: 'S3 service error' + }); + }); + + it('should handle metadata service failures by marking uploads as failed', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['test.txt'] + }; + + const mockS3Response = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'test.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid1', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + mockS3Service.createFileUploadPresignedPost.mockResolvedValue(mockS3Response); + + const error = new Error('Metadata service error'); + mockMetadataService.createFileMetadata.mockRejectedValue(error); + + const result = await uploadCommand.execute(request, 'user-123'); + + expect(result.uploads).toHaveLength(1); + expect(result.uploads[0].error).toBe('Metadata service error'); + expect(result.uploads[0].uploadUrl).toBe(''); + expect(result.uploads[0].formFields).toEqual({}); + }); + + it('should handle mixed success and failure scenarios', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['success.txt', 'failure.txt'] + }; + + const mockS3Response = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'success.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid1', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + // First file succeeds, second file fails + mockS3Service.createFileUploadPresignedPost + .mockResolvedValueOnce(mockS3Response) + .mockRejectedValueOnce(new Error('S3 service error')); + mockMetadataService.createFileMetadata.mockResolvedValue(); + + const result = await uploadCommand.execute(request, 'user-123'); + + expect(result.uploads).toHaveLength(2); + expect(result.uploads[0]).toEqual({ + uploadUrl: mockS3Response.uploadUrl, + formFields: mockS3Response.formFields, + fileName: mockS3Response.fileName, + expiresIn: mockS3Response.expiresIn, + createdAt: mockS3Response.createdAt, + error: null + }); + expect(result.uploads[1]).toEqual({ + uploadUrl: '', + formFields: {}, + fileName: 'failure.txt', + expiresIn: 0, + createdAt: expect.any(String), + error: 'S3 service error' + }); + }); + + it('should pass correct parameters to S3 service', async () => { + const request: FileUploadRequest = { + useCaseId: 'my-use-case', + conversationId: 'my-conversation', + messageId: 'my-message', + fileNames: ['file1.txt'] + }; + + const mockS3Response = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'file1.txt', + fileKey: 'my-use-case/my-user/my-conversation/my-message', + fileUuid: 'uuid1', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + mockS3Service.createFileUploadPresignedPost.mockResolvedValue(mockS3Response); + mockMetadataService.createFileMetadata.mockResolvedValue(); + + await uploadCommand.execute(request, 'my-user'); + + expect(mockS3Service.createFileUploadPresignedPost).toHaveBeenCalledWith({ + fileName: 'file1.txt', + userId: 'my-user', + contentType: 'text/plain', + fileExtension: 'txt', + useCaseId: 'my-use-case', + conversationId: 'my-conversation', + messageId: 'my-message' + }); + }); + + it('should handle parallel processing correctly', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['file1.txt', 'file2.txt', 'file3.txt'] + }; + + const mockS3Responses = [ + { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'file1.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid1', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }, + { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key2' }, + fileName: 'file2.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid2', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }, + { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key3' }, + fileName: 'file3.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid3', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + } + ]; + + mockS3Service.createFileUploadPresignedPost + .mockResolvedValueOnce(mockS3Responses[0]) + .mockResolvedValueOnce(mockS3Responses[1]) + .mockResolvedValueOnce(mockS3Responses[2]); + mockMetadataService.createFileMetadata.mockResolvedValue(); + + const result = await uploadCommand.execute(request, 'user-123'); + + expect(mockS3Service.createFileUploadPresignedPost).toHaveBeenCalledTimes(3); + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledTimes(3); + expect(result.uploads).toHaveLength(3); + expect(result.uploads.every((upload) => upload.error === null)).toBe(true); + }); + + it('should handle individual file processing with correct parameters', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['document.pdf'] + }; + + const mockS3Response = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'document.pdf', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'unique-uuid', + fileExtension: 'pdf', + fileContentType: 'application/pdf', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + mockS3Service.createFileUploadPresignedPost.mockResolvedValue(mockS3Response); + mockMetadataService.createFileMetadata.mockResolvedValue(); + + await uploadCommand.execute(request, 'user-123'); + + expect(mockS3Service.createFileUploadPresignedPost).toHaveBeenCalledWith({ + fileName: 'document.pdf', + userId: 'user-123', + contentType: 'application/pdf', + fileExtension: 'pdf', + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123' + }); + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledWith( + 'use-case-123/user-123/conv-123/msg-123', + 'document.pdf', + 'pdf', + 'application/pdf', + 'unique-uuid' + ); + }); + + it('should handle individual file processing with mixed success and failure', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['success.txt', 'failure.txt'] + }; + + const mockS3Response = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'success.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid1', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + // First file succeeds, second file fails + mockS3Service.createFileUploadPresignedPost + .mockResolvedValueOnce(mockS3Response) + .mockRejectedValueOnce(new Error('S3 service error')); + + mockMetadataService.createFileMetadata.mockResolvedValue(); + + const result = await uploadCommand.execute(request, 'user-123'); + + expect(result.uploads).toHaveLength(2); + expect(result.uploads[0]).toEqual({ + uploadUrl: mockS3Response.uploadUrl, + formFields: mockS3Response.formFields, + fileName: mockS3Response.fileName, + expiresIn: mockS3Response.expiresIn, + createdAt: mockS3Response.createdAt, + error: null + }); + expect(result.uploads[1]).toEqual({ + uploadUrl: '', + formFields: {}, + fileName: 'failure.txt', + expiresIn: 0, + createdAt: expect.any(String), + error: 'S3 service error' + }); + + // Only successful file should have metadata created + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledTimes(1); + }); + + it('should handle large number of files with individual processing', async () => { + const fileCount = 20; + const files = Array.from({ length: fileCount }, (_, i) => `file${i + 1}.txt`); + + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: files + }; + + // Mock individual S3 responses for each file + files.forEach((fileName, index) => { + mockS3Service.createFileUploadPresignedPost.mockResolvedValueOnce({ + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: `key${index + 1}` }, + fileName, + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: `uuid${index + 1}`, + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }); + }); + + mockMetadataService.createFileMetadata.mockResolvedValue(); + + const result = await uploadCommand.execute(request, 'user-123'); + + // All uploads should succeed with individual processing + expect(result.uploads).toHaveLength(fileCount); + expect(mockS3Service.createFileUploadPresignedPost).toHaveBeenCalledTimes(fileCount); + expect(mockMetadataService.createFileMetadata).toHaveBeenCalledTimes(fileCount); + result.uploads.forEach((upload) => { + expect(upload.error).toBeNull(); + expect(upload.uploadUrl).toBe('https://bucket.s3.amazonaws.com'); + }); + }); + it('should record correct metrics for individual file processing', async () => { + const request: FileUploadRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['success.txt', 'metadata-fail.txt'] + }; + + const mockS3Response = { + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key1' }, + fileName: 'success.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid1', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }; + + // First file succeeds, second file fails at metadata creation + mockS3Service.createFileUploadPresignedPost.mockResolvedValueOnce(mockS3Response).mockResolvedValueOnce({ + uploadUrl: 'https://bucket.s3.amazonaws.com', + formFields: { key: 'key2' }, + fileName: 'metadata-fail.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'uuid2', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z' + }); + + mockMetadataService.createFileMetadata + .mockResolvedValueOnce() // First file metadata succeeds + .mockRejectedValueOnce(new Error('Metadata creation failed')); // Second file metadata fails + + await uploadCommand.execute(request, 'user-123'); + + // Should record 1 success and 1 failure + expect(metrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILE_UPLOAD_TRIGGERED, 'Count', 1); + expect(metrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILE_UPLOAD_FAILURE, 'Count', 1); + }); + }); + + describe('FileDeleteCommand', () => { + let deleteCommand: FileDeleteCommand; + + beforeEach(() => { + deleteCommand = new FileDeleteCommand(); + }); + + it('should execute file deletion successfully', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['test1.txt', 'test2.pdf'] + }; + + const mockDeletionResults = [ + { success: true, fileName: 'test1.txt', error: undefined }, + { success: true, fileName: 'test2.pdf', error: undefined } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + const result = await deleteCommand.execute(request, 'user-123'); + + expect(validateFileDeleteRequest).toHaveBeenCalledWith(request); + expect(mockMetadataService.deleteMultipleFiles).toHaveBeenCalledWith([ + { fileKey: 'use-case-123/user-123/conv-123/msg-123', fileName: 'test1.txt' }, + { fileKey: 'use-case-123/user-123/conv-123/msg-123', fileName: 'test2.pdf' } + ]); + expect(result).toEqual({ + deletions: mockDeletionResults, + allSuccessful: true, + failureCount: 0 + }); + }); + + it('should handle partial failures', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['test1.txt', 'test2.pdf'] + }; + + const mockDeletionResults = [ + { success: true, fileName: 'test1.txt', error: undefined }, + { success: false, fileName: 'test2.pdf', error: 'File not found' } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + const result = await deleteCommand.execute(request, 'user-123'); + + expect(result).toEqual({ + deletions: mockDeletionResults, + allSuccessful: false, + failureCount: 1 + }); + }); + + it('should handle individual file deletion retry failures', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['retry-fail.txt', 'success.txt'] + }; + + const mockDeletionResults = [ + { success: false, fileName: 'retry-fail.txt', error: 'DynamoDB throttling - retries exhausted' }, + { success: true, fileName: 'success.txt', error: undefined } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + const result = await deleteCommand.execute(request, 'user-123'); + + expect(result).toEqual({ + deletions: mockDeletionResults, + allSuccessful: false, + failureCount: 1 + }); + expect(result.deletions[0].error).toBe('DynamoDB throttling - retries exhausted'); + }); + + it('should handle conditional check failures during deletion', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['concurrent-delete.txt'] + }; + + const mockDeletionResults = [ + { + success: false, + fileName: 'concurrent-delete.txt', + error: 'File record was modified or deleted by another process' + } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + const result = await deleteCommand.execute(request, 'user-123'); + + expect(result.deletions[0].error).toBe('File record was modified or deleted by another process'); + expect(result.allSuccessful).toBe(false); + expect(result.failureCount).toBe(1); + }); + + it('should handle parallel deletion processing correctly', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['file1.txt', 'file2.txt', 'file3.txt', 'file4.txt', 'file5.txt'] + }; + + // Simulate mixed results from parallel processing + const mockDeletionResults = [ + { success: true, fileName: 'file1.txt', error: undefined }, + { success: false, fileName: 'file2.txt', error: 'File record not found. Cannot perform deletion.' }, + { success: true, fileName: 'file3.txt', error: undefined }, + { success: false, fileName: 'file4.txt', error: 'Update failed' }, + { success: true, fileName: 'file5.txt', error: undefined } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + const result = await deleteCommand.execute(request, 'user-123'); + + expect(result.deletions).toHaveLength(5); + expect(result.allSuccessful).toBe(false); + expect(result.failureCount).toBe(2); + + // Verify successful deletions + const successfulDeletions = result.deletions.filter((d) => d.success); + expect(successfulDeletions).toHaveLength(3); + expect(successfulDeletions.map((d) => d.fileName)).toEqual(['file1.txt', 'file3.txt', 'file5.txt']); + + // Verify failed deletions + const failedDeletions = result.deletions.filter((d) => !d.success); + expect(failedDeletions).toHaveLength(2); + expect(failedDeletions[0].fileName).toBe('file2.txt'); + expect(failedDeletions[1].fileName).toBe('file4.txt'); + }); + + it('should throw error when metadata service fails', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['test.txt'] + }; + + const error = new Error('Metadata service error'); + mockMetadataService.deleteMultipleFiles.mockRejectedValue(error); + + await expect(deleteCommand.execute(request, 'user-123')).rejects.toThrow(error); + }); + + it('should handle all files failing deletion', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['test1.txt', 'test2.pdf'] + }; + + const mockDeletionResults = [ + { success: false, fileName: 'test1.txt', error: 'File record not found. Cannot perform deletion.' }, + { success: false, fileName: 'test2.pdf', error: 'Update failed' } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + const result = await deleteCommand.execute(request, 'user-123'); + + expect(result).toEqual({ + deletions: mockDeletionResults, + allSuccessful: false, + failureCount: 2 + }); + }); + + it('should correctly format response with mixed success/failure results', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['success.txt', 'failure.pdf'] + }; + + const mockDeletionResults = [ + { success: true, fileName: 'success.txt', error: undefined }, + { success: false, fileName: 'failure.pdf', error: 'Some error' } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + const result = await deleteCommand.execute(request, 'user-123'); + + // Test command-level response formatting + expect(result).toEqual({ + deletions: mockDeletionResults, + allSuccessful: false, + failureCount: 1 + }); + expect(result.deletions).toHaveLength(2); + expect(result.deletions[0].success).toBe(true); + expect(result.deletions[1].success).toBe(false); + }); + + it('should correctly calculate allSuccessful flag', async () => { + const testCases = [ + { + name: 'all successful', + results: [ + { success: true, fileName: 'file1.txt', error: undefined }, + { success: true, fileName: 'file2.txt', error: undefined } + ], + expectedAllSuccessful: true + }, + { + name: 'partial failure', + results: [ + { success: true, fileName: 'file1.txt', error: undefined }, + { success: false, fileName: 'file2.txt', error: 'Error' } + ], + expectedAllSuccessful: false + }, + { + name: 'all failures', + results: [ + { success: false, fileName: 'file1.txt', error: 'Error 1' }, + { success: false, fileName: 'file2.txt', error: 'Error 2' } + ], + expectedAllSuccessful: false + } + ]; + + for (const testCase of testCases) { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: testCase.results.map((r) => r.fileName) + }; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(testCase.results); + + const result = await deleteCommand.execute(request, 'user-123'); + + expect(result.allSuccessful).toBe(testCase.expectedAllSuccessful); + } + }); + + it('should correctly count failures in response', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['file1.txt', 'file2.txt', 'file3.txt', 'file4.txt'] + }; + + const mockDeletionResults = [ + { success: true, fileName: 'file1.txt', error: undefined }, + { success: false, fileName: 'file2.txt', error: 'Error 1' }, + { success: false, fileName: 'file3.txt', error: 'Error 2' }, + { success: true, fileName: 'file4.txt', error: undefined } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + const result = await deleteCommand.execute(request, 'user-123'); + + expect(result.failureCount).toBe(2); + expect(result.allSuccessful).toBe(false); + }); + + it('should properly construct fileKey for service calls', async () => { + const request: FileDeleteRequest = { + useCaseId: 'my-use-case', + conversationId: 'my-conversation', + messageId: 'my-message', + fileNames: ['test.txt'] + }; + + const mockDeletionResults = [{ success: true, fileName: 'test.txt', error: undefined }]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + await deleteCommand.execute(request, 'my-user'); + + // Verify the command constructs the correct fileKey format + expect(mockMetadataService.deleteMultipleFiles).toHaveBeenCalledWith([ + { fileKey: 'my-use-case/my-user/my-conversation/my-message', fileName: 'test.txt' } + ]); + }); + + it('should record correct metrics for deletion retry scenarios', async () => { + const request: FileDeleteRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileNames: ['success.txt', 'retry-fail.txt', 'success2.txt'] + }; + + const mockDeletionResults = [ + { success: true, fileName: 'success.txt', error: undefined }, + { success: false, fileName: 'retry-fail.txt', error: 'DynamoDB throttling - retries exhausted' }, + { success: true, fileName: 'success2.txt', error: undefined } + ]; + + mockMetadataService.deleteMultipleFiles.mockResolvedValue(mockDeletionResults); + + await deleteCommand.execute(request, 'user-123'); + + // Should record both success and failure metrics + expect(metrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILE_DELETE, 'Count', 2); + expect(metrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILE_ACCESS_FAILURES, 'Count', 1); + }); + }); + + describe('FileGetCommand', () => { + let getCommand: FileGetCommand; + + beforeEach(() => { + getCommand = new FileGetCommand(); + }); + + it('should execute file download successfully', async () => { + const request: FileGetRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt' + }; + + const mockMetadata: FileMetadata = { + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileName: 'test.txt', + fileUuid: 'uuid-123', + fileExtension: 'txt', + fileContentType: 'text/plain', + createdAt: Date.now(), + updatedAt: Date.now(), + status: FileStatus.UPLOADED, + TTL: Math.floor(Date.now() / 1000) + 3600 + }; + + const mockDownloadUrl = 'https://bucket.s3.amazonaws.com/download-url'; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(mockMetadata); + mockS3Service.generateDownloadUrl.mockResolvedValue(mockDownloadUrl); + + const result = await getCommand.execute(request, 'user-123'); + + expect(mockMetadataService.getExistingMetadataRecord).toHaveBeenCalledWith( + 'use-case-123/user-123/conv-123/msg-123', + 'test.txt' + ); + expect(mockS3Service.generateDownloadUrl).toHaveBeenCalledWith( + 'use-case-123/user-123/conv-123/msg-123/uuid-123.txt', + 'test.txt', + 'text/plain' + ); + expect(result).toEqual({ + downloadUrl: mockDownloadUrl + }); + }); + + it('should throw error when file not found', async () => { + const request: FileGetRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt' + }; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(null); + + await expect(getCommand.execute(request, 'user-123')).rejects.toThrow('File not found.'); + }); + + it('should throw error when file is deleted', async () => { + const request: FileGetRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt' + }; + + const mockMetadata: FileMetadata = { + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileName: 'test.txt', + fileUuid: 'uuid-123', + fileExtension: 'txt', + fileContentType: 'text/plain', + createdAt: Date.now(), + updatedAt: Date.now(), + status: FileStatus.DELETED, + TTL: Math.floor(Date.now() / 1000) + 3600 + }; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(mockMetadata); + + await expect(getCommand.execute(request, 'user-123')).rejects.toThrow( + "File status is 'deleted'. File cannot be retrieved." + ); + }); + + it('should throw error when file is not uploaded', async () => { + const request: FileGetRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt' + }; + + const mockMetadata: FileMetadata = { + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileName: 'test.txt', + fileUuid: 'uuid-123', + fileExtension: 'txt', + fileContentType: 'text/plain', + createdAt: Date.now(), + updatedAt: Date.now(), + status: FileStatus.PENDING, + TTL: Math.floor(Date.now() / 1000) + 3600 + }; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(mockMetadata); + + await expect(getCommand.execute(request, 'user-123')).rejects.toThrow( + "File status is 'pending'. File cannot be retrieved." + ); + }); + + it('should throw error when file status is invalid', async () => { + const request: FileGetRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt' + }; + + const mockMetadata: FileMetadata = { + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileName: 'test.txt', + fileUuid: 'uuid-123', + fileExtension: 'txt', + fileContentType: 'text/plain', + createdAt: Date.now(), + updatedAt: Date.now(), + status: FileStatus.INVALID, + TTL: Math.floor(Date.now() / 1000) + 3600 + }; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(mockMetadata); + + await expect(getCommand.execute(request, 'user-123')).rejects.toThrow( + "File status is 'invalid'. File cannot be retrieved." + ); + }); + + it('should throw error when S3 service fails', async () => { + const request: FileGetRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt' + }; + + const mockMetadata: FileMetadata = { + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileName: 'test.txt', + fileUuid: 'uuid-123', + fileExtension: 'txt', + fileContentType: 'text/plain', + createdAt: Date.now(), + updatedAt: Date.now(), + status: FileStatus.UPLOADED, + TTL: Math.floor(Date.now() / 1000) + 3600 + }; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(mockMetadata); + + const error = new Error('S3 service error'); + mockS3Service.generateDownloadUrl.mockRejectedValue(error); + + await expect(getCommand.execute(request, 'user-123')).rejects.toThrow(error); + }); + + it('should properly construct fileKey for metadata lookup', async () => { + const request: FileGetRequest = { + useCaseId: 'my-use-case', + conversationId: 'my-conversation', + messageId: 'my-message', + fileName: 'my-file.txt' + }; + + const mockMetadata: FileMetadata = { + fileKey: 'my-use-case/my-user/my-conversation/my-message', + fileName: 'my-file.txt', + fileUuid: 'uuid-123', + fileExtension: 'txt', + fileContentType: 'text/plain', + createdAt: Date.now(), + updatedAt: Date.now(), + status: FileStatus.UPLOADED, + TTL: Math.floor(Date.now() / 1000) + 3600 + }; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(mockMetadata); + mockS3Service.generateDownloadUrl.mockResolvedValue('https://download-url'); + + await getCommand.execute(request, 'my-user'); + + // Verify the command constructs the correct fileKey format + expect(mockMetadataService.getExistingMetadataRecord).toHaveBeenCalledWith( + 'my-use-case/my-user/my-conversation/my-message', + 'my-file.txt' + ); + }); + + it('should pass correct parameters to S3 service from metadata', async () => { + const request: FileGetRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.pdf' + }; + + const mockMetadata: FileMetadata = { + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileName: 'test.pdf', + fileUuid: 'unique-uuid-456', + fileExtension: 'pdf', + fileContentType: 'application/pdf', + createdAt: Date.now(), + updatedAt: Date.now(), + status: FileStatus.UPLOADED, + TTL: Math.floor(Date.now() / 1000) + 3600 + }; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(mockMetadata); + mockS3Service.generateDownloadUrl.mockResolvedValue('https://download-url'); + + await getCommand.execute(request, 'user-123'); + + expect(mockS3Service.generateDownloadUrl).toHaveBeenCalledWith( + 'use-case-123/user-123/conv-123/msg-123/unique-uuid-456.pdf', + 'test.pdf', + 'application/pdf' + ); + }); + + it('should format response correctly with download URL', async () => { + const request: FileGetRequest = { + useCaseId: 'use-case-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'document.docx' + }; + + const mockMetadata: FileMetadata = { + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileName: 'document.docx', + fileUuid: 'uuid-123', + fileExtension: 'docx', + fileContentType: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + createdAt: Date.now(), + updatedAt: Date.now(), + status: FileStatus.UPLOADED, + TTL: Math.floor(Date.now() / 1000) + 3600 + }; + + const mockDownloadUrl = 'https://presigned-download-url.com/document.docx'; + + mockMetadataService.getExistingMetadataRecord.mockResolvedValue(mockMetadata); + mockS3Service.generateDownloadUrl.mockResolvedValue(mockDownloadUrl); + + const result = await getCommand.execute(request, 'user-123'); + + expect(result).toEqual({ + downloadUrl: mockDownloadUrl + }); + }); + }); +}); diff --git a/source/lambda/files-management/test/factories/files-factory.test.ts b/source/lambda/files-management/test/factories/files-factory.test.ts new file mode 100644 index 00000000..b4532b92 --- /dev/null +++ b/source/lambda/files-management/test/factories/files-factory.test.ts @@ -0,0 +1,499 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayProxyEvent } from 'aws-lambda'; +import { FileRequestFactory } from '../../models/files-factory'; +import { FileOperationTypes } from '../../utils/constants'; +import { FileUploadRequest, FileDeleteRequest, FileGetRequest } from '../../models/types'; + +// Mock utils +jest.mock('../../utils/utils', () => ({ + parseEventBody: jest.fn(), + extractUseCaseId: jest.fn() +})); + +import { parseEventBody, extractUseCaseId } from '../../utils/utils'; + +const createMockAPIGatewayEvent = (overrides: Partial = {}): APIGatewayProxyEvent => ({ + body: null, + headers: {}, + multiValueHeaders: {}, + httpMethod: 'GET', + isBase64Encoded: false, + path: '/test', + pathParameters: null, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + accountId: '123456789012', + apiId: 'test-api', + authorizer: {}, + httpMethod: 'GET', + identity: { sourceIp: '127.0.0.1', userAgent: 'test-agent' } as any, + path: '/test', + protocol: 'HTTP/1.1', + requestId: 'test-request-id', + requestTime: '01/Jan/2023:00:00:00 +0000', + requestTimeEpoch: 1672531200000, + resourceId: 'test-resource', + resourcePath: '/test', + stage: 'test' + }, + resource: '/test', + ...overrides +}); + +describe('FileRequestFactory', () => { + const mockParseEventBody = parseEventBody as jest.MockedFunction; + const mockExtractUseCaseId = extractUseCaseId as jest.MockedFunction; + + beforeEach(() => { + jest.clearAllMocks(); + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0084/v1.0.0" }'; + process.env.AWS_REGION = 'us-east-1'; + mockExtractUseCaseId.mockReturnValue('test-use-case-123'); + }); + + describe('createRequest', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' } + }); + + it('should create upload request for UPLOAD operation', () => { + const mockBody = { + fileNames: ['test1.txt', 'test2.txt'], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD); + + expect(result).toEqual({ + fileNames: mockBody.fileNames, + conversationId: mockBody.conversationId, + messageId: mockBody.messageId, + useCaseId: 'test-use-case-123' + }); + expect(mockExtractUseCaseId).toHaveBeenCalledWith(mockEvent); + }); + + it('should create delete request for DELETE operation', () => { + const mockBody = { + fileNames: ['test1.txt', 'test2.txt'], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DELETE); + + expect(result).toEqual({ + fileNames: mockBody.fileNames, + conversationId: mockBody.conversationId, + messageId: mockBody.messageId, + useCaseId: 'test-use-case-123' + }); + expect(mockExtractUseCaseId).toHaveBeenCalledWith(mockEvent); + }); + + it('should create get request for DOWNLOAD operation', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' }, + queryStringParameters: { + fileName: 'test.txt', + conversationId: 'conv-123', + messageId: 'msg-123' + } + }); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DOWNLOAD); + + expect(result).toEqual({ + fileName: 'test.txt', + conversationId: 'conv-123', + messageId: 'msg-123', + useCaseId: 'test-use-case-123' + }); + expect(mockExtractUseCaseId).toHaveBeenCalledWith(mockEvent); + }); + + it('should throw error for unsupported operation', () => { + expect(() => FileRequestFactory.createRequest(mockEvent, 'INVALID_OPERATION')).toThrow( + 'Unsupported file operation: INVALID_OPERATION' + ); + }); + }); + + describe('createUploadRequest', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' } + }); + + it('should create valid upload request with all required fields', () => { + const mockBody = { + fileNames: ['test.txt'], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD) as FileUploadRequest; + + expect(result.fileNames).toEqual(mockBody.fileNames); + expect(result.conversationId).toBe(mockBody.conversationId); + expect(result.messageId).toBe(mockBody.messageId); + expect(result.useCaseId).toBe('test-use-case-123'); + }); + + it('should handle multiple files', () => { + const mockBody = { + fileNames: ['test1.txt', 'test2.pdf', 'test3.jpg'], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD) as FileUploadRequest; + + expect(result.fileNames).toEqual(['test1.txt', 'test2.pdf', 'test3.jpg']); + }); + + it('should throw error when fileNames field is missing', () => { + const mockBody = { + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD)).toThrow( + 'Validation failed: fileNames field is required and must be a non-empty array' + ); + }); + + it('should throw error when fileNames field is not an array', () => { + const mockBody = { + fileNames: 'not-an-array', + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD)).toThrow( + 'Validation failed: fileNames field is required and must be a non-empty array' + ); + }); + + it('should throw error when conversationId is missing', () => { + const mockBody = { + fileNames: ['test.txt'], + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD)).toThrow( + 'Validation failed: conversationId is required' + ); + }); + + it('should throw error when messageId is missing', () => { + const mockBody = { + fileNames: ['test.txt'], + conversationId: 'conv-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD)).toThrow( + 'Validation failed: messageId is required' + ); + }); + + it('should throw error when extractUseCaseId fails', () => { + const mockBody = { + fileNames: ['test.txt'], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + mockExtractUseCaseId.mockImplementation(() => { + throw new Error('Missing useCaseId in path parameters'); + }); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD)).toThrow( + 'Missing useCaseId in path parameters' + ); + }); + + it('should reject empty fileNames array', () => { + const mockBody = { + fileNames: [], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD)).toThrow( + 'Validation failed: fileNames field is required and must be a non-empty array' + ); + }); + + it('should throw error when fileNames contains duplicates', () => { + const mockBody = { + fileNames: ['test.txt', 'test2.txt', 'test.txt'], // duplicate 'test.txt' + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD)).toThrow( + 'fileNames field must have unique file name values' + ); + }); + + it('should accept fileNames with all unique values', () => { + const mockBody = { + fileNames: ['test1.txt', 'test2.txt', 'test3.txt'], // all unique + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD) as FileUploadRequest; + + expect(result.fileNames).toEqual(['test1.txt', 'test2.txt', 'test3.txt']); + expect(result.conversationId).toBe('conv-123'); + expect(result.messageId).toBe('msg-123'); + expect(result.useCaseId).toBe('test-use-case-123'); + }); + + it('should handle single file in fileNames array', () => { + const mockBody = { + fileNames: ['single-file.txt'], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD) as FileUploadRequest; + + expect(result.fileNames).toEqual(['single-file.txt']); + expect(result.conversationId).toBe('conv-123'); + expect(result.messageId).toBe('msg-123'); + expect(result.useCaseId).toBe('test-use-case-123'); + }); + + it('should throw error with multiple validation failures', () => { + const mockBody = { + fileNames: ['test.txt', 'test.txt'] // duplicate files + // missing conversationId and messageId + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.UPLOAD)).toThrow( + 'fileNames field must have unique file name values, conversationId is required, messageId is required' + ); + }); + }); + + describe('createDeleteRequest', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' } + }); + + it('should create valid delete request with all required fields', () => { + const mockBody = { + fileNames: ['test1.txt', 'test2.txt'], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DELETE) as FileDeleteRequest; + + expect(result.fileNames).toEqual(mockBody.fileNames); + expect(result.conversationId).toBe(mockBody.conversationId); + expect(result.messageId).toBe(mockBody.messageId); + expect(result.useCaseId).toBe('test-use-case-123'); + }); + + it('should throw error when fileNames field is missing', () => { + const mockBody = { + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DELETE)).toThrow( + 'Validation failed: fileNames field is required and must be a non-empty array' + ); + }); + + it('should throw error when fileNames field is not an array', () => { + const mockBody = { + fileNames: 'not-an-array', + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DELETE)).toThrow( + 'Validation failed: fileNames field is required and must be a non-empty array' + ); + }); + + it('should reject empty fileNames array', () => { + const mockBody = { + fileNames: [], + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DELETE)).toThrow( + 'Validation failed: fileNames field is required and must be a non-empty array' + ); + }); + + it('should throw error when fileNames contains duplicates', () => { + const mockBody = { + fileNames: ['file1.txt', 'file2.txt', 'file1.txt'], // duplicate 'file1.txt' + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DELETE)).toThrow( + 'fileNames field must have unique file name values' + ); + }); + + it('should accept fileNames with all unique values', () => { + const mockBody = { + fileNames: ['file1.txt', 'file2.pdf', 'file3.jpg'], // all unique + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DELETE) as FileDeleteRequest; + + expect(result.fileNames).toEqual(['file1.txt', 'file2.pdf', 'file3.jpg']); + expect(result.conversationId).toBe('conv-123'); + expect(result.messageId).toBe('msg-123'); + expect(result.useCaseId).toBe('test-use-case-123'); + }); + + it('should handle case-sensitive duplicate detection', () => { + const mockBody = { + fileNames: ['File.txt', 'file.txt'], // different case, should be treated as unique + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + mockParseEventBody.mockReturnValue(mockBody); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DELETE) as FileDeleteRequest; + + expect(result.fileNames).toEqual(['File.txt', 'file.txt']); + expect(result.conversationId).toBe('conv-123'); + expect(result.messageId).toBe('msg-123'); + expect(result.useCaseId).toBe('test-use-case-123'); + }); + }); + + describe('createGetRequest', () => { + it('should create valid get request with all required fields', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' }, + queryStringParameters: { + fileName: 'test.txt', + conversationId: 'conv-123', + messageId: 'msg-123' + } + }); + + const result = FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DOWNLOAD) as FileGetRequest; + + expect(result.fileName).toBe('test.txt'); + expect(result.conversationId).toBe('conv-123'); + expect(result.messageId).toBe('msg-123'); + expect(result.useCaseId).toBe('test-use-case-123'); + }); + + it('should throw error when fileName is missing', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' }, + queryStringParameters: { + conversationId: 'conv-123', + messageId: 'msg-123' + } + }); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DOWNLOAD)).toThrow( + 'Validation failed: fileName is required' + ); + }); + + it('should handle null queryStringParameters', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' }, + queryStringParameters: null + }); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DOWNLOAD)).toThrow( + 'Validation failed: fileName is required, conversationId is required, messageId is required' + ); + }); + + it('should handle empty string values in query parameters', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' }, + queryStringParameters: { + fileName: '', + conversationId: '', + messageId: '' + } + }); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DOWNLOAD)).toThrow( + 'Validation failed: fileName is required, conversationId is required, messageId is required' + ); + }); + + it('should handle partial query parameters', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { useCaseId: 'test-use-case-123' }, + queryStringParameters: { + fileName: 'test.txt', + conversationId: 'conv-123' + // missing messageId + } + }); + + expect(() => FileRequestFactory.createRequest(mockEvent, FileOperationTypes.DOWNLOAD)).toThrow( + 'Validation failed: messageId is required' + ); + }); + }); +}); diff --git a/source/lambda/files-management/test/index.test.ts b/source/lambda/files-management/test/index.test.ts new file mode 100644 index 00000000..e86ae653 --- /dev/null +++ b/source/lambda/files-management/test/index.test.ts @@ -0,0 +1,438 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda'; +import { filesHandler, handler, getFileOperation, fileRouteMap, fileCommandsRegistry } from '../index'; +import { FileOperationTypes } from '../utils/constants'; +import { checkEnv, extractUserId, setupMetricsDimensions } from '../utils/utils'; +import { FileValidator } from '../validators/file-validator'; +import { FileRequestFactory } from '../models/files-factory'; +import { formatResponse, formatError } from '../utils/http-response-formatters'; +import { FileUploadCommand, FileDeleteCommand, FileGetCommand } from '../models/file-command'; +import { AMZN_TRACE_ID_HEADER } from '../utils/constants'; +import { metrics } from '../power-tools-init'; + +// Mock all dependencies +jest.mock('../utils/utils', () => ({ + ...jest.requireActual('../utils/utils'), + checkEnv: jest.fn(), + extractUserId: jest.fn(), + setupMetricsDimensions: jest.fn() +})); + +jest.mock('../validators/file-validator'); +jest.mock('../models/files-factory'); +jest.mock('../utils/http-response-formatters'); +jest.mock('../models/file-command'); + +// Mock AWS SDK +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn().mockReturnValue({}) +})); + +jest.mock('@aws-sdk/client-dynamodb'); +jest.mock('@aws-sdk/client-s3'); +jest.mock('@aws-sdk/s3-presigned-post'); + +jest.mock('../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + metrics: { + publishStoredMetrics: jest.fn(), + setDefaultDimensions: jest.fn() + }, + tracer: { + getSegment: jest.fn(), + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureMethod: jest.fn(() => () => {}), + captureAWSv3Client: jest.fn() + } +})); + +describe('Files Management Index', () => { + // Factory function for creating test events + const createMockEvent = (overrides: Partial = {}): APIGatewayProxyEvent => ({ + httpMethod: 'POST', + resource: '/files/{useCaseId}', + path: '/files/test-use-case-123', + pathParameters: { + useCaseId: 'test-use-case-123' + }, + body: JSON.stringify({ + fileNames: ['test-file.jpg'], + conversationId: 'test-conversation-123', + messageId: 'test-message-123' + }), + headers: { + 'Content-Type': 'application/json' + }, + multiValueHeaders: {}, + isBase64Encoded: false, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + authorizer: { + UserId: 'test-user-123' + }, + requestId: 'test-request-id', + stage: 'test' + } as any, + ...overrides + }); + + const mockEvent = createMockEvent(); + + beforeEach(() => { + jest.clearAllMocks(); + + // Setup environment variables + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-metadata-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + process.env._X_AMZN_TRACE_ID = 'test-trace-id'; + + // Setup mocks + (extractUserId as jest.Mock).mockReturnValue('test-user-123'); + (checkEnv as jest.Mock).mockImplementation(() => {}); + (setupMetricsDimensions as jest.Mock).mockImplementation(() => {}); + (formatResponse as jest.Mock).mockImplementation((data, statusCode, headers) => ({ + statusCode, + headers: { 'Content-Type': 'application/json', ...headers }, + body: JSON.stringify(data) + })); + (formatError as jest.Mock).mockReturnValue({ + statusCode: 400, + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ message: 'Error occurred' }) + }); + }); + + afterEach(() => { + delete process.env.MULTIMODAL_METADATA_TABLE_NAME; + delete process.env.MULTIMODAL_DATA_BUCKET; + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env._X_AMZN_TRACE_ID; + }); + + describe('getFileOperation', () => { + it('should return correct operation for POST request', () => { + const event = createMockEvent({ + httpMethod: 'POST', + resource: '/files/{useCaseId}' + }); + + const operation = getFileOperation(event); + expect(operation).toBe(FileOperationTypes.UPLOAD); + }); + + it('should return correct operation for DELETE request', () => { + const event = createMockEvent({ + httpMethod: 'DELETE', + resource: '/files/{useCaseId}' + }); + + const operation = getFileOperation(event); + expect(operation).toBe(FileOperationTypes.DELETE); + }); + + it('should return correct operation for GET request', () => { + const event = createMockEvent({ + httpMethod: 'GET', + resource: '/files/{useCaseId}' + }); + + const operation = getFileOperation(event); + expect(operation).toBe(FileOperationTypes.DOWNLOAD); + }); + + it('should throw error for unsupported HTTP method', () => { + const event = createMockEvent({ + httpMethod: 'PUT', + resource: '/files/{useCaseId}' + }); + + expect(() => getFileOperation(event)).toThrow('Unsupported operation: PUT /files/{useCaseId}'); + }); + + it('should throw error for unsupported resource path', () => { + const event = createMockEvent({ + httpMethod: 'POST', + resource: '/invalid/path' + }); + + expect(() => getFileOperation(event)).toThrow('Unsupported operation: POST /invalid/path'); + }); + }); + + describe('filesHandler', () => { + let mockFileValidator: jest.Mocked; + let mockCommand: jest.Mocked; + + beforeEach(() => { + mockFileValidator = { + validateMultimodalCapability: jest.fn() + } as any; + (FileValidator as jest.Mock).mockImplementation(() => mockFileValidator); + + mockCommand = { + execute: jest.fn() + }; + + // Mock FileRequestFactory + (FileRequestFactory.createRequest as jest.Mock).mockReturnValue({ + useCaseId: 'test-use-case-123', + conversationId: 'test-conversation-123', + fileNames: ['test-file.jpg'] + }); + }); + + it('should handle successful file upload operation', async () => { + mockFileValidator.validateMultimodalCapability.mockResolvedValue(undefined); + + // Set up upload response + mockCommand.execute.mockResolvedValue({ + uploads: [ + { + uploadUrl: 'https://test-bucket.s3.amazonaws.com', + formFields: { key: 'test-key' }, + fileName: 'test-file.jpg', + fileKey: 'test-use-case-123/test-user-123/test-conversation-123/test-message-123', + fileUuid: 'test-uuid-123', + fileExtension: 'jpg', + fileContentType: 'image/jpeg', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00.000Z', + error: null + } + ] + }); + + // Mock the command registry to return our mock command + const originalGet = fileCommandsRegistry.get; + fileCommandsRegistry.get = jest.fn().mockReturnValue(mockCommand); + + const result = await filesHandler(mockEvent); + + expect(checkEnv).toHaveBeenCalled(); + expect(extractUserId).toHaveBeenCalledWith(mockEvent); + expect(FileRequestFactory.createRequest).toHaveBeenCalledWith(mockEvent, FileOperationTypes.UPLOAD); + expect(mockFileValidator.validateMultimodalCapability).toHaveBeenCalledWith('test-use-case-123'); + expect(setupMetricsDimensions).toHaveBeenCalledWith('test-use-case-123'); + expect(mockCommand.execute).toHaveBeenCalled(); + expect(result.statusCode).toBe(200); + expect(result.headers).toHaveProperty(AMZN_TRACE_ID_HEADER, 'test-trace-id'); + + const responseBody = JSON.parse(result.body); + expect(responseBody).toHaveProperty('uploads'); + expect(responseBody.uploads).toHaveLength(1); + expect(responseBody.uploads[0]).toHaveProperty('uploadUrl'); + expect(responseBody.uploads[0]).toHaveProperty('fileName', 'test-file.jpg'); + + // Restore original method + fileCommandsRegistry.get = originalGet; + }); + + it('should handle successful file delete operation', async () => { + const deleteEvent = createMockEvent({ + httpMethod: 'DELETE', + resource: '/files/{useCaseId}' + }); + + mockFileValidator.validateMultimodalCapability.mockResolvedValue(undefined); + + // Set up delete response + mockCommand.execute.mockResolvedValue({ + deletions: [ + { + success: true, + fileName: 'test-file.jpg' + } + ], + allSuccessful: true, + failureCount: 0 + }); + + const originalGet = fileCommandsRegistry.get; + fileCommandsRegistry.get = jest.fn().mockReturnValue(mockCommand); + + const result = await filesHandler(deleteEvent); + + expect(FileRequestFactory.createRequest).toHaveBeenCalledWith(deleteEvent, FileOperationTypes.DELETE); + expect(mockCommand.execute).toHaveBeenCalled(); + expect(result.statusCode).toBe(200); + + const responseBody = JSON.parse(result.body); + expect(responseBody).toHaveProperty('deletions'); + expect(responseBody).toHaveProperty('allSuccessful', true); + expect(responseBody).toHaveProperty('failureCount', 0); + + fileCommandsRegistry.get = originalGet; + }); + + it('should handle successful file download operation', async () => { + const downloadEvent = createMockEvent({ + httpMethod: 'GET', + resource: '/files/{useCaseId}' + }); + + mockFileValidator.validateMultimodalCapability.mockResolvedValue(undefined); + + // Set up download response + mockCommand.execute.mockResolvedValue({ + downloadUrl: 'https://test-bucket.s3.amazonaws.com/presigned-download-url' + }); + + const originalGet = fileCommandsRegistry.get; + fileCommandsRegistry.get = jest.fn().mockReturnValue(mockCommand); + + const result = await filesHandler(downloadEvent); + + expect(FileRequestFactory.createRequest).toHaveBeenCalledWith(downloadEvent, FileOperationTypes.DOWNLOAD); + expect(mockCommand.execute).toHaveBeenCalled(); + expect(result.statusCode).toBe(200); + + const responseBody = JSON.parse(result.body); + expect(responseBody).toHaveProperty( + 'downloadUrl', + 'https://test-bucket.s3.amazonaws.com/presigned-download-url' + ); + + fileCommandsRegistry.get = originalGet; + }); + + it('should handle environment validation errors', async () => { + (checkEnv as jest.Mock).mockImplementation(() => { + throw new Error('Missing environment variables'); + }); + + const result = await filesHandler(mockEvent); + + expect(formatError).toHaveBeenCalledWith({ + message: 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + }); + + it('should handle unsupported operation errors', async () => { + const invalidEvent = createMockEvent({ + httpMethod: 'PATCH', + resource: '/files/{useCaseId}' + }); + + const result = await filesHandler(invalidEvent); + + expect(formatError).toHaveBeenCalledWith({ + message: 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + }); + + it('should handle multimodal validation errors', async () => { + mockFileValidator.validateMultimodalCapability.mockRejectedValue( + new Error('Multimodal capability not enabled') + ); + + const originalGet = fileCommandsRegistry.get; + fileCommandsRegistry.get = jest.fn().mockReturnValue(mockCommand); + + const result = await filesHandler(mockEvent); + + expect(formatError).toHaveBeenCalledWith({ + message: 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + + fileCommandsRegistry.get = originalGet; + }); + + it('should handle command execution errors', async () => { + mockFileValidator.validateMultimodalCapability.mockResolvedValue(undefined); + mockCommand.execute.mockRejectedValue(new Error('Command execution failed')); + + const originalGet = fileCommandsRegistry.get; + fileCommandsRegistry.get = jest.fn().mockReturnValue(mockCommand); + + const result = await filesHandler(mockEvent); + + expect(formatError).toHaveBeenCalledWith({ + message: 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + + fileCommandsRegistry.get = originalGet; + }); + + it('should handle missing command for operation', async () => { + mockFileValidator.validateMultimodalCapability.mockResolvedValue(undefined); + + const originalGet = fileCommandsRegistry.get; + fileCommandsRegistry.get = jest.fn().mockReturnValue(undefined); + + const result = await filesHandler(mockEvent); + + expect(formatError).toHaveBeenCalledWith({ + message: 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + + fileCommandsRegistry.get = originalGet; + }); + + it('should handle non-Error exceptions', async () => { + (checkEnv as jest.Mock).mockImplementation(() => { + throw 'String error'; + }); + + const result = await filesHandler(mockEvent); + + expect(formatError).toHaveBeenCalledWith({ + message: 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + expect(result.body).toEqual('{"message":"Error occurred"}'); + }); + + it('should always publish metrics in finally block', async () => { + mockFileValidator.validateMultimodalCapability.mockResolvedValue(undefined); + + const originalGet = fileCommandsRegistry.get; + fileCommandsRegistry.get = jest.fn().mockReturnValue(mockCommand); + + await filesHandler(mockEvent); + + expect(metrics.publishStoredMetrics).toHaveBeenCalled(); + + fileCommandsRegistry.get = originalGet; + }); + + it('should publish metrics even when error occurs', async () => { + (checkEnv as jest.Mock).mockImplementation(() => { + throw new Error('Environment error'); + }); + + await filesHandler(mockEvent); + + expect(metrics.publishStoredMetrics).toHaveBeenCalled(); + }); + }); + + describe('Middleware Integration', () => { + it('should export middy-wrapped handler', () => { + expect(handler).toBeDefined(); + expect(typeof handler).toBe('function'); + }); + + it('should have middleware configuration', () => { + // Verify the handler has middy middleware attached + expect(handler).toHaveProperty('use'); + expect(handler).toHaveProperty('before'); + expect(handler).toHaveProperty('after'); + expect(handler).toHaveProperty('onError'); + }); + }); +}); diff --git a/source/lambda/files-management/test/services/ddb-config-service.test.ts b/source/lambda/files-management/test/services/ddb-config-service.test.ts new file mode 100644 index 00000000..5231cded --- /dev/null +++ b/source/lambda/files-management/test/services/ddb-config-service.test.ts @@ -0,0 +1,195 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + +import { DdbConfigService } from '../../services/ddb-config-service'; +import { AWSClientManager } from 'aws-sdk-lib'; +import { marshall } from '@aws-sdk/util-dynamodb'; + +// Mock the dependencies +jest.mock('../../power-tools-init', () => ({ + logger: { + debug: jest.fn(), + error: jest.fn(), + info: jest.fn() + }, + tracer: { + captureMethod: () => (target: any, propertyKey: string, descriptor: PropertyDescriptor) => descriptor, + captureAWSv3Client: jest.fn((client) => client) + } +})); + +const mockDynamoClient = { + send: jest.fn() +}; + +describe('DdbConfigService', () => { + let ddbConfigService: DdbConfigService; + + beforeEach(() => { + jest.clearAllMocks(); + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0084/v1.0.0" }'; + process.env.AWS_REGION = 'us-east-1'; + jest.spyOn(AWSClientManager, 'getServiceClient').mockReturnValue(mockDynamoClient as any); + + // Set up environment variables + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-llm-config-table'; + process.env.USE_CASES_TABLE_NAME = 'test-use-cases-table'; + + ddbConfigService = new DdbConfigService(); + }); + + afterEach(() => { + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env.USE_CASES_TABLE_NAME; + }); + + describe('fetchUseCaseMultimodalityConfig', () => { + const useCaseRecordKey = 'test-record-key'; + + it('should return true when multimodal is enabled', async () => { + const mockItem = { + config: { + LlmParams: { + MultimodalParams: { + MultimodalEnabled: true + } + } + } + }; + + mockDynamoClient.send.mockResolvedValue({ + Item: marshall(mockItem) + }); + + const result = await ddbConfigService.fetchUseCaseMultimodalityConfig(useCaseRecordKey); + + expect(result).toBe(true); + expect(mockDynamoClient.send).toHaveBeenCalledWith( + expect.objectContaining({ + input: expect.objectContaining({ + TableName: 'test-llm-config-table', + Key: marshall({ key: useCaseRecordKey }), + ProjectionExpression: 'config.LlmParams.MultimodalParams' + }) + }) + ); + }); + + it('should return false when multimodal is disabled', async () => { + const mockItem = { + config: { + LlmParams: { + MultimodalParams: { + MultimodalEnabled: false + } + } + } + }; + + mockDynamoClient.send.mockResolvedValue({ + Item: marshall(mockItem) + }); + + const result = await ddbConfigService.fetchUseCaseMultimodalityConfig(useCaseRecordKey); + + expect(result).toBe(false); + }); + + it('should return false when multimodal params are missing', async () => { + const mockItem = { + config: { + LlmParams: {} + } + }; + + mockDynamoClient.send.mockResolvedValue({ + Item: marshall(mockItem) + }); + + const result = await ddbConfigService.fetchUseCaseMultimodalityConfig(useCaseRecordKey); + + expect(result).toBe(false); + }); + + it('should throw error when item is not found', async () => { + mockDynamoClient.send.mockResolvedValue({ + Item: undefined + }); + + await expect(ddbConfigService.fetchUseCaseMultimodalityConfig(useCaseRecordKey)).rejects.toThrow( + 'Failed due to unexpected error.' + ); + }); + + it('should throw error when DynamoDB operation fails', async () => { + mockDynamoClient.send.mockRejectedValue(new Error('DynamoDB error')); + + await expect(ddbConfigService.fetchUseCaseMultimodalityConfig(useCaseRecordKey)).rejects.toThrow( + 'Failed due to unexpected error.' + ); + }); + }); + + describe('fetchUseCaseConfigRecordKey', () => { + const useCaseId = 'test-use-case-id'; + + it('should return use case config record key successfully', async () => { + const mockItem = { + UseCaseConfigRecordKey: 'test-config-key' + }; + + mockDynamoClient.send.mockResolvedValue({ + Item: marshall(mockItem) + }); + + const result = await ddbConfigService.fetchUseCaseConfigRecordKey(useCaseId); + + expect(result).toBe('test-config-key'); + expect(mockDynamoClient.send).toHaveBeenCalledWith( + expect.objectContaining({ + input: expect.objectContaining({ + TableName: 'test-use-cases-table', + Key: marshall({ UseCaseId: useCaseId }), + ProjectionExpression: 'UseCaseConfigRecordKey' + }) + }) + ); + }); + + it('should throw error when use case config is not found', async () => { + mockDynamoClient.send.mockResolvedValue({ + Item: undefined + }); + + await expect(ddbConfigService.fetchUseCaseConfigRecordKey(useCaseId)).rejects.toThrow( + 'Failed due to unexpected error.' + ); + }); + + it('should throw error when UseCaseConfigRecordKey is missing', async () => { + const mockItem = { + SomeOtherField: 'value' + }; + + mockDynamoClient.send.mockResolvedValue({ + Item: marshall(mockItem) + }); + + await expect(ddbConfigService.fetchUseCaseConfigRecordKey(useCaseId)).rejects.toThrow( + 'Failed due to unexpected error.' + ); + }); + + it('should throw error when DynamoDB operation fails', async () => { + mockDynamoClient.send.mockRejectedValue(new Error('DynamoDB error')); + + await expect(ddbConfigService.fetchUseCaseConfigRecordKey(useCaseId)).rejects.toThrow( + 'Failed due to unexpected error.' + ); + }); + }); +}); diff --git a/source/lambda/files-management/test/services/ddb-metadata-service.test.ts b/source/lambda/files-management/test/services/ddb-metadata-service.test.ts new file mode 100644 index 00000000..4aeb35bc --- /dev/null +++ b/source/lambda/files-management/test/services/ddb-metadata-service.test.ts @@ -0,0 +1,485 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + +jest.mock('aws-sdk-lib', () => { + const mockGetServiceClient = jest.fn(); + return { + AWSClientManager: { + getServiceClient: mockGetServiceClient, + resetClients: jest.fn() + } + }; +}); + +import { MetadataService } from '../../services/ddb-metadata-service'; +import { PutItemCommand, GetItemCommand, UpdateItemCommand } from '@aws-sdk/client-dynamodb'; +import { DeleteObjectCommand } from '@aws-sdk/client-s3'; +import { FileStatus } from '../../utils/constants'; +import { AWSClientManager } from 'aws-sdk-lib'; + +jest.mock('@aws-sdk/client-dynamodb'); +jest.mock('@aws-sdk/client-s3'); +jest.mock('@aws-sdk/util-dynamodb', () => ({ + marshall: jest.fn((data) => ({ marshalled: data })), // todo + unmarshall: jest.fn((data) => { + // Handle test data with DynamoDB attribute format + if (data.unmarshalled) { + return data.unmarshalled; + } + + // Convert DynamoDB attribute format to plain object for tests + const result: any = {}; + for (const [key, value] of Object.entries(data)) { + if (typeof value === 'object' && value !== null) { + if ('S' in value) result[key] = (value as any).S; + else if ('N' in value) result[key] = (value as any).N; + else if ('BOOL' in value) result[key] = (value as any).BOOL; + else result[key] = value; + } else { + result[key] = value; + } + } + return result; + }) +})); +jest.mock('../../power-tools-init', () => ({ + logger: { + debug: jest.fn(), + error: jest.fn(), + info: jest.fn() + }, + tracer: { + captureMethod: () => (target: any, propertyKey: string, descriptor: PropertyDescriptor) => descriptor, + captureAWSv3Client: jest.fn((client) => client) + } +})); + +const mockDynamoSend = jest.fn(); +const mockS3Send = jest.fn(); + +describe('MetadataService', () => { + let metadataService: MetadataService; + const testTableName = 'test-files-metadata-table'; + const testBucketName = 'test-files-bucket'; + const testFileKey = 'use-case-123/user-123/conv-123/msg-123'; + const testFileName = 'test.txt'; + + beforeEach(() => { + jest.clearAllMocks(); + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0084/v1.0.0" }'; + process.env.AWS_REGION = 'us-east-1'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = testTableName; + process.env.MULTIMODAL_FILES_BUCKET_NAME = testBucketName; + + (AWSClientManager.getServiceClient as jest.Mock).mockImplementation((service: string) => { + if (service === 'dynamodb') { + return { send: mockDynamoSend }; + } else if (service === 's3') { + return { send: mockS3Send }; + } + return { send: jest.fn() }; + }); + + metadataService = new MetadataService(); + }); + + afterEach(() => { + jest.clearAllMocks(); + delete process.env.MULTIMODAL_METADATA_TABLE_NAME; + delete process.env.MULTIMODAL_FILES_BUCKET_NAME; + delete process.env.AWS_SDK_USER_AGENT; + }); + + describe('createFileMetadata', () => { + it('should create file metadata successfully', async () => { + const now = Date.now(); + jest.spyOn(Date, 'now').mockReturnValue(now); + + mockDynamoSend.mockResolvedValue({}); + + await metadataService.createFileMetadata(testFileKey, testFileName, 'txt', 'text/plain', 'uuid-123'); + + expect(mockDynamoSend).toHaveBeenCalledWith(expect.any(PutItemCommand)); + + const putItemCall = mockDynamoSend.mock.calls[0][0] as PutItemCommand; + expect(putItemCall).toBeInstanceOf(PutItemCommand); + expect(mockDynamoSend).toHaveBeenCalledTimes(1); + }); + + it('should handle ConditionalCheckFailedException for duplicate files', async () => { + const error = new Error('ConditionalCheckFailedException'); + error.name = 'ConditionalCheckFailedException'; + mockDynamoSend.mockRejectedValue(error); + + await expect( + metadataService.createFileMetadata(testFileKey, testFileName, 'txt', 'text/plain', 'uuid-123') + ).rejects.toThrow('File already exists with "pending"/"uploaded" status. Upload not allowed.'); + }); + + it('should handle other DynamoDB errors', async () => { + const error = new Error('DynamoDB error'); + mockDynamoSend.mockRejectedValue(error); + + await expect( + metadataService.createFileMetadata(testFileKey, testFileName, 'txt', 'text/plain', 'uuid-123') + ).rejects.toThrow('Failed due to unexpected error.'); + }); + + it('should handle retry logic in createFileMetadata', async () => { + // Mock to fail 3 times then succeed (testing retry logic) + mockDynamoSend + .mockRejectedValueOnce(new Error('Temporary error')) + .mockRejectedValueOnce(new Error('Temporary error')) + .mockRejectedValueOnce(new Error('Temporary error')) + .mockResolvedValueOnce({}); + + await expect( + metadataService.createFileMetadata(testFileKey, testFileName, 'txt', 'text/plain', 'uuid-123') + ).resolves.not.toThrow(); + + expect(mockDynamoSend).toHaveBeenCalledTimes(4); // 3 failures + 1 success + }); + + it('should handle retry exhaustion in createFileMetadata', async () => { + // Mock to fail all retry attempts + mockDynamoSend + .mockRejectedValueOnce(new Error('Persistent error')) + .mockRejectedValueOnce(new Error('Persistent error')) + .mockRejectedValueOnce(new Error('Persistent error')) + .mockRejectedValueOnce(new Error('Persistent error')); + + await expect( + metadataService.createFileMetadata(testFileKey, testFileName, 'txt', 'text/plain', 'uuid-123') + ).rejects.toThrow('Failed due to unexpected error.'); + }); + }); + + describe('getExistingMetadataRecord', () => { + it('should get file metadata successfully', async () => { + const mockFileMetadata = { + fileKey: testFileKey, + fileName: testFileName, + fileUuid: 'uuid-123', + fileExtension: 'txt', + fileContentType: 'text/plain', + status: FileStatus.PENDING + }; + + mockDynamoSend.mockResolvedValue({ Item: { unmarshalled: mockFileMetadata } }); + + const result = await metadataService.getExistingMetadataRecord(testFileKey, testFileName); + const getItemCall = mockDynamoSend.mock.calls[0][0] as GetItemCommand; + + expect(result).toEqual(mockFileMetadata); + expect(mockDynamoSend).toHaveBeenCalledWith(expect.any(GetItemCommand)); + expect(getItemCall).toBeInstanceOf(GetItemCommand); + expect(mockDynamoSend).toHaveBeenCalledTimes(1); + }); + + it('should return null when item not found', async () => { + mockDynamoSend.mockResolvedValue({ Item: null }); + + const result = await metadataService.getExistingMetadataRecord(testFileKey, testFileName); + + expect(result).toBeNull(); + }); + + it('should handle DynamoDB errors', async () => { + const error = new Error('DynamoDB error'); + mockDynamoSend.mockRejectedValue(error); + + await expect(metadataService.getExistingMetadataRecord(testFileKey, testFileName)).rejects.toThrow( + 'Failed due to unexpected error.' + ); + }); + }); + + describe('deleteMultipleFiles', () => { + it('should delete files successfully using individual parallel operations', async () => { + const fileKeys = [ + { fileKey: 'key1', fileName: 'file1.txt' }, + { fileKey: 'key2', fileName: 'file2.pdf' } + ]; + + // Mock GetItemCommand calls for each file (to get existing records) + mockDynamoSend + .mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key1' }, + fileName: { S: 'file1.txt' }, + fileUuid: { S: 'uuid1' }, + fileExtension: { S: 'txt' }, + status: { S: 'uploaded' } + } + }) + .mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key2' }, + fileName: { S: 'file2.pdf' }, + fileUuid: { S: 'uuid2' }, + fileExtension: { S: 'pdf' }, + status: { S: 'uploaded' } + } + }); + + mockS3Send.mockResolvedValue({}); // S3 DeleteObjectCommand call + mockDynamoSend.mockResolvedValueOnce({}).mockResolvedValueOnce({}); // UpdateItemCommand call + + const result = await metadataService.deleteMultipleFiles(fileKeys); + + expect(result).toEqual([ + { success: true, fileName: 'file1.txt', error: undefined }, + { success: true, fileName: 'file2.pdf', error: undefined } + ]); + + expect(mockDynamoSend).toHaveBeenCalledWith(expect.any(GetItemCommand)); + expect(mockS3Send).toHaveBeenCalledWith(expect.any(DeleteObjectCommand)); + expect(mockDynamoSend).toHaveBeenCalledWith(expect.any(UpdateItemCommand)); + expect(mockDynamoSend).toHaveBeenCalledTimes(4); // 2 Gets + 2 Updates + expect(mockS3Send).toHaveBeenCalledTimes(2); // 2 S3 Deletes + }); + + it('should handle empty array gracefully', async () => { + const result = await metadataService.deleteMultipleFiles([]); + + expect(result).toEqual([]); + expect(mockDynamoSend).not.toHaveBeenCalled(); + expect(mockS3Send).not.toHaveBeenCalled(); + }); + + it('should handle individual file failures in parallel operations', async () => { + const fileKeys = [{ fileKey: 'key1', fileName: 'file1.txt' }]; + + // Mock GetItemCommand to fail with retries (4 attempts) + mockDynamoSend.mockRejectedValue(new Error('DynamoDB error')); + + const result = await metadataService.deleteMultipleFiles(fileKeys); + + expect(result).toEqual([ + { success: false, fileName: 'file1.txt', error: 'Failed due to unexpected error.' } + ]); + }); + + it('should handle S3 deletion failure and not update DynamoDB', async () => { + const fileKeys = [{ fileKey: 'key1', fileName: 'file1.txt' }]; + + // Mock successful GetItemCommand + mockDynamoSend.mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key1' }, + fileName: { S: 'file1.txt' }, + fileUuid: { S: 'uuid1' }, + fileExtension: { S: 'txt' } + } + }); + + // Mock S3 deletion to fail with retries + mockS3Send.mockRejectedValue(new Error('S3 deletion failed')); + + const result = await metadataService.deleteMultipleFiles(fileKeys); + + expect(result).toEqual([ + { success: false, fileName: 'file1.txt', error: 'Failed to delete file. Please retry.' } + ]); + + // Should have called GetItemCommand once and DeleteObjectCommand 4 times (retries) + // Should NOT have called UpdateItemCommand + expect(mockDynamoSend).toHaveBeenCalledTimes(1); // Only Get, no Update + expect(mockS3Send).toHaveBeenCalledTimes(4); // 4 retry attempts + }); + + it('should handle missing file records gracefully', async () => { + const fileKeys = [ + { fileKey: 'key1', fileName: 'file1.txt' }, + { fileKey: 'key2', fileName: 'file2.pdf' } + ]; + + // Mock first file exists, second file doesn't exist + mockDynamoSend + .mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key1' }, + fileName: { S: 'file1.txt' }, + fileUuid: { S: 'uuid1' }, + fileExtension: { S: 'txt' } + } + }) + .mockResolvedValueOnce({ Item: null }); // file2 not found + + // Mock S3 deletion for file1 + mockS3Send.mockResolvedValue({}); + + // Mock UpdateItemCommand for file1 + mockDynamoSend.mockResolvedValueOnce({}); + + const result = await metadataService.deleteMultipleFiles(fileKeys); + + expect(result).toEqual([ + { success: true, fileName: 'file1.txt', error: undefined }, + { success: false, fileName: 'file2.pdf', error: 'File not found. Cannot perform deletion.' } + ]); + }); + + it('should handle UpdateItemCommand failures', async () => { + const fileKeys = [{ fileKey: 'key1', fileName: 'file1.txt' }]; + + // Mock successful GetItemCommand + mockDynamoSend.mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key1' }, + fileName: { S: 'file1.txt' }, + fileUuid: { S: 'uuid1' }, + fileExtension: { S: 'txt' } + } + }); + + mockS3Send.mockResolvedValue({}); // Successful S3 deletion + + mockDynamoSend.mockRejectedValue(new Error('Update failed')); // Failed UpdateItemCommand (with retries) + + const result = await metadataService.deleteMultipleFiles(fileKeys); + expect(result).toEqual([{ success: false, fileName: 'file1.txt', error: 'Update failed' }]); + }); + + it('should handle ConditionalCheckFailedException gracefully', async () => { + const fileKeys = [{ fileKey: 'key1', fileName: 'file1.txt' }]; + + // Mock successful GetItemCommand + mockDynamoSend.mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key1' }, + fileName: { S: 'file1.txt' }, + fileUuid: { S: 'uuid1' }, + fileExtension: { S: 'txt' } + } + }); + + mockS3Send.mockResolvedValue({}); // Successful S3 deletion + mockDynamoSend.mockRejectedValue(new Error('ConditionalCheckFailedException: Record was modified')); + + const result = await metadataService.deleteMultipleFiles(fileKeys); + + expect(result).toEqual([ + { + success: false, + fileName: 'file1.txt', + error: 'File record was modified or deleted by another process and is unavailable.' + } + ]); + }); + + it('should process large number of files using individual parallel operations', async () => { + const fileKeys = Array.from({ length: 25 }, (_, i) => ({ + fileKey: `key${i}`, + fileName: `file${i}.txt` + })); + + // Mock all GetItemCommand calls to return existing records + for (let i = 0; i < 25; i++) { + mockDynamoSend.mockResolvedValueOnce({ + Item: { + fileKey: { S: `key${i}` }, + fileName: { S: `file${i}.txt` }, + fileUuid: { S: `uuid${i}` }, + fileExtension: { S: 'txt' } + } + }); + } + + // Mock all S3 DeleteObjectCommand calls to succeed + mockS3Send.mockResolvedValue({}); + + // Mock all UpdateItemCommand calls to succeed + for (let i = 0; i < 25; i++) { + mockDynamoSend.mockResolvedValueOnce({}); + } + + const result = await metadataService.deleteMultipleFiles(fileKeys); + + expect(result).toHaveLength(25); + expect(result.every((r) => r.success)).toBe(true); + expect(mockDynamoSend).toHaveBeenCalledTimes(50); // 25 Gets + 25 Updates + expect(mockS3Send).toHaveBeenCalledTimes(25); // 25 S3 Deletes + }); + + it('should handle mixed success and failure scenarios', async () => { + const fileKeys = [ + { fileKey: 'key1', fileName: 'file1.txt' }, + { fileKey: 'key2', fileName: 'file2.pdf' }, + { fileKey: 'key3', fileName: 'file3.jpg' } + ]; + + // Mock: file1 succeeds, file2 not found, file3 update fails + mockDynamoSend + .mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key1' }, + fileName: { S: 'file1.txt' }, + fileUuid: { S: 'uuid1' }, + fileExtension: { S: 'txt' } + } + }) + .mockResolvedValueOnce({ Item: null }) // file2 not found + .mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key3' }, + fileName: { S: 'file3.jpg' }, + fileUuid: { S: 'uuid3' }, + fileExtension: { S: 'jpg' } + } + }); + + // Mock S3 deletions + mockS3Send.mockResolvedValue({}); + + // Mock DynamoDB updates: file1 succeeds, file3 fails + mockDynamoSend + .mockResolvedValueOnce({}) // file1 update succeeds + .mockRejectedValueOnce(new Error('Update failed for file3')) + .mockRejectedValueOnce(new Error('Update failed for file3')) + .mockRejectedValueOnce(new Error('Update failed for file3')) + .mockRejectedValueOnce(new Error('Update failed for file3')); // file3 update fails (with retries) + + const result = await metadataService.deleteMultipleFiles(fileKeys); + + expect(result).toEqual([ + { success: true, fileName: 'file1.txt', error: undefined }, + { success: false, fileName: 'file2.pdf', error: 'File not found. Cannot perform deletion.' }, + { success: false, fileName: 'file3.jpg', error: 'Update failed for file3' } + ]); + }); + + it('should handle individual file deletion retry with exponential backoff', async () => { + const fileKeys = [{ fileKey: 'key1', fileName: 'file1.txt' }]; + + // Mock successful get + mockDynamoSend.mockResolvedValueOnce({ + Item: { + fileKey: { S: 'key1' }, + fileName: { S: 'file1.txt' }, + fileUuid: { S: 'uuid1' }, + fileExtension: { S: 'txt' } + } + }); + + // Mock successful S3 deletion + mockS3Send.mockResolvedValue({}); + + // Mock throttling on update with eventual success + mockDynamoSend + .mockRejectedValueOnce(new Error('ProvisionedThroughputExceededException')) + .mockRejectedValueOnce(new Error('ProvisionedThroughputExceededException')) + .mockResolvedValueOnce({}); + + const result = await metadataService.deleteMultipleFiles(fileKeys); + + expect(result).toEqual([{ success: true, fileName: 'file1.txt', error: undefined }]); + expect(mockDynamoSend).toHaveBeenCalledTimes(4); // 1 get + 3 update attempts + expect(mockS3Send).toHaveBeenCalledTimes(1); // 1 S3 delete + }); + }); +}); diff --git a/source/lambda/files-management/test/services/s3-presigned-url-service.test.ts b/source/lambda/files-management/test/services/s3-presigned-url-service.test.ts new file mode 100644 index 00000000..728c36bd --- /dev/null +++ b/source/lambda/files-management/test/services/s3-presigned-url-service.test.ts @@ -0,0 +1,333 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + +import { S3PresignedUrlService } from '../../services/s3-presigned-url-service'; +import { getSignedUrl } from '@aws-sdk/s3-request-presigner'; +import { createPresignedPost } from '@aws-sdk/s3-presigned-post'; +import { AWSClientManager } from 'aws-sdk-lib'; + +jest.mock('@aws-sdk/client-s3'); +jest.mock('@aws-sdk/s3-request-presigner'); +jest.mock('@aws-sdk/s3-presigned-post'); +jest.mock('uuid', () => ({ + v4: jest.fn(() => 'mock-uuid-123') +})); +jest.mock('../../power-tools-init', () => ({ + logger: { + debug: jest.fn(), + error: jest.fn(), + info: jest.fn() + }, + tracer: { + captureMethod: () => (target: any, propertyKey: string, descriptor: PropertyDescriptor) => descriptor, + captureAWSv3Client: jest.fn((client) => client) + } +})); + +// Mock the utility functions from utils +jest.mock('../../utils/utils', () => ({ + ...jest.requireActual('../../utils/utils'), + generateUUID: jest.fn(() => 'mock-uuid-123'), + retryWithBackoff: jest.fn((operation) => operation()), + getRetrySettings: jest.fn(() => ({ maxRetries: 3, baseDelay: 100 })) +})); + +const mockS3Client = { + send: jest.fn() +}; + +const mockGetSignedUrl = getSignedUrl as jest.MockedFunction; +const mockCreatePresignedPost = createPresignedPost as jest.MockedFunction; + +describe('S3PresignedUrlService', () => { + let s3PresignedUrlService: S3PresignedUrlService; + + beforeEach(() => { + jest.clearAllMocks(); + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0084/v1.0.0" }'; + process.env.AWS_REGION = 'us-east-1'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-bucket'; + process.env.AWS_SDK_USER_AGENT = '{"customUserAgent": "AWSSOLUTION/SO0123/v1.0.0"}'; + jest.spyOn(AWSClientManager, 'getServiceClient').mockReturnValue(mockS3Client as any); + + s3PresignedUrlService = new S3PresignedUrlService(); + }); + + afterEach(() => { + delete process.env.MULTIMODAL_DATA_BUCKET; + delete process.env.AWS_SDK_USER_AGENT; + }); + + describe('createFileUploadPresignedPost', () => { + const testParams = { + useCaseId: 'use-case-123', + userId: 'user-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt', + fileExtension: 'txt', + contentType: 'text/plain' + }; + + it('should create presigned post successfully', async () => { + const mockPresignedPost = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: { + key: 'test-key', + policy: 'policy-string' + } + }; + mockCreatePresignedPost.mockResolvedValue(mockPresignedPost); + + const result = await s3PresignedUrlService.createFileUploadPresignedPost(testParams); + + expect(result).toEqual({ + uploadUrl: mockPresignedPost.url, + formFields: mockPresignedPost.fields, + fileName: 'test.txt', + fileKey: 'use-case-123/user-123/conv-123/msg-123', + fileUuid: 'mock-uuid-123', + fileExtension: 'txt', + fileContentType: 'text/plain', + expiresIn: 3600, + createdAt: expect.any(String) + }); + }); + + it('should generate correct S3 key structure', async () => { + const mockPresignedPost = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: {} + }; + mockCreatePresignedPost.mockResolvedValue(mockPresignedPost); + + await s3PresignedUrlService.createFileUploadPresignedPost(testParams); + + expect(mockCreatePresignedPost).toHaveBeenCalledWith( + mockS3Client, + expect.objectContaining({ + Bucket: 'test-bucket', + Key: 'use-case-123/user-123/conv-123/msg-123/mock-uuid-123.txt', + Fields: expect.objectContaining({ + 'Content-Type': 'text/plain', + 'x-amz-meta-userid': 'user-123', + 'x-amz-meta-filename': 'test.txt', + 'x-amz-meta-fileextension': 'txt', + 'x-amz-meta-usecaseid': 'use-case-123', + 'x-amz-meta-conversationid': 'conv-123', + 'x-amz-meta-messageid': 'msg-123', + 'x-amz-meta-source': 'gaab' + }), + Conditions: expect.arrayContaining([ + ['starts-with', '$key', 'use-case-123/user-123/conv-123/msg-123/'], + ['content-length-range', 1, 4718592], + ['eq', '$Content-Type', 'text/plain'] + ]) + }) + ); + }); + + it('should throw error when presigned post creation fails', async () => { + const error = new Error('S3 presigned post error'); + mockCreatePresignedPost.mockRejectedValue(error); + + await expect(s3PresignedUrlService.createFileUploadPresignedPost(testParams)).rejects.toThrow( + 'Failed due to unexpected error.' + ); + }); + + it('should include proper XML tagging in presigned post', async () => { + const mockPresignedPost = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: {} + }; + mockCreatePresignedPost.mockResolvedValue(mockPresignedPost); + + await s3PresignedUrlService.createFileUploadPresignedPost(testParams); + + const call = mockCreatePresignedPost.mock.calls[0][1]; + const tagging = call.Fields!.tagging; + expect(tagging).toContain(''); + expect(tagging).toContain('useCaseIduse-case-123'); + expect(tagging).toContain('uploadedByuser-123'); + expect(tagging).toContain(''); + }); + }); + + describe('generateDownloadUrl', () => { + it('should create download presigned URL successfully', async () => { + const mockUrl = 'https://test-bucket.s3.amazonaws.com/test-key?signature=abc123'; + mockGetSignedUrl.mockResolvedValue(mockUrl); + + const result = await s3PresignedUrlService.generateDownloadUrl( + 'use-case-123/user-123/conv-123/msg-123/uuid-123.txt', + 'test.txt', + 'text/plain' + ); + + expect(result).toBe(mockUrl); + }); + + it('should construct correct S3 key and parameters', async () => { + const mockUrl = 'https://test-bucket.s3.amazonaws.com/test-key?signature=abc123'; + mockGetSignedUrl.mockResolvedValue(mockUrl); + + const result = await s3PresignedUrlService.generateDownloadUrl( + 'use-case-123/user-123/conv-123/msg-123/uuid-123.txt', + 'test file.txt', + 'text/plain' + ); + + expect(result).toBe(mockUrl); + expect(mockGetSignedUrl).toHaveBeenCalledWith( + mockS3Client, + expect.any(Object), // GetObjectCommand instance + { expiresIn: 3600 } + ); + }); + + it('should throw error when URL generation fails', async () => { + const error = new Error('S3 access denied'); + mockGetSignedUrl.mockRejectedValue(error); + + await expect( + s3PresignedUrlService.generateDownloadUrl( + 'use-case-123/user-123/conv-123/msg-123/uuid-123.txt', + 'test.txt', + 'text/plain' + ) + ).rejects.toThrow('Failed due to unexpected error.'); + }); + }); + + describe('Edge cases and error handling', () => { + it('should handle different file extensions correctly', async () => { + const mockPresignedPost = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: {} + }; + mockCreatePresignedPost.mockResolvedValue(mockPresignedPost); + + const testCases = [ + { fileName: 'test.PDF', fileExtension: 'PDF', expectedType: 'application/pdf' }, + { fileName: 'test.JPEG', fileExtension: 'JPEG', expectedType: 'image/jpeg' }, + { + fileName: 'test.docx', + fileExtension: 'docx', + expectedType: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' + } + ]; + + for (const testCase of testCases) { + const result = await s3PresignedUrlService.createFileUploadPresignedPost({ + useCaseId: 'use-case-123', + userId: 'user-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: testCase.fileName, + fileExtension: testCase.fileExtension, + contentType: testCase.expectedType + }); + + expect(result.fileExtension).toBe(testCase.fileExtension); + expect(result.fileContentType).toBe(testCase.expectedType); + } + }); + + it('should handle S3 access denied errors gracefully', async () => { + const error = new Error('Access Denied'); + mockCreatePresignedPost.mockRejectedValue(error); + + await expect( + s3PresignedUrlService.createFileUploadPresignedPost({ + useCaseId: 'use-case-123', + userId: 'user-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt', + fileExtension: 'txt', + contentType: 'text/plain' + }) + ).rejects.toThrow('Failed due to unexpected error.'); + }); + }); + + describe('Additional comprehensive tests', () => { + it('should validate S3 key structure and security conditions', async () => { + const mockPresignedPost = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: {} + }; + mockCreatePresignedPost.mockResolvedValue(mockPresignedPost); + + await s3PresignedUrlService.createFileUploadPresignedPost({ + useCaseId: 'use-case-123', + userId: 'user-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt', + fileExtension: 'txt', + contentType: 'text/plain' + }); + + const call = mockCreatePresignedPost.mock.calls[0][1]; + expect(call.Conditions).toEqual( + expect.arrayContaining([ + ['starts-with', '$key', 'use-case-123/user-123/conv-123/msg-123/'], + ['content-length-range', 1, 4718592], + ['eq', '$x-amz-meta-userid', 'user-123'], + ['eq', '$x-amz-meta-filename', 'test.txt'], + ['eq', '$x-amz-meta-fileextension', 'txt'], + ['eq', '$x-amz-meta-usecaseid', 'use-case-123'], + ['eq', '$x-amz-meta-conversationid', 'conv-123'], + ['eq', '$x-amz-meta-messageid', 'msg-123'], + ['eq', '$x-amz-meta-source', 'gaab'], + ['eq', '$Content-Type', 'text/plain'], + ['eq', '$tagging', expect.any(String)] + ]) + ); + }); + + it('should handle download URL generation with proper parameters', async () => { + const mockUrl = 'https://test-bucket.s3.amazonaws.com/test-key?signature=abc123'; + mockGetSignedUrl.mockResolvedValue(mockUrl); + + const result = await s3PresignedUrlService.generateDownloadUrl( + 'use-case-123/user-123/conv-123/msg-123/uuid-123.txt', + 'test file.txt', + 'text/plain' + ); + + expect(result).toBe(mockUrl); + expect(mockGetSignedUrl).toHaveBeenCalledWith( + mockS3Client, + expect.any(Object), // GetObjectCommand instance + { expiresIn: 3600 } + ); + }); + + it('should validate expiry', async () => { + const mockPresignedPost = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: {} + }; + mockCreatePresignedPost.mockResolvedValue(mockPresignedPost); + + const result = await s3PresignedUrlService.createFileUploadPresignedPost({ + useCaseId: 'use-case-123', + userId: 'user-123', + conversationId: 'conv-123', + messageId: 'msg-123', + fileName: 'test.txt', + fileExtension: 'txt', + contentType: 'text/plain' + }); + + expect(result.expiresIn).toBe(3600); // 1 hour + }); + }); +}); diff --git a/source/lambda/files-management/test/utils/error.test.ts b/source/lambda/files-management/test/utils/error.test.ts new file mode 100644 index 00000000..8cba40a4 --- /dev/null +++ b/source/lambda/files-management/test/utils/error.test.ts @@ -0,0 +1,31 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import RequestValidationError from '../../utils/error'; + +describe('RequestValidationError', () => { + it('should create error with correct message and name', () => { + const errorMessage = 'Test validation error'; + const error = new RequestValidationError(errorMessage); + + expect(error.message).toBe(errorMessage); + expect(error.name).toBe('CustomHttpError'); + expect(error).toBeInstanceOf(Error); + expect(error).toBeInstanceOf(RequestValidationError); + }); + + it('should be throwable and catchable', () => { + const errorMessage = 'Test validation error'; + + expect(() => { + throw new RequestValidationError(errorMessage); + }).toThrow(errorMessage); + + try { + throw new RequestValidationError(errorMessage); + } catch (error) { + expect(error).toBeInstanceOf(RequestValidationError); + expect((error as RequestValidationError).message).toBe(errorMessage); + } + }); +}); diff --git a/source/lambda/files-management/test/utils/http-response-formatter.test.ts b/source/lambda/files-management/test/utils/http-response-formatter.test.ts new file mode 100644 index 00000000..8ed01470 --- /dev/null +++ b/source/lambda/files-management/test/utils/http-response-formatter.test.ts @@ -0,0 +1,103 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { formatResponse, formatError } from '../../utils/http-response-formatters'; + +describe('When formatting messages as HTTP responses', () => { + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0999/v9.9.9" }'; + process.env.AWS_REGION = 'us-east-1'; + }); + + it('Should format the message into a default response correctly', () => { + const response = formatResponse('Test response'); + expect(response).toEqual({ + 'statusCode': 200, + 'headers': { + 'Content-Type': 'application/json', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Credentials': 'true', + 'Access-Control-Allow-Origin': '*' // NOSONAR - javascript:S5122 - Domain not known at this point. + }, + 'isBase64Encoded': false, + 'body': 'Test response' + }); + }); + + it('Should format the message into a response correctly with extra headers', () => { + const response = formatResponse({ 'test-body': 'Test response' }, 200, { + 'x-amz-testHeader': 'test-header-value' + }); + expect(response).toEqual({ + 'statusCode': 200, + 'headers': { + 'Content-Type': 'application/json', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Origin': '*', // NOSONAR - javascript:S5122 - Domain not known at this point. + 'Access-Control-Allow-Credentials': 'true', + 'x-amz-testHeader': 'test-header-value' + }, + 'isBase64Encoded': false, + 'body': '{"test-body":"Test response"}' + }); + }); + + afterAll(() => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env.AWS_REGION; + }); +}); + +describe('When formatting error responses as HTTP responses', () => { + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0999/v9.9.9" }'; + process.env.AWS_REGION = 'us-east-1'; + }); + + it('Should format the error into a default response correctly', () => { + const response = formatError({ + message: 'Test Error' + }); + expect(response).toEqual({ + 'statusCode': 400, + 'headers': { + 'Content-Type': 'application/json', + 'x-amzn-ErrorType': 'CustomExecutionError', + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET' + }, + 'isBase64Encoded': false, + 'body': '{"message":"Test Error"}' + }); + }); + + it('Should format a custom error response correctly', () => { + expect( + formatError({ + message: 'Test Error', + originalStatusCode: 417, + extraHeaders: { mockHeader: 'mockValue' } + }) + ).toEqual({ + 'statusCode': 400, + 'headers': { + 'Content-Type': 'application/json', + 'x-amzn-ErrorType': 'CustomExecutionError', + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'mockHeader': 'mockValue' + }, + 'isBase64Encoded': false, + 'body': '{"message":"Test Error","originalStatusCode":417}' + }); + }); + + afterAll(() => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env.AWS_REGION; + }); +}); diff --git a/source/lambda/files-management/test/utils/multimodal-cache.test.ts b/source/lambda/files-management/test/utils/multimodal-cache.test.ts new file mode 100644 index 00000000..3f39a34b --- /dev/null +++ b/source/lambda/files-management/test/utils/multimodal-cache.test.ts @@ -0,0 +1,226 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { MultimodalCache } from '../../utils/multimodal-cache'; +import { MULTIMODAL_CACHE_TTL_MS } from '../../utils/constants'; + +// Mock power tools +jest.mock('../../power-tools-init', () => ({ + logger: { + debug: jest.fn(), + info: jest.fn(), + warn: jest.fn(), + error: jest.fn() + } +})); + +describe('MultimodalCache', () => { + beforeEach(() => { + MultimodalCache.clear(); // Clear cache before each test + jest.clearAllMocks(); + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0084/v1.0.0" }'; + process.env.AWS_REGION = 'us-east-1'; + }); + + describe('set and get operations', () => { + it('should store and retrieve cached values', () => { + const useCaseId = 'test-use-case-123'; + + MultimodalCache.set(useCaseId, true); + const result = MultimodalCache.get(useCaseId); + + expect(result).toBe(true); + }); + + it('should store and retrieve false values', () => { + const useCaseId = 'test-use-case-123'; + + MultimodalCache.set(useCaseId, false); + const result = MultimodalCache.get(useCaseId); + + expect(result).toBe(false); + }); + + it('should return undefined for non-existent keys', () => { + const result = MultimodalCache.get('non-existent-key'); + expect(result).toBeUndefined(); + }); + + it('should handle multiple use case IDs', () => { + MultimodalCache.set('use-case-1', true); + MultimodalCache.set('use-case-2', false); + MultimodalCache.set('use-case-3', true); + + expect(MultimodalCache.get('use-case-1')).toBe(true); + expect(MultimodalCache.get('use-case-2')).toBe(false); + expect(MultimodalCache.get('use-case-3')).toBe(true); + }); + }); + + describe('TTL expiration', () => { + it('should return undefined for expired entries', () => { + const useCaseId = 'test-use-case-123'; + + // Mock Date.now to simulate time passing + const originalDateNow = Date.now; + const startTime = 1000000; + Date.now = jest.fn(() => startTime); + + MultimodalCache.set(useCaseId, true); + + // Advance time beyond TTL + Date.now = jest.fn(() => startTime + MULTIMODAL_CACHE_TTL_MS + 1000); + + const result = MultimodalCache.get(useCaseId); + expect(result).toBeUndefined(); + + // Restore original Date.now + Date.now = originalDateNow; + }); + + it('should return cached value within TTL', () => { + const useCaseId = 'test-use-case-123'; + + // Mock Date.now to control time + const originalDateNow = Date.now; + const startTime = 1000000; + Date.now = jest.fn(() => startTime); + + MultimodalCache.set(useCaseId, true); + + // Advance time but stay within TTL + Date.now = jest.fn(() => startTime + MULTIMODAL_CACHE_TTL_MS - 1000); + + const result = MultimodalCache.get(useCaseId); + expect(result).toBe(true); + + // Restore original Date.now + Date.now = originalDateNow; + }); + + it('should automatically remove expired entries on get', () => { + const useCaseId = 'test-use-case-123'; + + // Mock Date.now to simulate time passing + const originalDateNow = Date.now; + const startTime = 1000000; + Date.now = jest.fn(() => startTime); + + MultimodalCache.set(useCaseId, true); + + // Verify entry exists + expect(MultimodalCache.getStats().size).toBe(1); + + // Advance time beyond TTL + Date.now = jest.fn(() => startTime + MULTIMODAL_CACHE_TTL_MS + 1000); + + // Get should return undefined and remove expired entry + const result = MultimodalCache.get(useCaseId); + expect(result).toBeUndefined(); + expect(MultimodalCache.getStats().size).toBe(0); + + // Restore original Date.now + Date.now = originalDateNow; + }); + }); + + describe('cleanupExpiredEntries', () => { + it('should remove expired entries', () => { + const originalDateNow = Date.now; + const startTime = 1000000; + Date.now = jest.fn(() => startTime); + + // Add multiple entries + MultimodalCache.set('use-case-1', true); + MultimodalCache.set('use-case-2', false); + MultimodalCache.set('use-case-3', true); + + expect(MultimodalCache.getStats().size).toBe(3); + + // Advance time beyond TTL + Date.now = jest.fn(() => startTime + MULTIMODAL_CACHE_TTL_MS + 1000); + + // Cleanup should remove all expired entries + MultimodalCache.cleanupExpiredEntries(); + expect(MultimodalCache.getStats().size).toBe(0); + + // Restore original Date.now + Date.now = originalDateNow; + }); + + it('should keep non-expired entries', () => { + const originalDateNow = Date.now; + const startTime = 1000000; + Date.now = jest.fn(() => startTime); + + // Add entry + MultimodalCache.set('use-case-1', true); + + // Advance time but stay within TTL + Date.now = jest.fn(() => startTime + MULTIMODAL_CACHE_TTL_MS - 1000); + + // Add another entry (this one is newer) + MultimodalCache.set('use-case-2', false); + + // Advance time to expire first entry but not second + Date.now = jest.fn(() => startTime + MULTIMODAL_CACHE_TTL_MS + 500); + + MultimodalCache.cleanupExpiredEntries(); + + // Only the newer entry should remain + expect(MultimodalCache.getStats().size).toBe(1); + expect(MultimodalCache.get('use-case-1')).toBeUndefined(); + expect(MultimodalCache.get('use-case-2')).toBe(false); + + // Restore original Date.now + Date.now = originalDateNow; + }); + + it('should handle empty cache gracefully', () => { + expect(() => MultimodalCache.cleanupExpiredEntries()).not.toThrow(); + expect(MultimodalCache.getStats().size).toBe(0); + }); + }); + + describe('clear', () => { + it('should remove all entries', () => { + MultimodalCache.set('use-case-1', true); + MultimodalCache.set('use-case-2', false); + MultimodalCache.set('use-case-3', true); + + expect(MultimodalCache.getStats().size).toBe(3); + + MultimodalCache.clear(); + + expect(MultimodalCache.getStats().size).toBe(0); + expect(MultimodalCache.get('use-case-1')).toBeUndefined(); + expect(MultimodalCache.get('use-case-2')).toBeUndefined(); + expect(MultimodalCache.get('use-case-3')).toBeUndefined(); + }); + + it('should handle empty cache gracefully', () => { + expect(() => MultimodalCache.clear()).not.toThrow(); + expect(MultimodalCache.getStats().size).toBe(0); + }); + }); + + describe('getStats', () => { + it('should return correct statistics', () => { + const stats = MultimodalCache.getStats(); + expect(stats).toHaveProperty('size'); + expect(stats).toHaveProperty('keys'); + expect(Array.isArray(stats.keys)).toBe(true); + expect(stats.size).toBe(0); + }); + + it('should return correct size and keys', () => { + MultimodalCache.set('use-case-1', true); + MultimodalCache.set('use-case-2', false); + + const stats = MultimodalCache.getStats(); + expect(stats.size).toBe(2); + expect(stats.keys).toContain('use-case-1'); + expect(stats.keys).toContain('use-case-2'); + }); + }); +}); diff --git a/source/lambda/files-management/test/utils/utils.test.ts b/source/lambda/files-management/test/utils/utils.test.ts new file mode 100644 index 00000000..8a23c077 --- /dev/null +++ b/source/lambda/files-management/test/utils/utils.test.ts @@ -0,0 +1,554 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + extractUserId, + extractUseCaseId, + checkEnv, + parseEventBody, + delay, + generateUUID, + retryWithBackoff, + setupMetricsDimensions, + extractFileInfo, + getContentTypeFromExtension, + handleLambdaError +} from '../../utils/utils'; +import { APIGatewayEvent } from 'aws-lambda'; +import { + DOCUMENT_CONTENT_TYPES, + IMAGE_CONTENT_TYPES, + REQUIRED_ENV_VARS, + MAX_INPUT_PAYLOAD_SIZE, + AMZN_TRACE_ID_HEADER +} from '../../utils/constants'; +import RequestValidationError from '../../utils/error'; +import { logger as mockLogger, metrics as mockMetrics } from '../../power-tools-init'; +import { formatError as mockFormatError } from '../../utils/http-response-formatters'; + +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + metrics: { + addMetric: jest.fn(), + setDefaultDimensions: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id') + } +})); + +jest.mock('../../validators/file-validator', () => ({ + FileValidator: jest.fn().mockImplementation(() => ({ + validateMultimodalEnabled: jest.fn() + })) +})); + +jest.mock('../../utils/http-response-formatters', () => ({ + formatError: jest.fn().mockReturnValue({ + statusCode: 500, + body: JSON.stringify({ message: 'Mocked error' }) + }) +})); + +// Import mocked functions +// Helper function to create a minimal valid APIGatewayEvent mock +const createMockAPIGatewayEvent = (overrides: Partial = {}): APIGatewayEvent => ({ + body: null, + headers: {}, + multiValueHeaders: {}, + httpMethod: 'GET', + isBase64Encoded: false, + path: '/test', + pathParameters: null, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + accountId: '123456789012', + apiId: 'test-api', + authorizer: {}, + httpMethod: 'GET', + identity: { sourceIp: '127.0.0.1', userAgent: 'test-agent' } as any, + path: '/test', + protocol: 'HTTP/1.1', + requestId: 'test-request-id', + requestTime: '01/Jan/2023:00:00:00 +0000', + requestTimeEpoch: 1672531200000, + resourceId: 'test-resource', + resourcePath: '/test', + stage: 'test' + }, + resource: '/test', + ...overrides +}); + +describe('Utils - Extraction Functions', () => { + beforeEach(() => { + jest.clearAllMocks(); + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0084/v1.0.0" }'; + process.env.AWS_REGION = 'us-east-1'; + }); + + describe('extractUserId', () => { + it('should extract userId from authorizer context', () => { + const mockEvent = createMockAPIGatewayEvent({ + requestContext: { + ...createMockAPIGatewayEvent().requestContext, + authorizer: { + UserId: 'user-123' + } + } + }); + + const result = extractUserId(mockEvent); + expect(result).toBe('user-123'); + }); + + it('should throw error when requestContext is missing', () => { + const mockEvent = createMockAPIGatewayEvent({ + requestContext: undefined as any + }); + expect(() => extractUserId(mockEvent)).toThrow('Missing authorizer context in API Gateway event'); + }); + + it('should throw error when authorizer is missing', () => { + const mockEvent = createMockAPIGatewayEvent({ + requestContext: { + ...createMockAPIGatewayEvent().requestContext, + authorizer: undefined as any + } + }); + expect(() => extractUserId(mockEvent)).toThrow('Missing authorizer context in API Gateway event'); + }); + + it('should throw error when UserId is missing', () => { + const mockEvent = createMockAPIGatewayEvent({ + requestContext: { + ...createMockAPIGatewayEvent().requestContext, + authorizer: {} + } + }); + expect(() => extractUserId(mockEvent)).toThrow('Missing UserId in authorizer context'); + }); + + it('should throw error when UserId is empty string', () => { + const mockEvent = createMockAPIGatewayEvent({ + requestContext: { + ...createMockAPIGatewayEvent().requestContext, + authorizer: { + UserId: '' + } + } + }); + expect(() => extractUserId(mockEvent)).toThrow('Missing UserId in authorizer context'); + }); + }); + + describe('extractUseCaseId', () => { + it('should extract useCaseId from path parameters', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { + useCaseId: 'test-use-case-123' + } + }); + + const result = extractUseCaseId(mockEvent); + expect(result).toBe('test-use-case-123'); + }); + + it('should throw error when pathParameters is null', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: null + }); + expect(() => extractUseCaseId(mockEvent)).toThrow('Missing useCaseId in path parameters'); + }); + + it('should throw error when useCaseId is missing from pathParameters', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { + someOtherParam: 'value' + } + }); + expect(() => extractUseCaseId(mockEvent)).toThrow('Missing useCaseId in path parameters'); + }); + + it('should throw error when useCaseId is empty string', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { + useCaseId: '' + } + }); + expect(() => extractUseCaseId(mockEvent)).toThrow('Missing useCaseId in path parameters'); + }); + + it('should handle multiple path parameters', () => { + const mockEvent = createMockAPIGatewayEvent({ + pathParameters: { + useCaseId: 'test-use-case-123', + otherParam: 'other-value', + anotherParam: 'another-value' + } + }); + + const result = extractUseCaseId(mockEvent); + expect(result).toBe('test-use-case-123'); + }); + }); + + describe('checkEnv', () => { + const originalEnv = {}; + + beforeEach(() => { + jest.resetModules(); + process.env = { ...originalEnv }; + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0084/v1.0.0" }'; + process.env.AWS_REGION = 'us-east-1'; + }); + + afterEach(() => { + process.env = originalEnv; + }); + + it('should pass when all required environment variables are set', () => { + REQUIRED_ENV_VARS.forEach((envVar) => { + process.env[envVar] = 'test-value'; + }); + + expect(() => checkEnv()).not.toThrow(); + }); + + it('should throw error when required environment variables are missing', () => { + delete process.env[REQUIRED_ENV_VARS[0]]; + + expect(() => checkEnv()).toThrow(`Missing required environment variables: ${REQUIRED_ENV_VARS[0]}`); + }); + + it('should throw error when multiple environment variables are missing', () => { + delete process.env[REQUIRED_ENV_VARS[0]]; + delete process.env[REQUIRED_ENV_VARS[1]]; + + expect(() => checkEnv()).toThrow( + `Missing required environment variables: ${REQUIRED_ENV_VARS[0]}, ${REQUIRED_ENV_VARS[1]}` + ); + }); + }); + + describe('parseEventBody', () => { + it('should parse valid JSON body', () => { + const mockEvent = createMockAPIGatewayEvent({ + body: JSON.stringify({ test: 'value' }) + }); + + const result = parseEventBody(mockEvent); + + expect(result).toEqual({ test: 'value' }); + }); + + it('should parse empty body as empty object', () => { + const mockEvent = createMockAPIGatewayEvent({ + body: '{}' + }); + + const result = parseEventBody(mockEvent); + + expect(result).toEqual({}); + }); + + it('should handle null body', () => { + const mockEvent = createMockAPIGatewayEvent({ + body: null + }); + + const result = parseEventBody(mockEvent); + + expect(result).toEqual({}); + }); + + it('should throw error for body exceeding size limit', () => { + const largeBody = 'x'.repeat(MAX_INPUT_PAYLOAD_SIZE + 1); + const mockEvent = createMockAPIGatewayEvent({ + body: largeBody + }); + + expect(() => parseEventBody(mockEvent)).toThrow(RequestValidationError); + expect(() => parseEventBody(mockEvent)).toThrow('Request body exceeds maximum allowed size'); + }); + + it('should throw error for invalid JSON', () => { + const mockEvent = createMockAPIGatewayEvent({ + body: '{ invalid json' + }); + + expect(() => parseEventBody(mockEvent)).toThrow(RequestValidationError); + expect(() => parseEventBody(mockEvent)).toThrow('Invalid JSON in request body'); + }); + + it('should throw error for non-object JSON', () => { + const mockEvent = createMockAPIGatewayEvent({ + body: '"string value"' + }); + + expect(() => parseEventBody(mockEvent)).toThrow(RequestValidationError); + expect(() => parseEventBody(mockEvent)).toThrow('Invalid request body format'); + }); + + it('should throw error for array JSON', () => { + const mockEvent = createMockAPIGatewayEvent({ + body: '[1, 2, 3]' + }); + + expect(() => parseEventBody(mockEvent)).toThrow(RequestValidationError); + expect(() => parseEventBody(mockEvent)).toThrow('Invalid request body format'); + }); + }); + + describe('delay', () => { + it('should delay for specified milliseconds', async () => { + const start = Date.now(); + await delay(100); + const end = Date.now(); + + expect(end - start).toBeGreaterThanOrEqual(90); + }); + }); + + describe('generateUUID', () => { + it('should generate full UUID by default', () => { + const uuid = generateUUID(); + + expect(uuid).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/); + }); + + it('should generate short UUID when requested', () => { + const shortUuid = generateUUID(true); + + expect(shortUuid).toMatch(/^[0-9a-f]{8}$/); + }); + + it('should generate different UUIDs on multiple calls', () => { + const uuid1 = generateUUID(); + const uuid2 = generateUUID(); + + expect(uuid1).not.toBe(uuid2); + }); + }); + + describe('retryWithBackoff', () => { + it('should succeed on first attempt', async () => { + const operation = jest.fn().mockResolvedValue('success'); + const retrySettings = { maxRetries: 3, backOffRate: 2, initialDelayMs: 10 }; + + const result = await retryWithBackoff(operation, retrySettings); + + expect(result).toBe('success'); + expect(operation).toHaveBeenCalledTimes(1); + }); + + it('should retry on failure and eventually succeed', async () => { + const operation = jest + .fn() + .mockRejectedValueOnce(new Error('fail 1')) + .mockRejectedValueOnce(new Error('fail 2')) + .mockResolvedValue('success'); + const retrySettings = { maxRetries: 3, backOffRate: 2, initialDelayMs: 10 }; + + const result = await retryWithBackoff(operation, retrySettings); + + expect(result).toBe('success'); + expect(operation).toHaveBeenCalledTimes(3); + }); + + it('should throw last error after max retries', async () => { + const finalError = new Error('final failure'); + const operation = jest.fn().mockRejectedValue(finalError); + const retrySettings = { maxRetries: 2, backOffRate: 2, initialDelayMs: 10 }; + + await expect(retryWithBackoff(operation, retrySettings)).rejects.toThrow('final failure'); + expect(operation).toHaveBeenCalledTimes(3); // initial + 2 retries + }); + }); + + describe('setupMetricsDimensions', () => { + it('should set metrics dimensions correctly', () => { + setupMetricsDimensions('use-case-123'); + + expect(mockMetrics.setDefaultDimensions).toHaveBeenCalledWith({ + 'UseCaseId': 'use-case-123' + }); + }); + }); + + describe('extractFileInfo', () => { + it('should extract file info for PDF file', () => { + const result = extractFileInfo('document.pdf'); + + expect(result).toEqual({ + fileName: 'document.pdf', + fileExtension: 'pdf', + contentType: DOCUMENT_CONTENT_TYPES['pdf'] + }); + }); + + it('should extract file info for image file', () => { + const result = extractFileInfo('image.png'); + + expect(result).toEqual({ + fileName: 'image.png', + fileExtension: 'png', + contentType: IMAGE_CONTENT_TYPES['png'] + }); + }); + + it('should throw error for file with no extension', () => { + expect(() => extractFileInfo('filename')).toThrow(RequestValidationError); + expect(() => extractFileInfo('filename')).toThrow( + 'File "filename" has no extension. All files must have a valid extension.' + ); + }); + + it('should throw error for file with empty extension after dot', () => { + expect(() => extractFileInfo('filename.')).toThrow(RequestValidationError); + expect(() => extractFileInfo('filename.')).toThrow( + 'File "filename." has no extension. All files must have a valid extension.' + ); + }); + + it('should trim whitespace from filename', () => { + const result = extractFileInfo(' document.txt '); + + expect(result).toEqual({ + fileName: 'document.txt', + fileExtension: 'txt', + contentType: DOCUMENT_CONTENT_TYPES['txt'] + }); + }); + + it('should throw error for unsupported file extension', () => { + expect(() => extractFileInfo('malicious.exe')).toThrow(RequestValidationError); + expect(() => extractFileInfo('malicious.exe')).toThrow( + 'File extension "exe" is not supported. Supported extensions: png, jpg, jpeg, gif, webp, pdf, csv, doc, docx, xls, xlsx, html, txt, md' + ); + }); + + it('should throw error for unsupported file extension with multiple dots', () => { + expect(() => extractFileInfo('file.name.exe')).toThrow(RequestValidationError); + expect(() => extractFileInfo('file.name.exe')).toThrow('File extension "exe" is not supported'); + }); + + it('should reject uppercase extensions for supported file types', () => { + expect(() => extractFileInfo('DOCUMENT.PDF')).toThrow(RequestValidationError); + expect(() => extractFileInfo('DOCUMENT.PDF')).toThrow('File extension "PDF" is not supported'); + }); + + it('should throw error for unsupported extensions', () => { + expect(() => extractFileInfo('MALICIOUS.EXE')).toThrow(RequestValidationError); + expect(() => extractFileInfo('MALICIOUS.EXE')).toThrow('File extension "EXE" is not supported'); + }); + + it('should pass validation for all supported image extensions', () => { + const imageExtensions = ['png', 'jpg', 'jpeg', 'gif', 'webp']; + + imageExtensions.forEach((ext) => { + const result = extractFileInfo(`image.${ext}`); + expect(result.fileExtension).toBe(ext); + expect(result.contentType).toBe(IMAGE_CONTENT_TYPES[ext]); + }); + }); + + it('should pass validation for all supported document extensions', () => { + const documentExtensions = ['pdf', 'csv', 'doc', 'docx', 'xls', 'xlsx', 'html', 'txt', 'md']; + + documentExtensions.forEach((ext) => { + const result = extractFileInfo(`document.${ext}`); + expect(result.fileExtension).toBe(ext); + expect(result.contentType).toBe(DOCUMENT_CONTENT_TYPES[ext]); + }); + }); + }); + + describe('getContentTypeFromExtension', () => { + it('should return correct content type for document extensions', () => { + Object.entries(DOCUMENT_CONTENT_TYPES).forEach(([ext, contentType]) => { + expect(getContentTypeFromExtension(ext)).toBe(contentType); + }); + }); + + it('should return correct content type for image extensions', () => { + Object.entries(IMAGE_CONTENT_TYPES).forEach(([ext, contentType]) => { + expect(getContentTypeFromExtension(ext)).toBe(contentType); + }); + }); + + it('should throw error for unknown extensions', () => { + expect(() => getContentTypeFromExtension('unknown')).toThrow( + 'Unsupported file extension: unknown. This indicates a validation error.' + ); + }); + + it('should reject uppercase extensions', () => { + expect(() => getContentTypeFromExtension('PDF')).toThrow('Unsupported file extension: PDF'); + expect(() => getContentTypeFromExtension('PNG')).toThrow('Unsupported file extension: PNG'); + }); + }); + + describe('handleLambdaError', () => { + it('should handle RequestValidationError', () => { + const error = new RequestValidationError('Validation failed'); + + const result = handleLambdaError(error, 'test-action', 'TestContext'); + + expect(mockFormatError).toHaveBeenCalledWith({ + message: expect.stringContaining('Request Validation Error'), + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + expect(result).toBeDefined(); + }); + + it('should handle generic errors', () => { + const error = new Error('Generic error'); + + const result = handleLambdaError(error, 'test-action', 'TestContext'); + + expect(mockFormatError).toHaveBeenCalledWith({ + message: expect.stringContaining('Internal Error'), + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + expect(result).toBeDefined(); + }); + + it('should handle errors without context', () => { + const error = new Error('Generic error'); + + const result = handleLambdaError(error, 'test-action'); + + expect(mockFormatError).toHaveBeenCalledWith({ + message: expect.stringContaining('Internal Error'), + extraHeaders: { [AMZN_TRACE_ID_HEADER]: 'test-trace-id' } + }); + expect(result).toBeDefined(); + }); + + it('should log validation errors correctly', () => { + const error = new RequestValidationError('Validation failed'); + + handleLambdaError(error, 'test-action', 'TestContext'); + + expect(mockLogger.error).toHaveBeenCalledWith( + expect.stringContaining('Validation of TestContext request failed with error') + ); + }); + + it('should log generic errors correctly', () => { + const error = new Error('Generic error'); + + handleLambdaError(error, 'test-action', 'TestContext'); + + expect(mockLogger.error).toHaveBeenCalledWith(expect.stringContaining('TestContext Management Error')); + }); + }); +}); diff --git a/source/lambda/files-management/test/validators/file-validator.test.ts b/source/lambda/files-management/test/validators/file-validator.test.ts new file mode 100644 index 00000000..05399e51 --- /dev/null +++ b/source/lambda/files-management/test/validators/file-validator.test.ts @@ -0,0 +1,213 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { FileValidator } from '../../validators/file-validator'; +import { MultimodalCache } from '../../utils/multimodal-cache'; +import { + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + USE_CASES_TABLE_NAME_ENV_VAR, + MULTIMODAL_ENABLED_ENV_VAR, + CloudWatchMetrics +} from '../../utils/constants'; +import { DdbConfigService } from '../../services/ddb-config-service'; +import { metrics } from '../../power-tools-init'; + +jest.mock('../../services/ddb-config-service', () => ({ + DdbConfigService: jest.fn().mockImplementation(() => ({ + fetchUseCaseConfigRecordKey: jest.fn(), + fetchUseCaseMultimodalityConfig: jest.fn() + })) +})); +jest.mock('../../utils/multimodal-cache', () => ({ + MultimodalCache: { + get: jest.fn(), + set: jest.fn(), + cleanupExpiredEntries: jest.fn() + } +})); +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + debug: jest.fn() + }, + tracer: { + captureAWSv3Client: jest.fn(), + captureMethod: () => (target: any, propertyKey: string, descriptor: PropertyDescriptor) => descriptor + }, + metrics: { + addMetric: jest.fn() + } +})); + +describe('FileValidator', () => { + let fileValidator: FileValidator; + let mockDdbConfigService: jest.Mocked; + let mockMultimodalCache: jest.Mocked; + + beforeEach(() => { + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'test-use-case-config-table'; + process.env[USE_CASES_TABLE_NAME_ENV_VAR] = 'test-use-cases-table'; + + mockMultimodalCache = MultimodalCache as jest.Mocked; + + mockDdbConfigService = { + fetchUseCaseConfigRecordKey: jest.fn(), + fetchUseCaseMultimodalityConfig: jest.fn() + }; + + (DdbConfigService as jest.MockedClass).mockImplementation(() => mockDdbConfigService); + + fileValidator = new FileValidator(); + }); + + afterEach(() => { + jest.clearAllMocks(); + delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; + delete process.env[USE_CASES_TABLE_NAME_ENV_VAR]; + delete process.env[MULTIMODAL_ENABLED_ENV_VAR]; + }); + + describe('validateMultimodalCapability', () => { + const testUseCaseId = 'test-use-case-123'; + + beforeEach(() => { + // Reset cache mocks + mockMultimodalCache.get.mockReturnValue(undefined); + mockMultimodalCache.set.mockClear(); + mockMultimodalCache.cleanupExpiredEntries.mockClear(); + }); + + describe('cache behavior', () => { + it('should return early if cache has enabled=true', async () => { + mockMultimodalCache.get.mockReturnValue(true); + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).resolves.not.toThrow(); + + expect(mockMultimodalCache.get).toHaveBeenCalledWith(testUseCaseId); + expect(mockDdbConfigService.fetchUseCaseConfigRecordKey).not.toHaveBeenCalled(); + }); + + it('should throw error if cache has enabled=false', async () => { + mockMultimodalCache.get.mockReturnValue(false); + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).rejects.toThrow( + `Multimodal functionality is not enabled for use case: ${testUseCaseId}` + ); + + expect(mockMultimodalCache.get).toHaveBeenCalledWith(testUseCaseId); + expect(mockDdbConfigService.fetchUseCaseConfigRecordKey).not.toHaveBeenCalled(); + }); + + it('should cleanup expired entries when cache miss', async () => { + mockMultimodalCache.get.mockReturnValue(undefined); + process.env.MULTIMODAL_ENABLED = 'true'; + + await fileValidator.validateMultimodalCapability(testUseCaseId); + + expect(mockMultimodalCache.cleanupExpiredEntries).toHaveBeenCalled(); + }); + }); + + describe('environment variable validation', () => { + it('should pass when MULTIMODAL_ENABLED env var is true', async () => { + mockMultimodalCache.get.mockReturnValue(undefined); // Cache miss + process.env[MULTIMODAL_ENABLED_ENV_VAR] = 'true'; + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).resolves.not.toThrow(); + + expect(mockMultimodalCache.set).toHaveBeenCalledWith(testUseCaseId, true); + }); + + it('should throw error when MULTIMODAL_ENABLED env var is false', async () => { + mockMultimodalCache.get.mockReturnValue(undefined); // Cache miss + process.env[MULTIMODAL_ENABLED_ENV_VAR] = 'false'; + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).rejects.toThrow( + `Multimodal functionality is not enabled for use case: ${testUseCaseId}` + ); + + expect(mockMultimodalCache.set).toHaveBeenCalledWith(testUseCaseId, false); + }); + }); + + describe('database validation', () => { + beforeEach(() => { + mockMultimodalCache.get.mockReturnValue(undefined); // Cache miss + delete process.env[MULTIMODAL_ENABLED_ENV_VAR]; // Force database lookup + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'test-use-case-config-table'; + }); + + it('should validate through database when env var not set', async () => { + mockDdbConfigService.fetchUseCaseConfigRecordKey.mockResolvedValue('test-record-key'); + mockDdbConfigService.fetchUseCaseMultimodalityConfig.mockResolvedValue(true); + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).resolves.not.toThrow(); + + expect(mockDdbConfigService.fetchUseCaseConfigRecordKey).toHaveBeenCalledWith(testUseCaseId); + expect(mockDdbConfigService.fetchUseCaseMultimodalityConfig).toHaveBeenCalledWith('test-record-key'); + expect(mockMultimodalCache.set).toHaveBeenCalledWith(testUseCaseId, true); + }); + + it('should throw error when use case config not found', async () => { + mockDdbConfigService.fetchUseCaseConfigRecordKey.mockRejectedValue( + new Error(`Use case configuration not found for useCaseId: ${testUseCaseId}`) + ); + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).rejects.toThrow( + `Use case configuration not found for useCaseId: ${testUseCaseId}` + ); + }); + + it('should throw error when multimodal is disabled in database', async () => { + mockDdbConfigService.fetchUseCaseConfigRecordKey.mockResolvedValue('test-record-key'); + mockDdbConfigService.fetchUseCaseMultimodalityConfig.mockResolvedValue(false); + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).rejects.toThrow( + `Multimodal functionality is not enabled for use case: ${testUseCaseId}` + ); + + expect(mockMultimodalCache.set).toHaveBeenCalledWith(testUseCaseId, false); + }); + + it('should throw error when neither env vars are available', async () => { + // Create a new FileValidator instance without the USE_CASES_TABLE_NAME_ENV_VAR + delete process.env[USE_CASES_TABLE_NAME_ENV_VAR]; + const newFileValidator = new FileValidator(); + + await expect(newFileValidator.validateMultimodalCapability(testUseCaseId)).rejects.toThrow( + `Neither ${MULTIMODAL_ENABLED_ENV_VAR} nor ${USE_CASES_TABLE_NAME_ENV_VAR} environment variables are available` + ); + }); + + it('should handle database errors gracefully', async () => { + const error = new Error('DynamoDB connection failed'); + mockDdbConfigService.fetchUseCaseConfigRecordKey.mockRejectedValue(error); + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).rejects.toThrow( + 'Multimodal validation failed: DynamoDB connection failed' + ); + }); + }); + + describe('metrics', () => { + it('should add metric when multimodal is disabled', async () => { + mockMultimodalCache.get.mockReturnValue(false); + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).rejects.toThrow(); + + expect(metrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.MULTIMODAL_DISABLED_ERROR, 'Count', 1); + }); + + it('should add metric when env var is false', async () => { + mockMultimodalCache.get.mockReturnValue(undefined); + process.env[MULTIMODAL_ENABLED_ENV_VAR] = 'false'; + + await expect(fileValidator.validateMultimodalCapability(testUseCaseId)).rejects.toThrow(); + + expect(metrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.MULTIMODAL_DISABLED_ERROR, 'Count', 1); + }); + }); + }); +}); diff --git a/source/lambda/files-management/test/validators/request-validators.test.ts b/source/lambda/files-management/test/validators/request-validators.test.ts new file mode 100644 index 00000000..d7939963 --- /dev/null +++ b/source/lambda/files-management/test/validators/request-validators.test.ts @@ -0,0 +1,258 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { validateFileUploadRequest, validateFileDeleteRequest } from '../../validators/request-validators'; +import { FileUploadRequest, FileDeleteRequest } from '../../models/types'; +import RequestValidationError from '../../utils/error'; +import { FILE_OPERATION_CONSTRAINTS } from '../../utils/constants'; + +describe('Request Validators', () => { + describe('validateFileUploadRequest', () => { + const baseRequest: FileUploadRequest = { + useCaseId: 'test-use-case', + conversationId: 'test-conversation', + messageId: 'test-message', + fileNames: ['test1.txt'] + }; + + it('should pass validation for valid request with single file', () => { + expect(() => validateFileUploadRequest(baseRequest)).not.toThrow(); + }); + + it('should pass validation for valid request with multiple files within limit', () => { + const request: FileUploadRequest = { + ...baseRequest, + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST }, + (_, i) => `test${i + 1}.txt` + ) + }; + + expect(() => validateFileUploadRequest(request)).not.toThrow(); + }); + + it('should throw error when files exceed maximum limit', () => { + const request: FileUploadRequest = { + ...baseRequest, + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST + 1 }, + (_, i) => `test${i + 1}.txt` + ) + }; + + expect(() => validateFileUploadRequest(request)).toThrow(RequestValidationError); + expect(() => validateFileUploadRequest(request)).toThrow( + `Too many files. Maximum ${FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST} files allowed per request` + ); + }); + + it('should pass validation for empty files array', () => { + const request: FileUploadRequest = { + ...baseRequest, + fileNames: [] + }; + + expect(() => validateFileUploadRequest(request)).not.toThrow(); + }); + + it('should pass validation at exact limit boundary', () => { + const request: FileUploadRequest = { + ...baseRequest, + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST }, + (_, i) => `test${i + 1}.txt` + ) + }; + + expect(() => validateFileUploadRequest(request)).not.toThrow(); + }); + + it('should validate with different file types', () => { + const request: FileUploadRequest = { + ...baseRequest, + fileNames: ['document.pdf', 'image.jpg', 'data.csv'] + }; + + expect(() => validateFileUploadRequest(request)).not.toThrow(); + }); + }); + + describe('validateFileDeleteRequest', () => { + const baseRequest: FileDeleteRequest = { + useCaseId: 'test-use-case', + conversationId: 'test-conversation', + messageId: 'test-message', + fileNames: ['test1.txt'] + }; + + it('should pass validation for valid request with single file', () => { + expect(() => validateFileDeleteRequest(baseRequest)).not.toThrow(); + }); + + it('should pass validation for valid request with multiple files within limit', () => { + const request: FileDeleteRequest = { + ...baseRequest, + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST }, + (_, i) => `test${i + 1}.txt` + ) + }; + + expect(() => validateFileDeleteRequest(request)).not.toThrow(); + }); + + it('should throw error when files exceed maximum limit', () => { + const request: FileDeleteRequest = { + ...baseRequest, + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST + 1 }, + (_, i) => `test${i + 1}.txt` + ) + }; + + expect(() => validateFileDeleteRequest(request)).toThrow(RequestValidationError); + expect(() => validateFileDeleteRequest(request)).toThrow( + `Too many files to delete. Maximum ${FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST} files allowed per request` + ); + }); + + it('should pass validation at exact limit boundary', () => { + const request: FileDeleteRequest = { + ...baseRequest, + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST }, + (_, i) => `test${i + 1}.txt` + ) + }; + + expect(() => validateFileDeleteRequest(request)).not.toThrow(); + }); + + it('should validate with different file name patterns', () => { + const request: FileDeleteRequest = { + ...baseRequest, + fileNames: [ + 'document.pdf', + 'image-with-dashes.jpg', + 'file_with_underscores.txt', + 'file with spaces.docx', + 'file.with.multiple.dots.csv', + 'UPPERCASE.TXT', + 'file123.json' + ] + }; + + expect(() => validateFileDeleteRequest(request)).not.toThrow(); + }); + + it('should validate with very long file names', () => { + const longFileName = 'a'.repeat(255) + '.txt'; + const request: FileDeleteRequest = { + ...baseRequest, + fileNames: [longFileName] + }; + + expect(() => validateFileDeleteRequest(request)).not.toThrow(); + }); + }); + + describe('Error handling', () => { + it('should throw RequestValidationError with correct name', () => { + const request: FileUploadRequest = { + useCaseId: 'test', + conversationId: 'test', + messageId: 'test', + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST + 1 }, + (_, i) => `test${i}.txt` + ) + }; + + try { + validateFileUploadRequest(request); + fail('Expected RequestValidationError to be thrown'); + } catch (error) { + expect(error).toBeInstanceOf(RequestValidationError); + expect((error as RequestValidationError).name).toBe('CustomHttpError'); + } + }); + + it('should throw RequestValidationError for delete request with correct name', () => { + const request: FileDeleteRequest = { + useCaseId: 'test', + conversationId: 'test', + messageId: 'test', + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST + 1 }, + (_, i) => `test${i}.txt` + ) + }; + + try { + validateFileDeleteRequest(request); + fail('Expected RequestValidationError to be thrown'); + } catch (error) { + expect(error).toBeInstanceOf(RequestValidationError); + expect((error as RequestValidationError).name).toBe('CustomHttpError'); + } + }); + }); + + describe('Boundary testing', () => { + it('should handle edge case with exactly maximum files for upload', () => { + const request: FileUploadRequest = { + useCaseId: 'test', + conversationId: 'test', + messageId: 'test', + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST }, + (_, i) => `boundary-test-${i}.txt` + ) + }; + + expect(() => validateFileUploadRequest(request)).not.toThrow(); + }); + + it('should handle edge case with exactly maximum files for delete', () => { + const request: FileDeleteRequest = { + useCaseId: 'test', + conversationId: 'test', + messageId: 'test', + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST }, + (_, i) => `boundary-test-${i}.txt` + ) + }; + + expect(() => validateFileDeleteRequest(request)).not.toThrow(); + }); + + it('should fail with one file over the upload limit', () => { + const request: FileUploadRequest = { + useCaseId: 'test', + conversationId: 'test', + messageId: 'test', + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST + 1 }, + (_, i) => `over-limit-${i}.txt` + ) + }; + + expect(() => validateFileUploadRequest(request)).toThrow(RequestValidationError); + }); + + it('should fail with one file over the delete limit', () => { + const request: FileDeleteRequest = { + useCaseId: 'test', + conversationId: 'test', + messageId: 'test', + fileNames: Array.from( + { length: FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST + 1 }, + (_, i) => `over-limit-${i}.txt` + ) + }; + + expect(() => validateFileDeleteRequest(request)).toThrow(RequestValidationError); + }); + }); +}); diff --git a/source/lambda/files-management/tsconfig.json b/source/lambda/files-management/tsconfig.json new file mode 100644 index 00000000..96392e96 --- /dev/null +++ b/source/lambda/files-management/tsconfig.json @@ -0,0 +1,85 @@ +{ + "compilerOptions": { + "target": "ES2018", + "module": "commonjs", + "lib": [ + "es2018", + "dom" + ], + "declaration": true, + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "noImplicitThis": true, + "alwaysStrict": true, + "noUnusedLocals": false, + "noUnusedParameters": false, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": false, + "inlineSourceMap": true, + "inlineSources": true, + "experimentalDecorators": true, + "strictPropertyInitialization": false, + "typeRoots": [ + "./node_modules/@types" + ], + "esModuleInterop": true, + "resolveJsonModule": true, + "outDir": "./dist", + "moduleResolution": "Node", + "rootDir": ".", + "paths": { + "aws-sdk-lib": [ + "../layers/aws-sdk-lib/dist" + ], + "aws-node-user-agent-config": [ + "../layers/aws-node-user-agent-config/dist" + ], + "@middy/core": [ + "../layers/aws-node-user-agent-config/node_modules/@middy/core" + ], + "@smithy/types": [ + "../layers/aws-sdk-lib/node_modules/@smithy/types" + ], + "aws-lambda": [ + "../layers/aws-sdk-lib/node_modules/@types/aws-lambda" + ], + "@aws-lambda-powertools/logger": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/logger" + ], + "@aws-lambda-powertools/logger/middleware": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/logger/lib/cjs/middleware/middy.d.ts" + ], + "@aws-lambda-powertools/tracer": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/tracer" + ], + "@aws-lambda-powertools/tracer/middleware": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/tracer/lib/cjs/middleware/middy.d.ts" + ], + "@aws-lambda-powertools/metrics": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/metrics" + ], + "@aws-lambda-powertools/metrics/middleware": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/metrics/lib/cjs/middleware/middy.d.ts" + ], + "@aws-sdk/client-dynamodb": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/client-dynamodb" + ], + "@aws-sdk/client-s3": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/client-s3" + ], + "@aws-sdk/s3-presigned-post": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/s3-presigned-post" + ], + "@aws-sdk/s3-request-presigner": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/s3-request-presigner" + ], + "@aws-sdk/util-dynamodb": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/util-dynamodb" + ], + "@aws-sdk/types": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/types" + ], + } + } +} \ No newline at end of file diff --git a/source/lambda/files-management/utils/constants.ts b/source/lambda/files-management/utils/constants.ts new file mode 100644 index 00000000..2dd5ed94 --- /dev/null +++ b/source/lambda/files-management/utils/constants.ts @@ -0,0 +1,106 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export const RETRY_CONFIG = { + maxRetries: 3, + backOffRate: 2, + initialDelayMs: 1000 +}; + +export enum FileOperationTypes { + UPLOAD = 'UPLOAD', + DOWNLOAD = 'DOWNLOAD', + DELETE = 'DELETE' +} + +export enum FileStatus { + PENDING = 'pending', + UPLOADED = 'uploaded', + DELETED = 'deleted', + INVALID = 'invalid' +} + +export enum CloudWatchNamespace { + FILE_HANDLING = 'Solution/FileHandling' +} + +// Environment variables +export const MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR = 'MULTIMODAL_METADATA_TABLE_NAME'; +export const MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR = 'MULTIMODAL_DATA_BUCKET'; +export const USE_CASE_CONFIG_TABLE_NAME_ENV_VAR = 'USE_CASE_CONFIG_TABLE_NAME'; +export const USE_CASES_TABLE_NAME_ENV_VAR = 'USE_CASES_TABLE_NAME'; +export const MULTIMODAL_ENABLED_ENV_VAR = 'MULTIMODAL_ENABLED'; + +export const REQUIRED_ENV_VARS = [ + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR +]; + +// X-Ray trace ID header for error tracking +export const AMZN_TRACE_ID_HEADER = '_X_AMZN_TRACE_ID'; + +// Cache TTL for multimodal validation results (5 minutes) +export const MULTIMODAL_CACHE_TTL_MS = 5 * 60 * 1000; + +// Maximum input payload size (same as use-case-management) +export const MAX_INPUT_PAYLOAD_SIZE = 6 * 1024 * 1024; // 6MB + +// File upload constraints (similar to MCP schema upload constraints) +export const MULTIMODAL_FILE_UPLOAD_CONSTRAINTS = { + MIN_FILE_SIZE_BYTES: 1, + MAX_FILE_SIZE_BYTES: 4.5 * 1024 * 1024, // 4.5MB max per file + PRESIGNED_URL_EXPIRY_SECONDS: 3600, // 1 hour + TTL_SECONDS: 10 * 60 // 10 minutes for automatic cleanup +}; + +// File operation limits +export const FILE_OPERATION_CONSTRAINTS = { + MAX_FILES_PER_UPLOAD_REQUEST: 25, + MAX_FILES_PER_DELETE_REQUEST: 25, + DELETION_RECORD_TTL_SECONDS: 300 // 5 minutes +}; + +// MIME content type mappings for file extensions +export const IMAGE_CONTENT_TYPES: Record = { + 'png': 'image/png', + 'jpg': 'image/jpeg', + 'jpeg': 'image/jpeg', + 'gif': 'image/gif', + 'webp': 'image/webp' +}; + +export const DOCUMENT_CONTENT_TYPES: Record = { + 'pdf': 'application/pdf', + 'csv': 'text/csv', + 'doc': 'application/msword', + 'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + 'xls': 'application/vnd.ms-excel', + 'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + 'html': 'text/html', + 'txt': 'text/plain', + 'md': 'text/markdown' +}; + +export const SUPPORTED_MULTIMODAL_FILE_EXTENSIONS = [ + ...Object.keys(IMAGE_CONTENT_TYPES), + ...Object.keys(DOCUMENT_CONTENT_TYPES) +]; + +export const DEFAULT_CONTENT_TYPE = 'application/octet-stream'; + +// CloudWatch metrics for file operations +export enum CloudWatchMetrics { + // Upload metrics + FILE_UPLOAD_TRIGGERED = 'FileUploadTriggered', + FILE_UPLOAD_FAILURE = 'FileUploadFailure', + + // Access and validation failures + FILE_ACCESS_FAILURES = 'FileAccessFailures', + FILE_VALIDATION_ERROR = 'FileValidationError', + MULTIMODAL_DISABLED_ERROR = 'MultimodalDisabledError', + + // Operation counts + FILE_DELETE = 'FileDelete', + FILE_DOWNLOAD = 'FileDownload' +} diff --git a/source/lambda/files-management/utils/error.ts b/source/lambda/files-management/utils/error.ts new file mode 100644 index 00000000..48061f6a --- /dev/null +++ b/source/lambda/files-management/utils/error.ts @@ -0,0 +1,9 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export default class RequestValidationError extends Error { + constructor(readonly message: string) { + super(message); + this.name = 'CustomHttpError'; + } +} diff --git a/source/lambda/files-management/utils/http-response-formatters.ts b/source/lambda/files-management/utils/http-response-formatters.ts new file mode 100644 index 00000000..afb647c7 --- /dev/null +++ b/source/lambda/files-management/utils/http-response-formatters.ts @@ -0,0 +1,75 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +/** + * Utility function to convert any success response into a Http 200/201 response with + * proper formatting and headers. + * + * @param {any} body Response message. This will be stringified and inserted into 'body' + * @param {number} statusCode HTTP status code for the response (default: 200) + * @param {[key: string]: string} extraHeaders any extra headers to include in response. + * any key in extraHeaders will override any header in the defaultHeaders with the same key. + * @returns + */ +export const formatResponse = ( + body: string | { [key: string]: any }, + statusCode: number = 200, + extraHeaders: { [key: string]: string } = {} +) => { + const defaultHeaders = { + 'Content-Type': 'application/json', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Credentials': 'true', + 'Access-Control-Allow-Origin': '*' // NOSONAR - javascript:S5122 - Domain not known at this point. + }; + const headers = typeof extraHeaders === 'undefined' ? defaultHeaders : { ...defaultHeaders, ...extraHeaders }; + body = typeof body === 'string' ? body : JSON.stringify(body); + + return { + 'statusCode': statusCode, + 'headers': headers, + 'isBase64Encoded': false, + 'body': body + }; +}; + +/** + * Formats an error object into a HTTP response with an error status code. + * If error is a string, it is converted to a Object with parameter key `message`. + * Always sends a 400 error response for security reasons (masking 500 errors). + * + * @param {message} Error message + * @param {originalStatusCode} Original error status code (will be masked as 400) + * @param {extraHeaders} any extra headers to include in response. + * @returns + */ +export const formatError = ({ + message, + originalStatusCode, + extraHeaders +}: { + message: string; + originalStatusCode?: number; + extraHeaders?: { [key: string]: string }; +}) => { + const defaultHeaders = { + 'Content-Type': 'application/json', + 'x-amzn-ErrorType': 'CustomExecutionError', + 'Access-Control-Allow-Origin': '*', // NOSONAR - javascript:S5122 - Domain not known at this point. + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET' + }; + + // For security reasons, we mask all errors as 400 Bad Request + // This prevents information leakage about internal server errors + return { + statusCode: 400, + headers: { + ...defaultHeaders, + ...extraHeaders + }, + isBase64Encoded: false, + body: JSON.stringify({ message, originalStatusCode }) + }; +}; diff --git a/source/lambda/files-management/utils/multimodal-cache.ts b/source/lambda/files-management/utils/multimodal-cache.ts new file mode 100644 index 00000000..8d231e3a --- /dev/null +++ b/source/lambda/files-management/utils/multimodal-cache.ts @@ -0,0 +1,90 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { logger } from '../power-tools-init'; +import { MULTIMODAL_CACHE_TTL_MS } from './constants'; + +interface CacheEntry { + enabled: boolean; + timestamp: number; +} + +/** + * Simple in-memory cache for multimodal validation results + * Using Map for simplicity since this lambda doesn't need the advanced features of node-cache + */ +class MultimodalCache { + private static cache = new Map(); + + /** + * Get a cached multimodal validation result + * @param useCaseId - The use case ID to check + * @returns boolean if cached and valid, undefined if not cached or expired + */ + public static get(useCaseId: string): boolean | undefined { + const cachedEntry = this.cache.get(useCaseId); + + if (cachedEntry && Date.now() - cachedEntry.timestamp < MULTIMODAL_CACHE_TTL_MS) { + logger.debug(`Multimodal capability retrieved from cache for useCaseId: ${useCaseId}`); + return cachedEntry.enabled; + } + + // Entry is expired or doesn't exist + if (cachedEntry) { + this.cache.delete(useCaseId); + logger.debug(`Expired cache entry removed for useCaseId: ${useCaseId}`); + } + + return undefined; + } + + /** + * Set a multimodal validation result in cache + * @param useCaseId - The use case ID + * @param enabled - Whether multimodal is enabled + */ + public static set(useCaseId: string, enabled: boolean): void { + this.cache.set(useCaseId, { enabled, timestamp: Date.now() }); + logger.debug(`Multimodal capability cached for useCaseId: ${useCaseId}, enabled: ${enabled}`); + } + + /** + * Clean up expired entries from the cache + * Called periodically to prevent memory leaks + */ + public static cleanupExpiredEntries(): void { + const now = Date.now(); + const expiredKeys: string[] = []; + + for (const [key, entry] of this.cache.entries()) { + if (now - entry.timestamp >= MULTIMODAL_CACHE_TTL_MS) { + expiredKeys.push(key); + } + } + + expiredKeys.forEach((key) => this.cache.delete(key)); + + if (expiredKeys.length > 0) { + logger.debug(`Cleaned up ${expiredKeys.length} expired cache entries`); + } + } + + /** + * Clear all cache entries + */ + public static clear(): void { + this.cache.clear(); + } + + /** + * Get cache statistics + */ + public static getStats(): { size: number; keys: string[] } { + return { + size: this.cache.size, + keys: Array.from(this.cache.keys()) + }; + } +} + +export { MultimodalCache }; diff --git a/source/lambda/files-management/utils/utils.ts b/source/lambda/files-management/utils/utils.ts new file mode 100644 index 00000000..4870212f --- /dev/null +++ b/source/lambda/files-management/utils/utils.ts @@ -0,0 +1,275 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import { randomUUID } from 'crypto'; +import { logger, tracer, metrics } from '../power-tools-init'; +import { formatError } from './http-response-formatters'; +import RequestValidationError from './error'; +import { RetrySettings, FileUploadInfo } from '../models/types'; +import { + AMZN_TRACE_ID_HEADER, + REQUIRED_ENV_VARS, + RETRY_CONFIG, + MAX_INPUT_PAYLOAD_SIZE, + IMAGE_CONTENT_TYPES, + DOCUMENT_CONTENT_TYPES, + SUPPORTED_MULTIMODAL_FILE_EXTENSIONS +} from './constants'; + +/** + * Validates that required environment variables are set + * @param requiredVars - Array of required environment variable names + */ +export const checkEnv = () => { + let missingVars = []; + for (let envVar of REQUIRED_ENV_VARS) { + if (!process.env[envVar]) { + missingVars.push(envVar); + } + } + if (missingVars.length > 0) { + const errMsg = `Missing required environment variables: ${missingVars.join( + ', ' + )}. This should not happen and indicates an issue with your deployment.`; + logger.error(errMsg); + throw new Error(errMsg); + } +}; + +/** + * Generic error handler for Lambda operations + * @param error - The error that occurred + * @param action - The action that was being performed + * @param context - Optional context for error messages (e.g., 'multimodality', 'use case') + * @returns Formatted error response + */ +export const handleLambdaError = (error: unknown, action: string, context: string = ''): any => { + const rootTraceId = tracer.getRootXrayTraceId(); + let errorMessage; + const contextPrefix = context ? `${context} ` : ''; + + if (error instanceof RequestValidationError) { + logger.error(`Validation of ${contextPrefix} request failed with error: ${error}`); + logger.error( + `Error while validating ${contextPrefix} request for action: ${action}, root trace id: ${rootTraceId}` + ); + errorMessage = `Request Validation Error - Please contact support and quote the following trace id: ${rootTraceId}`; + } else { + logger.error(`${contextPrefix} Management Error: ${error}`); + logger.error(`Error while executing ${contextPrefix} action: ${action}, root trace id: ${rootTraceId}`); + errorMessage = `Internal Error - Please contact support and quote the following trace id: ${rootTraceId}`; + } + + return formatError({ + message: errorMessage, + extraHeaders: { [AMZN_TRACE_ID_HEADER]: rootTraceId as string } + }); +}; + +/** + * Safely parses API Gateway event body with basic validations + * @param event - API Gateway event + * @returns Parsed and validated event body + * @throws RequestValidationError if validation fails + */ +export const parseEventBody = (event: APIGatewayEvent): any => { + const body = event.body || '{}'; + + if (body.length > MAX_INPUT_PAYLOAD_SIZE) { + logger.error(`Request body too large: ${body.length} bytes (max: ${MAX_INPUT_PAYLOAD_SIZE})`); + throw new RequestValidationError('Request body exceeds maximum allowed size'); + } + + if (typeof body !== 'string') { + logger.error('Request body must be a string'); + throw new RequestValidationError('Invalid request body format'); + } + + let parsed: any; + try { + parsed = JSON.parse(body); + } catch (error) { + logger.error(`Failed to parse JSON: ${(error as Error).message}`); + throw new RequestValidationError('Invalid JSON in request body'); + } + + if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) { + // Validate it's an object (not primitive or array) + throw new RequestValidationError('Invalid request body format'); + } + + return parsed; +}; + +/** + * Gets retry settings for DynamoDB operations + * @returns RetrySettings object with default values + */ +export function getRetrySettings(): RetrySettings { + return { + maxRetries: RETRY_CONFIG.maxRetries, + backOffRate: RETRY_CONFIG.backOffRate, + initialDelayMs: RETRY_CONFIG.initialDelayMs + }; +} + +/** + * Delays execution for the specified number of milliseconds + * @param delayMillis - Number of milliseconds to delay + * @returns Promise that resolves after the delay + */ +export function delay(delayMillis: number): Promise { + return new Promise((resolve) => setTimeout(resolve, delayMillis)); +} + +/** + * Extracts user ID from API Gateway event context + * @param event - The API Gateway event + * @returns The user ID from the authorizer context + * @throws Error if authorizer context or UserId is missing + */ +export function extractUserId(event: APIGatewayEvent): string { + if (!event.requestContext?.authorizer) { + throw new Error('Missing authorizer context in API Gateway event'); + } + + const userId = event.requestContext.authorizer.UserId; + if (!userId) { + throw new Error('Missing UserId in authorizer context'); + } + + return userId; +} + +/** + * Extracts use case ID from API Gateway event path parameters + * @param event - The API Gateway event + * @returns The use case ID from path parameters + * @throws Error if useCaseId is missing from path parameters + */ +export function extractUseCaseId(event: APIGatewayEvent): string { + const useCaseId = event.pathParameters?.useCaseId; + if (!useCaseId) { + throw new Error('Missing useCaseId in path parameters'); + } + + return useCaseId; +} + +/** + * Generates a UUID v4 string using the native crypto.randomUUID() method + * @param shortUUID - Optional flag to return only the first segment of the UUID + * @returns A UUID v4 string (full or shortened) + */ +export function generateUUID(shortUUID: boolean = false): string { + const generatedUuid = randomUUID(); + if (shortUUID) { + return generatedUuid.split('-')[0]; + } + return generatedUuid; +} + +/** + * Retry function with exponential backoff + * @param operation - The operation to retry + * @param retrySettings - Retry configuration + * @returns Promise with operation result + */ +export const retryWithBackoff = async (operation: () => Promise, retrySettings: RetrySettings): Promise => { + let lastError: Error; + let delayMs = retrySettings.initialDelayMs; + + let attempt = 0; + do { + try { + return await operation(); + } catch (error) { + lastError = error as Error; + + if (attempt === retrySettings.maxRetries) { + break; + } + + logger.warn( + `Operation failed, retrying in ${delayMs}ms (attempt ${attempt + 1}/${retrySettings.maxRetries + 1}): ${lastError.message}` + ); + + await delay(delayMs); + delayMs *= retrySettings.backOffRate; + } + attempt++; + } while (attempt <= retrySettings.maxRetries); + + throw lastError; +}; + +/** + * Sets up CloudWatch metrics dimensions for file operations + * @param useCaseId - Use case ID + */ +export const setupMetricsDimensions = (useCaseId: string): void => { + const dimensions: Record = { + 'UseCaseId': useCaseId + }; + + metrics.setDefaultDimensions(dimensions); + logger.debug(`Metrics dimensions set: ${JSON.stringify(dimensions)}`); +}; + +/** + * Extracts file information from a file name + * @param fileName - The file name to extract information from + * @returns FileUploadInfo object with extracted information + * @throws RequestValidationError if file has no extension or unsupported extension + */ +export const extractFileInfo = (fileName: string): FileUploadInfo => { + const trimmedFileName = fileName.trim(); + + // Check if file has no extension (no dot or empty extension after dot) + if (!trimmedFileName.includes('.')) { + throw new RequestValidationError( + `File "${trimmedFileName}" has no extension. All files must have a valid extension.` + ); + } + + const fileExtension = trimmedFileName.split('.').pop()?.trim() || ''; + + if (!fileExtension) { + throw new RequestValidationError( + `File "${trimmedFileName}" has no extension. All files must have a valid extension.` + ); + } + + // Validate file extension is supported + if (!SUPPORTED_MULTIMODAL_FILE_EXTENSIONS.includes(fileExtension)) { + throw new RequestValidationError( + `File extension "${fileExtension}" is not supported. Supported extensions: ${SUPPORTED_MULTIMODAL_FILE_EXTENSIONS.join(', ')}` + ); + } + + // Determine content type based on file extension + const contentType = getContentTypeFromExtension(fileExtension); + + return { + fileName: trimmedFileName, + fileExtension, + contentType + }; +}; + +/** + * Gets content type from file extension + * @param extension - A supported file extension + * @returns MIME content type + * @throws Error if extension is not supported + */ +export const getContentTypeFromExtension = (extension: string): string => { + const contentType = IMAGE_CONTENT_TYPES[extension] || DOCUMENT_CONTENT_TYPES[extension]; + + if (!contentType) { + throw new Error(`Unsupported file extension: ${extension}. This indicates a validation error.`); + } + + return contentType; +}; diff --git a/source/lambda/files-management/validators/file-validator.ts b/source/lambda/files-management/validators/file-validator.ts new file mode 100644 index 00000000..65729452 --- /dev/null +++ b/source/lambda/files-management/validators/file-validator.ts @@ -0,0 +1,99 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { logger, tracer, metrics } from '../power-tools-init'; +import { MULTIMODAL_ENABLED_ENV_VAR, USE_CASES_TABLE_NAME_ENV_VAR, CloudWatchMetrics } from '../utils/constants'; +import { MultimodalCache } from '../utils/multimodal-cache'; +import { MetricUnit } from '@aws-lambda-powertools/metrics'; +import { DdbConfigService } from '../services/ddb-config-service'; + +/** + * Service for validating file management capabilities including multimodal support + */ +export class FileValidator { + private ddbConfigService: DdbConfigService; + private useCasesTable: string; + + constructor() { + this.ddbConfigService = new DdbConfigService(); + this.useCasesTable = process.env[USE_CASES_TABLE_NAME_ENV_VAR]!; + } + + /** + * Checks cached multimodal validation status and handles cache-based validation + * @param useCaseId - The use case ID to check in cache + * @returns Promise - Returns true if validation should continue, false if already validated from cache + * @throws Error if multimodal is disabled (cached) + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateCachedMultimodalResult' }) + private async validateCachedMultimodalResult(useCaseId: string): Promise { + const cachedMultimodalEnabled = MultimodalCache.get(useCaseId); // returns boolean based on whether multimodality was fetched before (true/false) or not (undefined) + + if (cachedMultimodalEnabled !== undefined) { + if (!cachedMultimodalEnabled) { + const errorMsg = `Multimodal functionality is not enabled for use case: ${useCaseId}`; + logger.error(errorMsg); + metrics.addMetric(CloudWatchMetrics.MULTIMODAL_DISABLED_ERROR, MetricUnit.Count, 1); + throw new Error(errorMsg); + } + logger.debug(`Multimodal capability validated from cache for useCaseId: ${useCaseId}`); + return false; // Cache hit - no need to continue validation + } + + // Clean up expired cache entries periodically + MultimodalCache.cleanupExpiredEntries(); + return true; // Cache miss - continue with validation + } + + /** + * Validates that multimodal functionality is enabled for the given use case + * @param useCaseId - The use case ID for validation and error messages + * @throws Error if multimodal is not enabled + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateMultimodalCapability' }) + public async validateMultimodalCapability(useCaseId: string): Promise { + // Check cache first + const shouldContinueValidation = await this.validateCachedMultimodalResult(useCaseId); + if (!shouldContinueValidation) { + return; // multimodality is enabled + } + + let multimodalEnabled = false; + + // Check if MULTIMODAL_ENABLED_ENV_VAR is set + const multimodalEnabledEnv = process.env[MULTIMODAL_ENABLED_ENV_VAR]; + if (multimodalEnabledEnv) { + multimodalEnabled = multimodalEnabledEnv.toLowerCase() === 'true'; + logger.debug(`Multimodal capability determined from environment variable: ${multimodalEnabled}`); + } else { + // Query USE_CASES_TABLE and then USE_CASE_CONFIG_TABLE to check if multimodality is enabled on this use case + if (!this.useCasesTable) { + const errorMsg = `Neither ${MULTIMODAL_ENABLED_ENV_VAR} nor ${USE_CASES_TABLE_NAME_ENV_VAR} environment variables are available`; + logger.error(errorMsg); + throw new Error(errorMsg); + } + + try { + const useCaseRecordConfigKey = await this.ddbConfigService.fetchUseCaseConfigRecordKey(useCaseId); + multimodalEnabled = await this.ddbConfigService.fetchUseCaseMultimodalityConfig(useCaseRecordConfigKey); + } catch (error) { + logger.error( + `Failed to validate multimodal capability for useCaseId: ${useCaseId}, error: ${(error as Error).message}` + ); + throw new Error(`Multimodal validation failed: ${(error as Error).message}`); + } + } + + // Cache the result + MultimodalCache.set(useCaseId, multimodalEnabled); + + if (!multimodalEnabled) { + const errorMsg = `Multimodal functionality is not enabled for use case: ${useCaseId}`; + logger.error(errorMsg); + metrics.addMetric(CloudWatchMetrics.MULTIMODAL_DISABLED_ERROR, MetricUnit.Count, 1); + throw new Error(errorMsg); + } + + logger.debug(`Multimodal capability validated for useCaseId: ${useCaseId}`); + } +} diff --git a/source/lambda/files-management/validators/request-validators.ts b/source/lambda/files-management/validators/request-validators.ts new file mode 100644 index 00000000..a463c081 --- /dev/null +++ b/source/lambda/files-management/validators/request-validators.ts @@ -0,0 +1,32 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { FileUploadRequest, FileDeleteRequest } from '../models/types'; +import RequestValidationError from '../utils/error'; +import { FILE_OPERATION_CONSTRAINTS } from '../utils/constants'; + +/** + * Validates file upload request parameters + * @param request - The file upload request to validate + * @throws RequestValidationError if validation fails + */ +export const validateFileUploadRequest = (request: FileUploadRequest): void => { + if (request.fileNames.length > FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST) { + throw new RequestValidationError( + `Too many files. Maximum ${FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_UPLOAD_REQUEST} files allowed per request` + ); + } +}; + +/** + * Validates file delete request parameters + * @param request - The file delete request to validate + * @throws RequestValidationError if validation fails + */ +export const validateFileDeleteRequest = (request: FileDeleteRequest): void => { + if (request.fileNames.length > FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST) { + throw new RequestValidationError( + `Too many files to delete. Maximum ${FILE_OPERATION_CONSTRAINTS.MAX_FILES_PER_DELETE_REQUEST} files allowed per request` + ); + } +}; diff --git a/source/lambda/files-metadata-management/index.ts b/source/lambda/files-metadata-management/index.ts new file mode 100644 index 00000000..e40fc820 --- /dev/null +++ b/source/lambda/files-metadata-management/index.ts @@ -0,0 +1,122 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { EventBridgeEvent } from 'aws-lambda'; +import { injectLambdaContext } from '@aws-lambda-powertools/logger/middleware'; +import { logMetrics } from '@aws-lambda-powertools/metrics/middleware'; +import { captureLambdaHandler } from '@aws-lambda-powertools/tracer/middleware'; +import middy from '@middy/core'; +import { logger, metrics, tracer } from './power-tools-init'; +import { EventBridgeProcessor } from './utils/eventbridge-processor'; +import { MetadataValidator } from './utils/metadata-validator'; +import { FileValidator } from './utils/file-validator'; +import { checkEnv, handleLambdaError } from './utils/utils'; +import { + MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, + MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR, + EVENT_VALIDATION, + ERROR_MESSAGES +} from './utils/constants'; +import { MetadataUpdateResult, ValidationResult } from './models/types'; + +/** + * Main Lambda Handler to update metadata in DynamoDB Table + */ +export const updateFilesMetadataHandler = async (event: EventBridgeEvent): Promise => { + tracer.getSegment(); + try { + checkEnv(); + + const metadataTable = process.env[MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR]!; + const multimodalFilesBucket = process.env[MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]!; + + if ( + event.source !== EVENT_VALIDATION.EXPECTED_SOURCE || + event['detail-type'] !== EVENT_VALIDATION.EXPECTED_DETAIL_TYPE + ) { + const errorMessage = `${ERROR_MESSAGES.INVALID_EVENT_TYPE}: ${event.source}/${event['detail-type']}`; + logger.error(`${errorMessage} - eventSource: ${event.source}, eventDetailType: ${event['detail-type']}`); + throw new Error(errorMessage); + } + + logger.info( + `Processing EventBridge S3 event - eventSource: ${event.source}, eventDetailType: ${event['detail-type']}` + ); + + if (!event.detail) { + const errorMessage = 'Missing event detail in EventBridge event'; + logger.error(`${errorMessage} - eventSource: ${event.source}, eventDetailType: ${event['detail-type']}`); + throw new Error(errorMessage); + } + + const bucketName = event.detail?.bucket?.name; + const objectKey = event.detail?.object?.key; + + if (!bucketName || !objectKey) { + const errorMessage = 'Missing required S3 object information in EventBridge event'; + logger.error( + `${errorMessage} - eventSource: ${event.source}, eventDetailType: ${event['detail-type']}, bucketName: ${bucketName}, objectKey: ${objectKey}` + ); + throw new Error(errorMessage); + } + + // Metadata Validation + const metadataValidator = new MetadataValidator(); + const metadataValidationResult = await metadataValidator.validateMetadata(bucketName, objectKey); + + if (!metadataValidationResult.isValid) { + logger.warn( + `Metadata validation failed - proceeding with invalid status - component: index.ts, bucketName: ${bucketName}, objectKey: ${objectKey}, validationError: ${metadataValidationResult.error}, action: marking-file-invalid` + ); + } + + // File Type Validation using Magic number detection + const fileValidator = new FileValidator(); + const fileValidationResult = await fileValidator.validateFile(bucketName, objectKey); + + if (!fileValidationResult.isValid) { + logger.warn( + `File validation failed - proceeding with invalid status - component: index.ts, bucketName: ${bucketName}, objectKey: ${objectKey}, validationError: ${fileValidationResult.validationErrors}, action: marking-file-invalid` + ); + } + + const validationResult: ValidationResult = { + isValid: metadataValidationResult.isValid && fileValidationResult.isValid, + error: metadataValidationResult.error || fileValidationResult.validationErrors, + originalFileName: metadataValidationResult.originalFileName + }; + + const processor = new EventBridgeProcessor(metadataTable, multimodalFilesBucket); + const result: MetadataUpdateResult = await processor.processEvent(event, validationResult); + + if (result.success) { + const statusMessage = validationResult.isValid + ? 'Successfully processed event for file' + : 'Successfully processed event for file (marked as invalid due to validation failure)'; + + logger.info( + `${statusMessage} - fileKey: ${result.fileKey}, fileName: ${result.fileName}, validationPassed: ${validationResult.isValid}` + ); + } else { + logger.warn( + `Failed to process event for file - fileKey: ${result.fileKey}, fileName: ${result.fileName}, error: ${result.error}, validationPassed: ${validationResult.isValid}` + ); + throw new Error(`Failed to process event: ${result.error}`); + } + } catch (error) { + handleLambdaError(error, 'updateFilesMetadata', 'Files Metadata Management'); + + throw error; + } finally { + metrics.publishStoredMetrics(); + } +}; + +/** + * Middy-wrapped handler with powertools middleware + */ +export const handler = middy(updateFilesMetadataHandler).use([ + captureLambdaHandler(tracer), + injectLambdaContext(logger), + logMetrics(metrics) +]); diff --git a/source/lambda/files-metadata-management/jest.config.js b/source/lambda/files-metadata-management/jest.config.js new file mode 100644 index 00000000..f3a0b111 --- /dev/null +++ b/source/lambda/files-metadata-management/jest.config.js @@ -0,0 +1,22 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +module.exports = { + modulePaths: [ + '/../layers/', + '/../layers/aws-sdk-lib/node_modules/', + '/../layers/aws-node-user-agent-config/', + '/../layers/aws-node-user-agent-config/node_modules/' + ], + testMatch: ['**/*.test.ts'], + modulePathIgnorePatterns: ['/dist/'], + collectCoverage: true, + collectCoverageFrom: ['**/*.ts', '!**/test/*.ts', '!dist/'], + coverageReporters: ['text', ['lcov', { projectRoot: '../../../' }]], + preset: 'ts-jest', + testEnvironment: 'node', + maxWorkers: 2, + testTimeout: 30000, + forceExit: true, + detectOpenHandles: true +}; diff --git a/source/lambda/files-metadata-management/models/types.ts b/source/lambda/files-metadata-management/models/types.ts new file mode 100644 index 00000000..217adca7 --- /dev/null +++ b/source/lambda/files-metadata-management/models/types.ts @@ -0,0 +1,54 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export interface RetrySettings { + maxRetries: number; + backOffRate: number; + initialDelayMs: number; +} + +export enum FileStatus { + PENDING = 'pending', + UPLOADED = 'uploaded', + DELETED = 'deleted', + INVALID = 'invalid' +} + +export interface FileMetadata { + fileKey: string; // "useCaseId/user-uuid/conversation-uuid/message-uuid" + fileName: string; // Original filename + fileUuid: string; + fileExtension: string; + fileContentType: string; + createdAt: number; + status: FileStatus; + fileSize?: number; + uploadTimestamp?: number; + ttl: number; // TTL for automatic cleanup +} + +export interface FileKeyComponents { + useCaseId: string; + userId: string; + conversationId: string; + messageId: string; + fileName: string; + fileKey: string; // "useCaseId/userId/conversationId/messageId" +} + +export interface MetadataUpdateResult { + success: boolean; + fileKey: string; + fileName: string; + error?: string; +} +export interface ValidationResult { + isValid: boolean; + error?: string; + originalFileName: string; +} + +export interface FileValidationResult { + isValid: boolean; + validationErrors: string; +} diff --git a/source/lambda/files-metadata-management/package-lock.json b/source/lambda/files-metadata-management/package-lock.json new file mode 100644 index 00000000..be8b1513 --- /dev/null +++ b/source/lambda/files-metadata-management/package-lock.json @@ -0,0 +1,5960 @@ +{ + "name": "@amzn/files-metadata", + "version": "4.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@amzn/files-metadata", + "version": "4.0.0", + "license": "Apache-2.0", + "dependencies": { + "file-type": "^16.5.4" + }, + "devDependencies": { + "@types/jest": "^29.5.14", + "@types/lodash": "^4.17.13", + "@types/node": "^22.10.1", + "@typescript-eslint/eslint-plugin": "^8.18.0", + "@typescript-eslint/parser": "^8.18.0", + "aws-sdk-client-mock": "^4.1.0", + "aws-sdk-client-mock-jest": "^4.1.0", + "eslint": "^9.16.0", + "jest": "^29.7.0", + "lodash": "^4.17.21", + "prettier": "^3.4.2", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", + "typescript": "^5.7.2" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.4.tgz", + "integrity": "sha512-YsmSKC29MJwf0gF8Rjjrg5LQCmyh+j/nD8/eP7f+BeoQTKYqs9RoWbjGOdy0+1Ekr68RJZMUOPVQaQisnIo4Rw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.4.tgz", + "integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.4", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.4", + "@babel/types": "^7.28.4", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.3.tgz", + "integrity": "sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.3", + "@babel/types": "^7.28.2", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.4.tgz", + "integrity": "sha512-yZbBqeM6TkpP9du/I2pUZnJsRMGGvOuIrhjzC1AwHwW+6he4mni6Bp/m8ijn0iOuZuPI2BfkCoSRunpyjnrQKg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.4" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.4.tgz", + "integrity": "sha512-YEzuboP2qvQavAcjgQNVgsvHIDv6ZpwXvcvjmyySP2DIMuByS/6ioU5G9pYrWHM6T2YDfc7xga9iNzYOs12CFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.4", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.4.tgz", + "integrity": "sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.0", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.0.tgz", + "integrity": "sha512-ENIdc4iLu0d93HeYirvKmrzshzofPw6VkZRKQGe9Nv46ZnWUzcF1xV01dcvEg/1wXUR61OmmlSfyeyO7EvjLxQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.6", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-array/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.0.tgz", + "integrity": "sha512-WUFvV4WoIwW8Bv0KeKCIIEgdSiFOsulyN0xrMu+7z43q/hkOLXjvb5u7UC9jDxvRzcrbEmuZBX5yJZz1741jog==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.16.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.16.0.tgz", + "integrity": "sha512-nmC8/totwobIiFcGkDza3GIKfAw1+hLiYVrh3I1nIomQ8PEr5cxg34jnkmGawul/ep52wGRAcyeDCNtWKSOj4Q==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", + "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "9.37.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.37.0.tgz", + "integrity": "sha512-jaS+NJ+hximswBG6pjNX0uEJZkrT0zwpVi3BA3vX22aFGjJjmgSTSmPpZCRKmoBL5VY/M6p0xsSJx7rk7sy5gg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", + "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.0.tgz", + "integrity": "sha512-sB5uyeq+dwCWyPi31B2gQlVlo+j5brPlWx4yZBrEaRo/nhdDE8Xke1gsGgtiBdaBTxuTkceLVuVt/pclrasb0A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.16.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@sinonjs/samsam": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-8.0.3.tgz", + "integrity": "sha512-hw6HbX+GyVZzmaYNh82Ecj1vdGZrqVIn/keDTg63IgAwiQPO+xCz99uG6Woqgb4tM0mUiFENKZ4cqd7IX94AXQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.1", + "type-detect": "^4.1.0" + } + }, + "node_modules/@sinonjs/samsam/node_modules/type-detect": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz", + "integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/@sinonjs/text-encoding": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.3.tgz", + "integrity": "sha512-DE427ROAphMQzU4ENbliGYrBSYPXF+TtLg9S8vzeA+OF4ZKzoDdzfL8sxuMUGS/lgRhM6j1URSk9ghf7Xo1tyA==", + "dev": true, + "license": "(Unlicense OR Apache-2.0)" + }, + "node_modules/@tokenizer/token": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@tokenizer/token/-/token-0.3.0.tgz", + "integrity": "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==", + "license": "MIT" + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", + "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/chai": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", + "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/lodash": { + "version": "4.17.20", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz", + "integrity": "sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.18.8", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.18.8.tgz", + "integrity": "sha512-pAZSHMiagDR7cARo/cch1f3rXy0AEXwsVsVH09FcyeJVAzCnGgmYis7P3JidtTUjyadhTeSo8TgRPswstghDaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/sinon": { + "version": "17.0.4", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-17.0.4.tgz", + "integrity": "sha512-RHnIrhfPO3+tJT0s7cFaXGZvsL4bbR3/k7z3P312qMS4JaS2Tk+KiwiLx1S0rQ56ERj00u1/BtdyVd0FY+Pdew==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/sinonjs__fake-timers": "*" + } + }, + "node_modules/@types/sinonjs__fake-timers": { + "version": "8.1.5", + "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-8.1.5.tgz", + "integrity": "sha512-mQkU2jY8jJEF7YHjHvsQO8+3ughTL1mcnn96igfhONmR+fUPSKIkefQYpSe8bsly2Ep7oQbn/6VG5/9/0qcArQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/yargs": { + "version": "17.0.33", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", + "integrity": "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.46.0.tgz", + "integrity": "sha512-hA8gxBq4ukonVXPy0OKhiaUh/68D0E88GSmtC1iAEnGaieuDi38LhS7jdCHRLi6ErJBNDGCzvh5EnzdPwUc0DA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.46.0", + "@typescript-eslint/type-utils": "8.46.0", + "@typescript-eslint/utils": "8.46.0", + "@typescript-eslint/visitor-keys": "8.46.0", + "graphemer": "^1.4.0", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.46.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.46.0.tgz", + "integrity": "sha512-n1H6IcDhmmUEG7TNVSspGmiHHutt7iVKtZwRppD7e04wha5MrkV1h3pti9xQLcCMt6YWsncpoT0HMjkH1FNwWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/scope-manager": "8.46.0", + "@typescript-eslint/types": "8.46.0", + "@typescript-eslint/typescript-estree": "8.46.0", + "@typescript-eslint/visitor-keys": "8.46.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.46.0.tgz", + "integrity": "sha512-OEhec0mH+U5Je2NZOeK1AbVCdm0ChyapAyTeXVIYTPXDJ3F07+cu87PPXcGoYqZ7M9YJVvFnfpGg1UmCIqM+QQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.46.0", + "@typescript-eslint/types": "^8.46.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.46.0.tgz", + "integrity": "sha512-lWETPa9XGcBes4jqAMYD9fW0j4n6hrPtTJwWDmtqgFO/4HF4jmdH/Q6wggTw5qIT5TXjKzbt7GsZUBnWoO3dqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.46.0", + "@typescript-eslint/visitor-keys": "8.46.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.46.0.tgz", + "integrity": "sha512-WrYXKGAHY836/N7zoK/kzi6p8tXFhasHh8ocFL9VZSAkvH956gfeRfcnhs3xzRy8qQ/dq3q44v1jvQieMFg2cw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.46.0.tgz", + "integrity": "sha512-hy+lvYV1lZpVs2jRaEYvgCblZxUoJiPyCemwbQZ+NGulWkQRy0HRPYAoef/CNSzaLt+MLvMptZsHXHlkEilaeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.46.0", + "@typescript-eslint/typescript-estree": "8.46.0", + "@typescript-eslint/utils": "8.46.0", + "debug": "^4.3.4", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.46.0.tgz", + "integrity": "sha512-bHGGJyVjSE4dJJIO5yyEWt/cHyNwga/zXGJbJJ8TiO01aVREK6gCTu3L+5wrkb1FbDkQ+TKjMNe9R/QQQP9+rA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.46.0.tgz", + "integrity": "sha512-ekDCUfVpAKWJbRfm8T1YRrCot1KFxZn21oV76v5Fj4tr7ELyk84OS+ouvYdcDAwZL89WpEkEj2DKQ+qg//+ucg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.46.0", + "@typescript-eslint/tsconfig-utils": "8.46.0", + "@typescript-eslint/types": "8.46.0", + "@typescript-eslint/visitor-keys": "8.46.0", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.46.0.tgz", + "integrity": "sha512-nD6yGWPj1xiOm4Gk0k6hLSZz2XkNXhuYmyIrOWcHoPuAhjT9i5bAG+xbWPgFeNR8HPHHtpNKdYUXJl/D3x7f5g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.46.0", + "@typescript-eslint/types": "8.46.0", + "@typescript-eslint/typescript-estree": "8.46.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.46.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.46.0.tgz", + "integrity": "sha512-FrvMpAK+hTbFy7vH5j1+tMYHMSKLE6RzluFJlkFNKD0p9YsUT75JlBSmr5so3QRzvMwU5/bIEdeNrxm8du8l3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.46.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "license": "MIT", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/aws-sdk-client-mock": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/aws-sdk-client-mock/-/aws-sdk-client-mock-4.1.0.tgz", + "integrity": "sha512-h/tOYTkXEsAcV3//6C1/7U4ifSpKyJvb6auveAepqqNJl6TdZaPFEtKjBQNf8UxQdDP850knB2i/whq4zlsxJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/sinon": "^17.0.3", + "sinon": "^18.0.1", + "tslib": "^2.1.0" + } + }, + "node_modules/aws-sdk-client-mock-jest": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/aws-sdk-client-mock-jest/-/aws-sdk-client-mock-jest-4.1.0.tgz", + "integrity": "sha512-+g4a5Hp+MmPqqNnvwfLitByggrqf+xSbk1pm6fBYHNcon6+aQjL5iB+3YB6HuGPemY+/mUKN34iP62S14R61bA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": ">1.6.0", + "expect": ">28.1.3", + "tslib": "^2.1.0" + }, + "peerDependencies": { + "aws-sdk-client-mock": "4.1.0", + "vitest": ">1.6.0" + }, + "peerDependenciesMeta": { + "vitest": { + "optional": true + } + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.8.13", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.13.tgz", + "integrity": "sha512-7s16KR8io8nIBWQyCYhmFhd+ebIzb9VKTzki+wOJXHTxTnV6+mFGH3+Jwn1zoKaY9/H9T/0BcKCZnzXljPnpSQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.26.3", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.26.3.tgz", + "integrity": "sha512-lAUU+02RFBuCKQPj/P6NgjlbCnLBMp4UtgTx7vNHd3XSIJF87s9a5rA3aH2yw3GS9DqZAUbOtZdCCiZeVRqt0w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.8.9", + "caniuse-lite": "^1.0.30001746", + "electron-to-chromium": "^1.5.227", + "node-releases": "^2.0.21", + "update-browserslist-db": "^1.1.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001748", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001748.tgz", + "integrity": "sha512-5P5UgAr0+aBmNiplks08JLw+AW/XG/SurlgZLgB1dDLfAw7EfRGxIwzPHxdSCGY/BTKDqIVyJL87cCN6s0ZR0w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dedent": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz", + "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.233", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.233.tgz", + "integrity": "sha512-iUdTQSf7EFXsDdQsp8MwJz5SVk4APEFqXU/S47OtQ0YLqacSwPXdZ5vRlMX3neb07Cy2vgioNuRnWUXFwuslkg==", + "dev": true, + "license": "ISC" + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.37.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.37.0.tgz", + "integrity": "sha512-XyLmROnACWqSxiGYArdef1fItQd47weqB7iwtfr9JHwRrqIXZdcFMvvEcL9xHCmL0SNsOvF0c42lWyM1U5dgig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.0", + "@eslint/config-helpers": "^0.4.0", + "@eslint/core": "^0.16.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.37.0", + "@eslint/plugin-kit": "^0.4.0", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/file-type": { + "version": "16.5.4", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-16.5.4.tgz", + "integrity": "sha512-/yFHK0aGjFEgDJjEKP0pWCplsPFPhwyfwevf/pVxiN0tmE4L9LmwWxWukdJSHdoCli4VgQLehjJtwQBnqmsKcw==", + "license": "MIT", + "dependencies": { + "readable-web-to-node-stream": "^3.0.0", + "strtok3": "^6.2.4", + "token-types": "^4.1.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/just-extend": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-6.2.0.tgz", + "integrity": "sha512-cYofQu2Xpom82S6qD778jBDpwvvy39s1l/hrYij2u9AMdQcGRpaBu6kY4mVhuno5kJVi1DAz4aiphA2WI1/OAw==", + "dev": true, + "license": "MIT" + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/nise": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/nise/-/nise-6.1.1.tgz", + "integrity": "sha512-aMSAzLVY7LyeM60gvBS423nBmIPP+Wy7St7hsb+8/fc1HmeoHJfLO8CKse4u3BtOZvQLJghYPI2i/1WZrEj5/g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.1", + "@sinonjs/fake-timers": "^13.0.1", + "@sinonjs/text-encoding": "^0.7.3", + "just-extend": "^6.2.0", + "path-to-regexp": "^8.1.0" + } + }, + "node_modules/nise/node_modules/@sinonjs/fake-timers": { + "version": "13.0.5", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-13.0.5.tgz", + "integrity": "sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.1" + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.23", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.23.tgz", + "integrity": "sha512-cCmFDMSm26S6tQSDpBCg/NR8NENrVPhAJSf+XbxBG4rPFaaonlEoE9wHQmun+cls499TQGSb7ZyPBRlzgKfpeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-to-regexp": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz", + "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/peek-readable": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/peek-readable/-/peek-readable-4.1.0.tgz", + "integrity": "sha512-ZI3LnwUv5nOGbQzD9c2iDG6toheuXSZP5esSHBjopsXH4dg19soufvpUGA3uohi5anFtGb2lhAVdHzH6R/Evvg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "license": "MIT", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/readable-web-to-node-stream": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/readable-web-to-node-stream/-/readable-web-to-node-stream-3.0.4.tgz", + "integrity": "sha512-9nX56alTf5bwXQ3ZDipHJhusu9NTQJ/CVPtb/XHAJCXihZeitfJvIRS4GqQ/mfIoOE3IelHMrpayVrosdHBuLw==", + "license": "MIT", + "dependencies": { + "readable-stream": "^4.7.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-cwd/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/sinon": { + "version": "18.0.1", + "resolved": "https://registry.npmjs.org/sinon/-/sinon-18.0.1.tgz", + "integrity": "sha512-a2N2TDY1uGviajJ6r4D1CyRAkzE9NNVlYOV1wX5xQDuAk0ONgzgRl0EjCQuRCPxOwp13ghsMwt9Gdldujs39qw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.1", + "@sinonjs/fake-timers": "11.2.2", + "@sinonjs/samsam": "^8.0.0", + "diff": "^5.2.0", + "nise": "^6.0.0", + "supports-color": "^7" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/sinon" + } + }, + "node_modules/sinon/node_modules/@sinonjs/fake-timers": { + "version": "11.2.2", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-11.2.2.tgz", + "integrity": "sha512-G2piCSxQ7oWOxwGSAyFHfPIsyeJGXYtc6mFbnFA+kRXkiEnTl8c/8jul2S329iFBnDI9HGoeWWAZvuvOkZccgw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strtok3": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-6.3.0.tgz", + "integrity": "sha512-fZtbhtvI9I48xDSywd/somNqgUHl2L2cstmXCCif0itOf96jeW18MBSyrLuNicYQVkvpOxkZtkzujiTJ9LW5Jw==", + "license": "MIT", + "dependencies": { + "@tokenizer/token": "^0.3.0", + "peek-readable": "^4.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", + "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/token-types": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/token-types/-/token-types-4.2.1.tgz", + "integrity": "sha512-6udB24Q737UD/SDsKAHI9FCRP7Bqc9D/MQUV02ORQg5iskjtLJlZJNdN4kKtcdtwCeWIwIHDGaUsTsCCAa8sFQ==", + "license": "MIT", + "dependencies": { + "@tokenizer/token": "^0.3.0", + "ieee754": "^1.2.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/ts-api-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/ts-jest": { + "version": "29.4.4", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.4.tgz", + "integrity": "sha512-ccVcRABct5ZELCT5U0+DZwkXMCcOCLi2doHRrKy1nK/s7J7bch6TzJMsrY09WxgUUIP/ITfmcDS8D2yl63rnXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "bs-logger": "^0.2.6", + "fast-json-stable-stringify": "^2.1.0", + "handlebars": "^4.7.8", + "json5": "^2.2.3", + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.7.2", + "type-fest": "^4.41.0", + "yargs-parser": "^21.1.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0 || ^30.0.0", + "@jest/types": "^29.0.0 || ^30.0.0", + "babel-jest": "^29.0.0 || ^30.0.0", + "jest": "^29.0.0 || ^30.0.0", + "jest-util": "^29.0.0 || ^30.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/transform": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jest-util": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/ts-node/node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/update-browserslist-db": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true, + "license": "MIT" + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/source/lambda/files-metadata-management/package.json b/source/lambda/files-metadata-management/package.json new file mode 100644 index 00000000..1bf59047 --- /dev/null +++ b/source/lambda/files-metadata-management/package.json @@ -0,0 +1,43 @@ +{ + "name": "@amzn/files-metadata", + "version": "4.0.0", + "description": "This lambda supports APIs that provide export functionality for use cases", + "main": "index.ts", + "scripts": { + "test": "jest --coverage --silent --verbose", + "test-debug": "jest --coverage", + "test:no-cov": "jest --no-coverage --verbose", + "test:watch": "jest --watchAll --verbose", + "build": "npx tsc", + "clean": "rm -rf node_modules", + "clean-dev": "rm -rf node_modules && npm i --omit=dev", + "code-linter-js": "./node_modules/eslint/bin/eslint.js lambda --ext .js", + "code-linter-ts": "./node_modules/eslint/bin/eslint.js bin lib --ext .ts", + "code-linter": "npm run code-linter-ts && npm run code-linter-js", + "code-formatter": "./node_modules/prettier/bin-prettier.js --config .prettierrc.yml '**/*.ts' '**/*.js' --write" + }, + "author": { + "name": "Amazon Web Services", + "url": "https://aws.amazon.com/solutions" + }, + "license": "Apache-2.0", + "dependencies": { + "file-type": "^16.5.4" + }, + "devDependencies": { + "@types/jest": "^29.5.14", + "@types/lodash": "^4.17.13", + "@types/node": "^22.10.1", + "@typescript-eslint/eslint-plugin": "^8.18.0", + "@typescript-eslint/parser": "^8.18.0", + "aws-sdk-client-mock": "^4.1.0", + "aws-sdk-client-mock-jest": "^4.1.0", + "eslint": "^9.16.0", + "jest": "^29.7.0", + "lodash": "^4.17.21", + "prettier": "^3.4.2", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", + "typescript": "^5.7.2" + } +} diff --git a/source/lambda/files-metadata-management/power-tools-init.ts b/source/lambda/files-metadata-management/power-tools-init.ts new file mode 100644 index 00000000..60126f01 --- /dev/null +++ b/source/lambda/files-metadata-management/power-tools-init.ts @@ -0,0 +1,16 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Logger } from '@aws-lambda-powertools/logger'; +import { Metrics } from '@aws-lambda-powertools/metrics'; +import { Tracer } from '@aws-lambda-powertools/tracer'; +import { CloudWatchNamespace } from './utils/constants'; + +const serviceName = { serviceName: 'FilesManagement' }; + +export const tracer = new Tracer(serviceName); +export const logger = new Logger(serviceName); +export const metrics = new Metrics({ + namespace: CloudWatchNamespace.FILE_HANDLING, + serviceName: serviceName.serviceName +}); diff --git a/source/lambda/files-metadata-management/test/index.test.ts b/source/lambda/files-metadata-management/test/index.test.ts new file mode 100644 index 00000000..2a852e8f --- /dev/null +++ b/source/lambda/files-metadata-management/test/index.test.ts @@ -0,0 +1,632 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { updateFilesMetadataHandler, handler } from '../index'; +import { MetadataValidator } from '../utils/metadata-validator'; +import { EventBridgeProcessor } from '../utils/eventbridge-processor'; +import { FileValidator } from '../utils/file-validator'; +import { logger } from '../power-tools-init'; +interface MockEvent { + source: string; + 'detail-type': string; + time: string; + detail: { + bucket: { + name: string; + }; + object: { + key: string; + size?: number; + }; + }; +} + +const MOCK_USE_CASE_ID = 'useCase1'; +const MOCK_USER_ID = '11111111-1111-1111-1111-111111111111'; +const MOCK_CONVERSATION_ID = '22222222-2222-2222-2222-222222222222'; +const MOCK_MESSAGE_ID = '33333333-3333-3333-3333-333333333333'; +const MOCK_FILE_NAME = 'test-file.jpg'; +const MOCK_OBJECT_KEY = `${MOCK_USE_CASE_ID}/${MOCK_USER_ID}/${MOCK_CONVERSATION_ID}/${MOCK_MESSAGE_ID}/${MOCK_FILE_NAME}`; + +const MIME_TYPES: Record = { + jpg: 'image/jpeg', + jpeg: 'image/jpeg', + png: 'image/png', + gif: 'image/gif', + pdf: 'application/pdf', + txt: 'text/plain' +}; + +function createMockEvent(overrides: Partial = {}): MockEvent { + const defaultEvent: MockEvent = { + source: 'aws.s3', + 'detail-type': 'Object Created', + time: '2023-01-01T00:00:00Z', + detail: { + bucket: { + name: 'test-bucket' + }, + object: { + key: MOCK_OBJECT_KEY, + size: 1024 + } + } + }; + + return { + ...defaultEvent, + ...overrides, + detail: { + bucket: { + ...defaultEvent.detail.bucket, + ...overrides.detail?.bucket + }, + object: { + ...defaultEvent.detail.object, + ...overrides.detail?.object + } + } + }; +} + +jest.mock('../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + warn: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + getSegment: jest.fn().mockReturnValue({}), + captureAWSv3Client: jest.fn(), + captureMethod: jest + .fn() + .mockImplementation( + () => (_target: any, _propertyKey: string, descriptor: PropertyDescriptor) => descriptor + ) + }, + metrics: { + publishStoredMetrics: jest.fn(), + addMetric: jest.fn() + } +})); + +// Mock the classes +jest.mock('../utils/eventbridge-processor'); +jest.mock('../utils/metadata-validator'); +jest.mock('../utils/file-validator'); + +const MockedEventBridgeProcessor = EventBridgeProcessor as jest.MockedClass; +const MockedMetadataValidator = MetadataValidator as jest.MockedClass; +const MockedFileValidator = FileValidator as jest.MockedClass; + +jest.mock('../utils/utils', () => ({ + checkEnv: jest.fn(), + handleLambdaError: jest.fn(), + extractContentTypeFromFileName: jest.fn((fileName: string) => { + const extension = fileName.split('.').pop()?.toLowerCase() || ''; + return MIME_TYPES[extension] || 'application/octet-stream'; + }), + extractFileExtension: jest.fn((fileName: string) => { + const lastDotIndex = fileName.lastIndexOf('.'); + if (lastDotIndex === -1 || lastDotIndex === fileName.length - 1) { + return 'unknown'; + } + return fileName.substring(lastDotIndex + 1).toLowerCase(); + }) +})); + +describe('Files Metadata Lambda', () => { + beforeAll(() => { + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-bucket'; + process.env.AWS_SDK_USER_AGENT = JSON.stringify({ + customUserAgent: [['AWSSOLUTION/SO0276/v0.0.0']] + }); + }); + + afterAll(() => { + delete process.env.MULTIMODAL_METADATA_TABLE_NAME; + delete process.env.MULTIMODAL_DATA_BUCKET; + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + const mockEvent = createMockEvent(); + + it('should successfully process EventBridge event with valid metadata', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: true, + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: true, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + await expect(updateFilesMetadataHandler(mockEvent as any)).resolves.not.toThrow(); + + expect(mockValidateMetadata).toHaveBeenCalledWith('test-bucket', MOCK_OBJECT_KEY); + + expect(mockValidateFile).toHaveBeenCalledWith('test-bucket', MOCK_OBJECT_KEY); + + expect(mockProcessEvent).toHaveBeenCalledWith( + mockEvent, + expect.objectContaining({ + isValid: true, + originalFileName: 'test-file.jpg' + }) + ); + }); + + it('should successfully process EventBridge event with invalid metadata', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: false, + error: 'Missing required metadata', + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: true, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + await expect(updateFilesMetadataHandler(mockEvent as any)).resolves.not.toThrow(); + + expect(mockValidateMetadata).toHaveBeenCalledWith('test-bucket', MOCK_OBJECT_KEY); + + expect(mockProcessEvent).toHaveBeenCalledWith( + mockEvent, + expect.objectContaining({ + isValid: false, + error: 'Missing required metadata', + originalFileName: 'test-file.jpg' + }) + ); + + expect(logger.warn).toHaveBeenCalledWith( + expect.stringContaining('Metadata validation failed - proceeding with invalid status') + ); + }); + + it('should throw error when event processing fails', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: true, + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: false, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg', + error: 'Processing failed' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + await expect(updateFilesMetadataHandler(mockEvent as any)).rejects.toThrow( + 'Failed to process event: Processing failed' + ); + }); + + it('should throw error when event detail is missing', async () => { + const eventWithoutDetail = { + source: 'aws.s3', + 'detail-type': 'Object Created', + time: '2023-01-01T00:00:00Z', + detail: null + }; + + await expect(updateFilesMetadataHandler(eventWithoutDetail as any)).rejects.toThrow( + 'Missing event detail in EventBridge event' + ); + }); + + it('should throw error when S3 object information is missing', async () => { + const eventWithoutObjectKey = createMockEvent({ + detail: { + bucket: { name: 'test-bucket' }, + object: { key: undefined as any } + } + }); + + await expect(updateFilesMetadataHandler(eventWithoutObjectKey as any)).rejects.toThrow( + 'Missing required S3 object information in EventBridge event' + ); + }); + + it('should throw error when validation fails with system error', async () => { + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: jest.fn().mockRejectedValue(new Error('S3 API error')) + }) as any + ); + + await expect(updateFilesMetadataHandler(mockEvent as any)).rejects.toThrow('S3 API error'); + }); + + it('should export handler', () => { + expect(handler).toBeDefined(); + expect(typeof handler).toBe('function'); + }); +}); + +describe('Integration Tests - Orchestration Flow', () => { + beforeAll(() => { + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-bucket'; + }); + + afterAll(() => { + delete process.env.MULTIMODAL_METADATA_TABLE_NAME; + delete process.env.MULTIMODAL_DATA_BUCKET; + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + it('should orchestrate validation-first flow correctly', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: true, + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: true, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + const testEvent = createMockEvent(); + await updateFilesMetadataHandler(testEvent as any); + + expect(mockValidateMetadata).toHaveBeenCalled(); + expect(mockValidateFile).toHaveBeenCalled(); + expect(mockProcessEvent).toHaveBeenCalled(); + + expect(mockValidateMetadata).toHaveBeenCalledWith('test-bucket', MOCK_OBJECT_KEY); + expect(mockValidateFile).toHaveBeenCalledWith('test-bucket', MOCK_OBJECT_KEY); + expect(mockProcessEvent).toHaveBeenCalledWith( + testEvent, + expect.objectContaining({ + isValid: true, + originalFileName: 'test-file.jpg' + }) + ); + }); + + it('should handle validation failure gracefully and continue processing', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: false, + error: 'Security violation detected', + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: true, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + const testEvent = createMockEvent(); + await updateFilesMetadataHandler(testEvent as any); + + expect(logger.warn).toHaveBeenCalledWith( + expect.stringContaining('Metadata validation failed - proceeding with invalid status') + ); + + expect(mockProcessEvent).toHaveBeenCalledWith( + testEvent, + expect.objectContaining({ + isValid: false, + error: 'Security violation detected', + originalFileName: 'test-file.jpg' + }) + ); + }); + + it('should handle successful processing with valid metadata', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: true, + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: true, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + await updateFilesMetadataHandler(createMockEvent() as any); + + expect(logger.info).toHaveBeenCalledWith(expect.stringContaining('Successfully processed event for file')); + }); + + it('should handle successful processing with invalid metadata', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: false, + error: 'Missing metadata tag', + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: true, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + await updateFilesMetadataHandler(createMockEvent() as any); + + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('marked as invalid due to validation failure') + ); + }); + + it('should handle processing failure and log appropriately', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: true, + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: false, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg', + error: 'DynamoDB update failed' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + await expect(updateFilesMetadataHandler(createMockEvent() as any)).rejects.toThrow( + 'Failed to process event: DynamoDB update failed' + ); + + expect(logger.warn).toHaveBeenCalledWith(expect.stringContaining('Failed to process event for file')); + }); + + it('should maintain backward compatibility with error response format', async () => { + const mockValidateMetadata = jest.fn().mockResolvedValue({ + objectKey: MOCK_OBJECT_KEY, + isValid: true, + originalFileName: 'test-file.jpg' + }); + const mockValidateFile = jest.fn().mockResolvedValue({ + isValid: true, + validationErrors: '' + }); + const mockProcessEvent = jest.fn().mockResolvedValue({ + success: false, + fileKey: MOCK_OBJECT_KEY, + fileName: 'test-file.jpg', + error: 'Specific error message' + }); + + MockedMetadataValidator.mockImplementation( + () => + ({ + validateMetadata: mockValidateMetadata + }) as any + ); + + MockedFileValidator.mockImplementation( + () => + ({ + validateFile: mockValidateFile + }) as any + ); + + MockedEventBridgeProcessor.mockImplementation( + () => + ({ + processEvent: mockProcessEvent + }) as any + ); + + await expect(updateFilesMetadataHandler(createMockEvent() as any)).rejects.toThrow( + 'Failed to process event: Specific error message' + ); + }); +}); diff --git a/source/lambda/files-metadata-management/test/power-tools-init.test.ts b/source/lambda/files-metadata-management/test/power-tools-init.test.ts new file mode 100644 index 00000000..6c36f04d --- /dev/null +++ b/source/lambda/files-metadata-management/test/power-tools-init.test.ts @@ -0,0 +1,55 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { tracer, logger, metrics } from '../power-tools-init'; +import { Logger } from '@aws-lambda-powertools/logger'; +import { Metrics } from '@aws-lambda-powertools/metrics'; +import { Tracer } from '@aws-lambda-powertools/tracer'; +import { CloudWatchNamespace } from '../utils/constants'; + +describe('Power Tools Initialization', () => { + describe('Tracer', () => { + it('should initialize tracer instance', () => { + expect(tracer).toBeInstanceOf(Tracer); + expect(tracer).toBeDefined(); + }); + }); + + describe('Logger', () => { + it('should initialize logger instance', () => { + expect(logger).toBeInstanceOf(Logger); + expect(logger).toBeDefined(); + }); + }); + + describe('Metrics', () => { + it('should initialize metrics instance', () => { + expect(metrics).toBeInstanceOf(Metrics); + expect(metrics).toBeDefined(); + }); + }); + + describe('Power Tools Integration', () => { + it('should have all power tools instances available', () => { + expect(tracer).toBeInstanceOf(Tracer); + expect(logger).toBeInstanceOf(Logger); + expect(metrics).toBeInstanceOf(Metrics); + }); + + it('should be able to use logger methods', () => { + expect(typeof logger.info).toBe('function'); + expect(typeof logger.error).toBe('function'); + expect(typeof logger.debug).toBe('function'); + }); + + it('should be able to use metrics methods', () => { + expect(typeof metrics.addMetric).toBe('function'); + expect(typeof metrics.publishStoredMetrics).toBe('function'); + }); + + it('should be able to use tracer methods', () => { + expect(typeof tracer.captureMethod).toBe('function'); + expect(typeof tracer.captureAWSv3Client).toBe('function'); + }); + }); +}); diff --git a/source/lambda/files-metadata-management/test/utils/error.test.ts b/source/lambda/files-metadata-management/test/utils/error.test.ts new file mode 100644 index 00000000..8cba40a4 --- /dev/null +++ b/source/lambda/files-metadata-management/test/utils/error.test.ts @@ -0,0 +1,31 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import RequestValidationError from '../../utils/error'; + +describe('RequestValidationError', () => { + it('should create error with correct message and name', () => { + const errorMessage = 'Test validation error'; + const error = new RequestValidationError(errorMessage); + + expect(error.message).toBe(errorMessage); + expect(error.name).toBe('CustomHttpError'); + expect(error).toBeInstanceOf(Error); + expect(error).toBeInstanceOf(RequestValidationError); + }); + + it('should be throwable and catchable', () => { + const errorMessage = 'Test validation error'; + + expect(() => { + throw new RequestValidationError(errorMessage); + }).toThrow(errorMessage); + + try { + throw new RequestValidationError(errorMessage); + } catch (error) { + expect(error).toBeInstanceOf(RequestValidationError); + expect((error as RequestValidationError).message).toBe(errorMessage); + } + }); +}); diff --git a/source/lambda/files-metadata-management/test/utils/eventbridge-processor.test.ts b/source/lambda/files-metadata-management/test/utils/eventbridge-processor.test.ts new file mode 100644 index 00000000..691873bf --- /dev/null +++ b/source/lambda/files-metadata-management/test/utils/eventbridge-processor.test.ts @@ -0,0 +1,501 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { DynamoDBClient, UpdateItemCommand } from '@aws-sdk/client-dynamodb'; +import { EventBridgeEvent } from 'aws-lambda'; +import { mockClient } from 'aws-sdk-client-mock'; +import { EventBridgeProcessor } from '../../utils/eventbridge-processor'; +import { logger as mockLogger, metrics as mockMetrics } from '../../power-tools-init'; +import { retryWithBackoff } from '../../utils/utils'; +import { CloudWatchMetrics, ERROR_MESSAGES } from '../../utils/constants'; + +const mockRetryWithBackoff = retryWithBackoff as jest.MockedFunction; + +import { FileStatus, ValidationResult } from '../../models/types'; + +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + warn: jest.fn(), + debug: jest.fn() + }, + tracer: { + captureAWSv3Client: jest.fn((client) => client), + captureMethod: jest.fn(() => (_target: any, _propertyKey: string, descriptor: PropertyDescriptor) => descriptor) + }, + metrics: { + addMetric: jest.fn(), + addDimension: jest.fn(), + setDefaultDimensions: jest.fn(), + publishStoredMetrics: jest.fn() + } +})); + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + +jest.mock('../../utils/utils', () => ({ + retryWithBackoff: jest.fn(async (operation, retrySettings) => { + let lastError: any; + + for (let attempt = 0; attempt <= retrySettings.maxRetries; attempt++) { + try { + return await operation(); + } catch (error) { + lastError = error instanceof Error ? error : new Error(String(error)); + if (attempt === retrySettings.maxRetries) { + break; + } + } + } + throw lastError; + }), + getRetrySettings: jest.fn(() => ({ + maxRetries: 3, + backOffRate: 2, + initialDelayMs: 1000 + })), + calculateTTL: jest.fn(() => 1234567890), + extractFileExtension: jest.fn((fileName: string) => { + const lastDotIndex = fileName.lastIndexOf('.'); + if (lastDotIndex === -1 || lastDotIndex === fileName.length - 1) { + return 'unknown'; + } + return fileName.substring(lastDotIndex + 1).toLowerCase(); + }), + categorizeProcessingError: jest.fn((error: Error) => { + const errorMessage = error.message.toLowerCase(); + const errorName = error.name; + + if ( + errorMessage.includes('timeout') || + errorMessage.includes('serviceunavailable') || + errorMessage.includes('internalerror') || + errorMessage.includes('throttling') || + errorName === 'TimeoutError' || + errorName === 'ServiceUnavailable' + ) { + return 'system-error'; + } + + if ( + errorMessage.includes('invalid file key format') || + errorMessage.includes('validation') || + errorName === 'ValidationException' + ) { + return 'validation-error'; + } + + return 'application-error'; + }) +})); + +const dynamoMock = mockClient(DynamoDBClient); + +describe('EventBridgeProcessor - DDB Focused Tests', () => { + let processor: EventBridgeProcessor; + const mockTableName = 'test-table'; + const mockBucketName = 'test-bucket'; + const mockObjectKey = 'useCase1/user123/conv456/msg789/test-file.jpg'; + + beforeEach(() => { + jest.clearAllMocks(); + dynamoMock.reset(); + + mockRetryWithBackoff.mockImplementation(async (operation: () => Promise) => { + return await operation(); + }); + + processor = new EventBridgeProcessor(mockTableName, mockBucketName); + }); + + const createMockEvent = (overrides: any = {}): EventBridgeEvent => ({ + source: 'aws.s3', + 'detail-type': 'Object Created', + time: '2023-01-01T00:00:00Z', + detail: { + bucket: { + name: mockBucketName + }, + object: { + key: mockObjectKey, + size: 1024 + } + }, + ...overrides + }); + + describe('DDB-Focused EventBridge Processing', () => { + it('should successfully process event with valid validation result', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'original-file.jpg' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(true); + expect(result.fileKey).toBe('useCase1/user123/conv456/msg789'); + expect(result.fileName).toBe('original-file.jpg'); + + expect(dynamoMock.commandCalls(UpdateItemCommand)).toHaveLength(1); + const updateCall = dynamoMock.commandCalls(UpdateItemCommand)[0]; + expect(updateCall.args[0].input).toMatchObject({ + TableName: mockTableName, + Key: { + fileKey: { S: 'useCase1/user123/conv456/msg789' }, + fileName: { S: 'original-file.jpg' } + } + }); + expect(updateCall.args[0].input.ExpressionAttributeValues).toMatchObject({ + ':status': { S: FileStatus.UPLOADED }, + ':uploadTimestamp': { N: '1672531200000' }, + ':ttl': { N: '1234567890' }, + ':pendingStatus': { S: FileStatus.PENDING } + }); + expect(updateCall.args[0].input.ConditionExpression).toBe( + 'attribute_exists(fileKey) AND attribute_exists(fileName) AND #status = :pendingStatus' + ); + }); + + it('should successfully process event with invalid validation result', async () => { + const validationResult: ValidationResult = { + isValid: false, + error: 'Missing required metadata', + originalFileName: 'original-file.jpg' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(true); // Processing succeeded + expect(result.fileKey).toBe('useCase1/user123/conv456/msg789'); + expect(result.fileName).toBe('original-file.jpg'); + + expect(dynamoMock.commandCalls(UpdateItemCommand)).toHaveLength(1); + const updateCall = dynamoMock.commandCalls(UpdateItemCommand)[0]; + expect(updateCall.args[0].input.ExpressionAttributeValues).toMatchObject({ + ':status': { S: FileStatus.INVALID }, + ':uploadTimestamp': { N: '1672531200000' }, + ':ttl': { N: '1234567890' }, + ':pendingStatus': { S: FileStatus.PENDING } + }); + expect(updateCall.args[0].input.Key).toMatchObject({ + fileKey: { S: 'useCase1/user123/conv456/msg789' }, + fileName: { S: 'original-file.jpg' } + }); + + expect(mockLogger.info).toHaveBeenCalledWith( + `${ERROR_MESSAGES.FILE_MARKED_INVALID_METADATA} - fileKey: useCase1/user123/conv456/msg789, fileName: original-file.jpg, reason: metadata-validation-failure, error: Missing required metadata` + ); + }); + + it('should handle validation result without error message', async () => { + const validationResult: ValidationResult = { + isValid: false, + originalFileName: 'test-name.png' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(true); + expect(result.fileKey).toBe('useCase1/user123/conv456/msg789'); + expect(result.fileName).toBe('test-name.png'); + + expect(mockLogger.info).toHaveBeenCalledWith( + `${ERROR_MESSAGES.FILE_MARKED_INVALID_METADATA} - fileKey: useCase1/user123/conv456/msg789, fileName: test-name.png, reason: metadata-validation-failure, error: undefined` + ); + }); + }); + + describe('Single DDB Update Operation Behavior', () => { + it('should perform exactly one DDB update with correct expression structure', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + await processor.processEvent(event, validationResult); + + expect(dynamoMock.commandCalls(UpdateItemCommand)).toHaveLength(1); + + const updateCall = dynamoMock.commandCalls(UpdateItemCommand)[0]; + const updateExpression = updateCall.args[0].input.UpdateExpression; + + expect(updateExpression).toBe('SET #status = :status, #uploadTimestamp = :uploadTimestamp, #ttl = :ttl'); + + expect(updateCall.args[0].input.ExpressionAttributeNames).toEqual({ + '#status': 'status', + '#uploadTimestamp': 'uploadTimestamp', + '#ttl': 'ttl' + }); + + expect(updateCall.args[0].input.ConditionExpression).toBe( + 'attribute_exists(fileKey) AND attribute_exists(fileName) AND #status = :pendingStatus' + ); + }); + + it('should use single update method for validation failures', async () => { + const validationResult: ValidationResult = { + isValid: false, + error: 'Missing required metadata', + originalFileName: 'test-name.png' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + await processor.processEvent(event, validationResult); + + expect(dynamoMock.commandCalls(UpdateItemCommand)).toHaveLength(1); + + const updateCall = dynamoMock.commandCalls(UpdateItemCommand)[0]; + expect(updateCall.args[0].input.ExpressionAttributeValues).toMatchObject({ + ':status': { S: FileStatus.INVALID }, + ':pendingStatus': { S: FileStatus.PENDING } + }); + expect(updateCall.args[0].input.ConditionExpression).toBe( + 'attribute_exists(fileKey) AND attribute_exists(fileName) AND #status = :pendingStatus' + ); + }); + + it('should handle conditional check failures in DDB update', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + const dynamoError = new Error('ConditionalCheckFailedException'); + dynamoError.name = 'ConditionalCheckFailedException'; + dynamoMock.on(UpdateItemCommand).rejects(dynamoError); + + const event = createMockEvent(); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(false); + expect(result.error).toContain('ConditionalCheckFailedException'); + + expect(dynamoMock.commandCalls(UpdateItemCommand)).toHaveLength(1); + }); + }); + + describe('Metrics Collection During DDB Operations', () => { + it('should record metrics for successful DDB operations', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + await processor.processEvent(event, validationResult); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.METADATA_UPDATE_SUCCESS, 'Count', 1); + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILES_UPLOADED, 'Count', 1); + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILE_SIZE, 'Bytes', 1024); + expect(mockMetrics.publishStoredMetrics).toHaveBeenCalled(); + expect(mockMetrics.addDimension).toHaveBeenCalledWith(CloudWatchMetrics.FILE_EXTENSION, 'png'); + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + 'Count', + 1 + ); + expect(mockMetrics.setDefaultDimensions).toHaveBeenCalledWith({ UseCaseId: 'useCase1' }); + }); + + it('should record file extension and size metrics', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent({ + detail: { + bucket: { name: mockBucketName }, + object: { + key: mockObjectKey, + size: 2048 + } + } + }); + await processor.processEvent(event, validationResult); + + expect(mockMetrics.setDefaultDimensions).toHaveBeenCalledWith({ UseCaseId: 'useCase1' }); + expect(mockMetrics.addDimension).toHaveBeenCalledWith(CloudWatchMetrics.FILE_EXTENSION, 'png'); + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILE_SIZE, 'Bytes', 2048); + }); + + it('should record failure metrics for DDB operation failures', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + const dynamoError = new Error('ConditionalCheckFailedException'); + dynamoError.name = 'ConditionalCheckFailedException'; + dynamoMock.on(UpdateItemCommand).rejects(dynamoError); + + const event = createMockEvent(); + await processor.processEvent(event, validationResult); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.METADATA_UPDATE_FAILURE, 'Count', 1); + }); + + it('should record metrics for invalid validation results', async () => { + const validationResult: ValidationResult = { + isValid: false, + error: 'Missing metadata', + originalFileName: 'test-name.png' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + await processor.processEvent(event, validationResult); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.METADATA_UPDATE_SUCCESS, 'Count', 1); + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILES_UPLOADED, 'Count', 1); + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.FILE_SIZE, 'Bytes', 1024); + expect(mockMetrics.publishStoredMetrics).toHaveBeenCalled(); + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, + 'Count', + 1 + ); + }); + }); + + describe('Error Message Constants Usage', () => { + it('should use error message constants for unexpected bucket', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + const event = createMockEvent({ + detail: { + bucket: { name: 'wrong-bucket' }, + object: { key: mockObjectKey, size: 1024 } + } + }); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(false); + expect(result.error).toContain(ERROR_MESSAGES.UNEXPECTED_BUCKET); + expect(mockLogger.error).toHaveBeenCalledWith( + `${ERROR_MESSAGES.UNEXPECTED_BUCKET}: wrong-bucket - actualBucket: wrong-bucket, expectedBucket: test-bucket` + ); + }); + + it('should use error message constants for invalid file key format', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + const event = createMockEvent({ + detail: { + bucket: { name: mockBucketName }, + object: { key: 'invalid-key-format', size: 1024 } + } + }); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(false); + expect(result.error).toContain(ERROR_MESSAGES.INVALID_FILE_KEY_FORMAT); + }); + + it('should use error message constants for processing success', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + await processor.processEvent(event, validationResult); + + expect(mockLogger.info).toHaveBeenCalledWith( + `${ERROR_MESSAGES.PROCESSING_SUCCESS} - fileKey: useCase1/user123/conv456/msg789, fileName: test-name.png` + ); + }); + + it('should use error message constants for DynamoDB update failures', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + const dynamoError = new Error('ConditionalCheckFailedException'); + dynamoError.name = 'ConditionalCheckFailedException'; + dynamoMock.on(UpdateItemCommand).rejects(dynamoError); + + const event = createMockEvent(); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(false); + expect(mockLogger.warn).toHaveBeenCalledWith( + `${ERROR_MESSAGES.DYNAMODB_UPDATE_FAILED} - error: ConditionalCheckFailedException, fileKey: useCase1/user123/conv456/msg789, fileName: test-name.png` + ); + }); + }); + + describe('Consistent Behavior Across All Environments', () => { + it('should use consistent DDB update behavior regardless of environment', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + dynamoMock.on(UpdateItemCommand).resolves({}); + + const event = createMockEvent(); + await processor.processEvent(event, validationResult); + + expect(dynamoMock.commandCalls(UpdateItemCommand)).toHaveLength(1); + const updateCall = dynamoMock.commandCalls(UpdateItemCommand)[0]; + expect(updateCall.args[0].input.TableName).toBe(mockTableName); + }); + + it('should handle DDB retry logic consistently using error constants', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + + const dynamoError = new Error('ThrottlingException'); + dynamoError.name = 'ThrottlingException'; + dynamoMock.on(UpdateItemCommand).rejects(dynamoError); + + const event = createMockEvent(); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(false); + expect(result.error).toContain('ThrottlingException'); + + expect(dynamoMock.commandCalls(UpdateItemCommand)).toHaveLength(1); + }); + + it('should handle DynamoDB update failures consistently with single update operation', async () => { + const validationResult: ValidationResult = { + isValid: true, + originalFileName: 'test-name.png' + }; + const dynamoError = new Error('ConditionalCheckFailedException'); + dynamoError.name = 'ConditionalCheckFailedException'; + dynamoMock.on(UpdateItemCommand).rejects(dynamoError); + + const event = createMockEvent(); + const result = await processor.processEvent(event, validationResult); + + expect(result.success).toBe(false); + expect(result.error).toContain('ConditionalCheckFailedException'); + + expect(dynamoMock.commandCalls(UpdateItemCommand)).toHaveLength(1); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith(CloudWatchMetrics.METADATA_UPDATE_FAILURE, 'Count', 1); + }); + }); +}); diff --git a/source/lambda/files-metadata-management/test/utils/file-validator.test.ts b/source/lambda/files-metadata-management/test/utils/file-validator.test.ts new file mode 100644 index 00000000..e0d3e530 --- /dev/null +++ b/source/lambda/files-metadata-management/test/utils/file-validator.test.ts @@ -0,0 +1,771 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { S3Client, GetObjectCommand } from '@aws-sdk/client-s3'; +import { mockClient } from 'aws-sdk-client-mock'; +import * as FileType from 'file-type'; +import type { FileTypeResult } from 'file-type'; +import { FileValidator } from '../../utils/file-validator'; +import { logger as mockLogger, tracer as mockTracer } from '../../power-tools-init'; +import { MAGIC_NUMBER_BUFFER_SIZE, ALL_SUPPORTED_FILE_TYPES } from '../../utils/constants'; +import { extractFileExtension, extractContentTypeFromFileName } from '../../utils/utils'; + +// Mock dependencies +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + warn: jest.fn(), + debug: jest.fn() + }, + tracer: { + captureAWSv3Client: jest.fn((client) => client), + captureMethod: jest.fn(() => (_target: any, _propertyKey: string, descriptor: PropertyDescriptor) => descriptor) + } +})); + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + +jest.mock('../../utils/utils', () => ({ + extractFileExtension: jest.fn((fileName: string) => { + const lastDotIndex = fileName.lastIndexOf('.'); + if (lastDotIndex === -1 || lastDotIndex === fileName.length - 1) { + return 'unknown'; + } + return fileName.substring(lastDotIndex + 1).toLowerCase(); + }), + extractContentTypeFromFileName: jest.fn((fileName: string) => { + const extension = fileName.split('.').pop()?.toLowerCase() || ''; + const mimeTypes: Record = { + 'jpg': 'image/jpeg', + 'jpeg': 'image/jpeg', + 'png': 'image/png', + 'gif': 'image/gif', + 'pdf': 'application/pdf', + 'txt': 'text/plain', + 'csv': 'text/csv' + }; + return mimeTypes[extension] || 'application/octet-stream'; + }) +})); + +jest.mock('file-type', () => ({ + fromBuffer: jest.fn() +})); + +const s3Mock = mockClient(S3Client); +const mockFromBuffer = FileType.fromBuffer as jest.MockedFunction; +const mockExtractFileExtension = extractFileExtension as jest.MockedFunction; +const mockExtractContentTypeFromFileName = extractContentTypeFromFileName as jest.MockedFunction; + +describe('FileValidator', () => { + let fileValidator: FileValidator; + const mockBucketName = 'test-bucket'; + const mockObjectKey = 'useCase1/user123/conv456/msg789/test-file.jpg'; + + beforeEach(() => { + jest.clearAllMocks(); + s3Mock.reset(); + fileValidator = new FileValidator(); + }); + + describe('Constructor', () => { + it('should initialize FileValidator with S3 client', () => { + expect(fileValidator).toBeInstanceOf(FileValidator); + expect(mockTracer.captureAWSv3Client).toHaveBeenCalled(); + }); + }); + + describe('validateFile - Successful Validation', () => { + it('should successfully validate a JPEG file with correct magic numbers', async () => { + const mockBuffer = Buffer.from([0xFF, 0xD8, 0xFF, 0xE0]); // JPEG magic numbers + const mockDetectedType: FileTypeResult = { mime: 'image/jpeg', ext: 'jpg' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(true); + expect(result.validationErrors).toBe(''); + expect(mockLogger.info).toHaveBeenCalledWith( + `Starting file validation for: ${mockObjectKey}`, + expect.any(String) + ); + expect(mockLogger.info).toHaveBeenCalledWith( + 'File validation completed', + expect.any(String) + ); + }); + + it('should successfully validate a PNG file with correct magic numbers', async () => { + const mockBuffer = Buffer.from([0x89, 0x50, 0x4E, 0x47]); // PNG magic numbers + const mockDetectedType: FileTypeResult = { mime: 'image/png', ext: 'png' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/png'); + mockExtractFileExtension.mockReturnValue('png'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/file.png'); + + expect(result.isValid).toBe(true); + expect(result.validationErrors).toBe(''); + }); + + it('should successfully validate a PDF file with correct magic numbers', async () => { + const mockBuffer = Buffer.from([0x25, 0x50, 0x44, 0x46]); // PDF magic numbers + const mockDetectedType: FileTypeResult = { mime: 'application/pdf', ext: 'pdf' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('application/pdf'); + mockExtractFileExtension.mockReturnValue('pdf'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/document.pdf'); + + expect(result.isValid).toBe(true); + expect(result.validationErrors).toBe(''); + }); + + it('should accept JPEG files with .jpeg extension when detected as jpg', async () => { + const mockBuffer = Buffer.from([0xFF, 0xD8, 0xFF, 0xE0]); // JPEG magic numbers + const mockDetectedType: FileTypeResult = { mime: 'image/jpeg', ext: 'jpg' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpeg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/image.jpeg'); + + expect(result.isValid).toBe(true); // Should pass since jpeg and jpg are equivalent + expect(result.validationErrors).toBe(''); + }); + + it('should accept DOC files detected as CFB format', async () => { + const mockBuffer = Buffer.from([0xD0, 0xCF, 0x11, 0xE0]); // CFB magic numbers + const mockDetectedType: FileTypeResult = { mime: 'application/x-cfb', ext: 'cfb' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('application/msword'); + mockExtractFileExtension.mockReturnValue('doc'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/Reinvent 2025.doc'); + + expect(result.isValid).toBe(true); // Should pass with CFB equivalence + expect(result.validationErrors).toBe(''); + }); + + it('should accept XLS files detected as CFB format', async () => { + const mockBuffer = Buffer.from([0xD0, 0xCF, 0x11, 0xE0]); // CFB magic numbers + const mockDetectedType: FileTypeResult = { mime: 'application/x-cfb', ext: 'cfb' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('application/vnd.ms-excel'); + mockExtractFileExtension.mockReturnValue('xls'); // File has .xls extension + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/spreadsheet.xls'); + + expect(result.isValid).toBe(true); // Should pass with CFB equivalence + expect(result.validationErrors).toBe(''); + }); + }); + + describe('validateFile - Validation Failures', () => { + it('should fail validation for unsupported declared content type', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('application/x-executable'); + + const result = await fileValidator.validateFile(mockBucketName, 'test/malware.exe'); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Unsupported declared content type: application/x-executable'); + expect(s3Mock.commandCalls(GetObjectCommand)).toHaveLength(0); // Should not call S3 + }); + + it('should fail validation when file type cannot be detected from magic numbers', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield Buffer.from([0x00, 0x00, 0x00, 0x00]); // Invalid magic numbers + } + } as any + }); + mockFromBuffer.mockResolvedValue(undefined); + + const result = await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Unable to detect file type from magic numbers'); + }); + + it('should fail validation when detected file type is not supported', async () => { + const mockBuffer = Buffer.from([0x7F, 0x45, 0x4C, 0x46]); // ELF executable magic numbers + const mockDetectedType: FileTypeResult = { mime: 'application/x-tar', ext: 'tar' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/fake.jpg'); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Unsupported file type detected: application/x-tar'); + expect(result.validationErrors).toContain('Content type mismatch: declared \'image/jpeg\' but detected \'application/x-tar\''); + expect(result.validationErrors).toContain('File extension mismatch: filename suggests \'jpg\' but detected \'tar\''); + }); + + it('should fail validation when declared content type does not match detected type', async () => { + const mockBuffer = Buffer.from([0x89, 0x50, 0x4E, 0x47]); // PNG magic numbers + const mockDetectedType: FileTypeResult = { mime: 'image/png', ext: 'png' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); // Declared as JPEG + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/fake.jpg'); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Content type mismatch: declared \'image/jpeg\' but detected \'image/png\''); + expect(result.validationErrors).toContain('File extension mismatch: filename suggests \'jpg\' but detected \'png\''); + }); + + it('should fail validation when file extension does not match detected type', async () => { + const mockBuffer = Buffer.from([0x89, 0x50, 0x4E, 0x47]); // PNG magic numbers + const mockDetectedType: FileTypeResult = { mime: 'image/png', ext: 'png' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/png'); + mockExtractFileExtension.mockReturnValue('jpg'); // Extension suggests JPEG + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/file.jpg'); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('File extension mismatch: filename suggests \'jpg\' but detected \'png\''); + }); + + it('should handle files without extensions gracefully', async () => { + const mockBuffer = Buffer.from([0x89, 0x50, 0x4E, 0x47]); // PNG magic numbers + const mockDetectedType: FileTypeResult = { mime: 'image/png', ext: 'png' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/png'); + mockExtractFileExtension.mockReturnValue('unknown'); // No extension + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/file_no_extension'); + + expect(result.isValid).toBe(true); // Should pass since unknown extension is ignored + expect(result.validationErrors).toBe(''); + }); + }); + + describe('validateFile - Error Handling', () => { + it('should handle S3 GetObject NoSuchKey error', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + const s3Error = new Error('NoSuchKey'); + s3Error.name = 'NoSuchKey'; + s3Mock.on(GetObjectCommand).rejects(s3Error); + + const result = await fileValidator.validateFile(mockBucketName, 'nonexistent/file.jpg'); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Validation process failed: S3 partial download failed: NoSuchKey'); + expect(mockLogger.error).toHaveBeenCalledWith( + `File validation failed for: nonexistent/file.jpg`, + expect.any(String) + ); + }); + + it('should handle S3 AccessDenied error', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + const s3Error = new Error('AccessDenied'); + s3Error.name = 'AccessDenied'; + s3Mock.on(GetObjectCommand).rejects(s3Error); + + const result = await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Validation process failed: S3 partial download failed: AccessDenied'); + expect(mockLogger.error).toHaveBeenCalledWith( + `Failed to download partial file from S3: ${mockBucketName}/${mockObjectKey}`, + expect.any(String) + ); + }); + + it('should handle empty S3 response body', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: undefined + }); + + const result = await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Validation process failed: S3 partial download failed: Empty response body from S3'); + expect(mockLogger.error).toHaveBeenCalledWith( + `Failed to download partial file from S3: ${mockBucketName}/${mockObjectKey}`, + expect.any(String) + ); + }); + + it('should handle FileType.fromBuffer throwing an error', async () => { + const mockBuffer = Buffer.from([0xFF, 0xD8, 0xFF, 0xE0]); + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockRejectedValue(new Error('FileType parsing error')); + + const result = await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Validation process failed: FileType parsing error'); + expect(mockLogger.error).toHaveBeenCalledWith( + `File validation failed for: ${mockObjectKey}`, + expect.any(String) + ); + }); + + it('should handle non-Error objects thrown', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + s3Mock.on(GetObjectCommand).rejects('String error'); + + const result = await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Validation process failed: S3 partial download failed: String error'); + expect(mockLogger.error).toHaveBeenCalledWith( + `Failed to download partial file from S3: ${mockBucketName}/${mockObjectKey}`, + expect.any(String) + ); + }); + }); + + describe('downloadPartialFileFromS3 - S3 Integration', () => { + it('should download partial file with correct Range header', async () => { + const mockBuffer = Buffer.from([0xFF, 0xD8, 0xFF, 0xE0]); + const mockDetectedType: FileTypeResult = { mime: 'image/jpeg', ext: 'jpg' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(s3Mock.commandCalls(GetObjectCommand)).toHaveLength(1); + const getObjectCall = s3Mock.commandCalls(GetObjectCommand)[0]; + expect(getObjectCall.args[0].input).toMatchObject({ + Bucket: mockBucketName, + Key: mockObjectKey, + Range: `bytes=0-${MAGIC_NUMBER_BUFFER_SIZE - 1}` + }); + }); + + it('should handle multiple chunks in S3 response stream', async () => { + const chunk1 = Buffer.from([0xFF, 0xD8]); + const chunk2 = Buffer.from([0xFF, 0xE0]); + const mockDetectedType: FileTypeResult = { mime: 'image/jpeg', ext: 'jpg' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield chunk1; + yield chunk2; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(true); + expect(mockFromBuffer).toHaveBeenCalledWith(Buffer.concat([chunk1, chunk2])); + }); + }); + + describe('Logging Behavior', () => { + it('should log debug information during validation process', async () => { + const mockBuffer = Buffer.from([0xFF, 0xD8, 0xFF, 0xE0]); + const mockDetectedType: FileTypeResult = { mime: 'image/jpeg', ext: 'jpg' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + await fileValidator.validateFile(mockBucketName, mockObjectKey); + + expect(mockLogger.debug).toHaveBeenCalledWith( + 'File type detection results', + expect.any(String) + ); + }); + + it('should use JSON.stringify for all log messages', async () => { + const mockBuffer = Buffer.from([0xFF, 0xD8, 0xFF, 0xE0]); + const mockDetectedType: FileTypeResult = { mime: 'image/jpeg', ext: 'jpg' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + await fileValidator.validateFile(mockBucketName, mockObjectKey); + + // Verify that all logger calls use JSON.stringify for structured logging + expect(mockLogger.info).toHaveBeenCalledWith( + expect.any(String), + expect.stringMatching(/^\{.*\}$/) // JSON string pattern + ); + expect(mockLogger.debug).toHaveBeenCalledWith( + expect.any(String), + expect.stringMatching(/^\{.*\}$/) // JSON string pattern + ); + }); + }); + + describe('Performance and Optimization', () => { + it('should only download MAGIC_NUMBER_BUFFER_SIZE bytes for validation', async () => { + const mockBuffer = Buffer.from([0xFF, 0xD8, 0xFF, 0xE0]); + const mockDetectedType: FileTypeResult = { mime: 'image/jpeg', ext: 'jpg' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + await fileValidator.validateFile(mockBucketName, mockObjectKey); + + const getObjectCall = s3Mock.commandCalls(GetObjectCommand)[0]; + expect(getObjectCall.args[0].input.Range).toBe(`bytes=0-${MAGIC_NUMBER_BUFFER_SIZE - 1}`); + }); + + it('should skip S3 download for unsupported declared content types', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('application/x-malware'); + + const result = await fileValidator.validateFile(mockBucketName, 'test/malware.exe'); + + expect(result.isValid).toBe(false); + expect(s3Mock.commandCalls(GetObjectCommand)).toHaveLength(0); + }); + }); + + describe('NO_MAGIC_NUMBER_EXTENSIONS Validation', () => { + it('should successfully validate TXT files without magic numbers', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('text/plain'); + mockExtractFileExtension.mockReturnValue('txt'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield Buffer.from('Hello world'); + } + } as any + }); + mockFromBuffer.mockResolvedValue(undefined); + + const result = await fileValidator.validateFile(mockBucketName, 'test/document.txt'); + + expect(result.isValid).toBe(true); + expect(result.validationErrors).toBe(''); + expect(mockLogger.info).toHaveBeenCalledWith( + 'File validation completed (no magic numbers)', + expect.stringContaining('"extension":"txt"') + ); + }); + + it('should successfully validate MD files without magic numbers', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('text/markdown'); + mockExtractFileExtension.mockReturnValue('md'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield Buffer.from('# Markdown Title\n\nContent here'); + } + } as any + }); + mockFromBuffer.mockResolvedValue(undefined); + + const result = await fileValidator.validateFile(mockBucketName, 'test/readme.md'); + + expect(result.isValid).toBe(true); + expect(result.validationErrors).toBe(''); + expect(mockLogger.info).toHaveBeenCalledWith( + 'File validation completed (no magic numbers)', + expect.stringContaining('"extension":"md"') + ); + }); + + it('should fail validation for unsupported extension when no magic numbers detected', async () => { + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield Buffer.from('fake image content'); + } + } as any + }); + mockFromBuffer.mockResolvedValue(undefined); + + const result = await fileValidator.validateFile(mockBucketName, 'test/fake.jpg'); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Unable to detect file type from magic numbers'); + }); + }); + + describe('Extension with MIME variants', () => { + it('should return true for DOC files with correct declared type and CFB detected type', async () => { + const mockBuffer = Buffer.from([0xD0, 0xCF, 0x11, 0xE0]); // CFB magic numbers + const mockDetectedType: FileTypeResult = { mime: 'application/x-cfb', ext: 'cfb' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('application/msword'); + mockExtractFileExtension.mockReturnValue('doc'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/document.doc'); + + expect(result.isValid).toBe(true); + expect(result.validationErrors).toBe(''); + }); + + it('should return true for XLS files with correct declared type and CFB detected type', async () => { + const mockBuffer = Buffer.from([0xD0, 0xCF, 0x11, 0xE0]); // CFB magic numbers + const mockDetectedType: FileTypeResult = { mime: 'application/x-cfb', ext: 'cfb' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('application/vnd.ms-excel'); + mockExtractFileExtension.mockReturnValue('xls'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/spreadsheet.xls'); + + expect(result.isValid).toBe(true); + expect(result.validationErrors).toBe(''); + }); + + it('should return false when declared type does not match expected type for extension with variants', async () => { + const mockBuffer = Buffer.from([0xD0, 0xCF, 0x11, 0xE0]); // CFB magic numbers + const mockDetectedType: FileTypeResult = { mime: 'application/x-cfb', ext: 'cfb' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('doc'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/document.doc'); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Content type mismatch'); + }); + }); + + describe('Extension without variants', () => { + it('should return false when declared and detected types do not match for extension without variants', async () => { + const mockBuffer = Buffer.from([0x89, 0x50, 0x4E, 0x47]); + const mockDetectedType: FileTypeResult = { mime: 'image/png', ext: 'png' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('png'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/image.png'); + + expect(result.isValid).toBe(false); + expect(result.validationErrors).toContain('Content type mismatch'); + }); + + it('should return true when declared and detected types match for extension without variants', async () => { + const mockBuffer = Buffer.from([0x89, 0x50, 0x4E, 0x47]); + const mockDetectedType: FileTypeResult = { mime: 'image/png', ext: 'png' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/png'); + mockExtractFileExtension.mockReturnValue('png'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, 'test/image.png'); + + expect(result.isValid).toBe(true); + expect(result.validationErrors).toBe(''); + }); + }); + + describe('Edge Cases', () => { + it('should handle object keys with complex paths', async () => { + const complexObjectKey = 'folder/subfolder/deep/path/file.with.dots.jpg'; + const mockBuffer = Buffer.from([0xFF, 0xD8, 0xFF, 0xE0]); + const mockDetectedType: FileTypeResult = { mime: 'image/jpeg', ext: 'jpg' as any }; + + mockExtractContentTypeFromFileName.mockReturnValue('image/jpeg'); + mockExtractFileExtension.mockReturnValue('jpg'); + s3Mock.on(GetObjectCommand).resolves({ + Body: { + async *[Symbol.asyncIterator]() { + yield mockBuffer; + } + } as any + }); + mockFromBuffer.mockResolvedValue(mockDetectedType); + + const result = await fileValidator.validateFile(mockBucketName, complexObjectKey); + + expect(result.isValid).toBe(true); + expect(mockExtractContentTypeFromFileName).toHaveBeenCalledWith('file.with.dots.jpg'); + }); + + it('should handle object keys without file extensions', async () => { + const noExtensionKey = 'folder/file_without_extension'; + mockExtractContentTypeFromFileName.mockReturnValue('application/octet-stream'); + + const result = await fileValidator.validateFile(mockBucketName, noExtensionKey); + + expect(result.isValid).toBe(false); // Unsupported content type + expect(mockExtractContentTypeFromFileName).toHaveBeenCalledWith('file_without_extension'); + }); + + it('should handle empty object keys gracefully', async () => { + const emptyKey = ''; + mockExtractContentTypeFromFileName.mockReturnValue('application/octet-stream'); + + const result = await fileValidator.validateFile(mockBucketName, emptyKey); + + expect(result.isValid).toBe(false); + expect(mockExtractContentTypeFromFileName).toHaveBeenCalledWith(''); + }); + }); +}); diff --git a/source/lambda/files-metadata-management/test/utils/http-response-formatter.test.ts b/source/lambda/files-metadata-management/test/utils/http-response-formatter.test.ts new file mode 100644 index 00000000..8ed01470 --- /dev/null +++ b/source/lambda/files-metadata-management/test/utils/http-response-formatter.test.ts @@ -0,0 +1,103 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { formatResponse, formatError } from '../../utils/http-response-formatters'; + +describe('When formatting messages as HTTP responses', () => { + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0999/v9.9.9" }'; + process.env.AWS_REGION = 'us-east-1'; + }); + + it('Should format the message into a default response correctly', () => { + const response = formatResponse('Test response'); + expect(response).toEqual({ + 'statusCode': 200, + 'headers': { + 'Content-Type': 'application/json', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Credentials': 'true', + 'Access-Control-Allow-Origin': '*' // NOSONAR - javascript:S5122 - Domain not known at this point. + }, + 'isBase64Encoded': false, + 'body': 'Test response' + }); + }); + + it('Should format the message into a response correctly with extra headers', () => { + const response = formatResponse({ 'test-body': 'Test response' }, 200, { + 'x-amz-testHeader': 'test-header-value' + }); + expect(response).toEqual({ + 'statusCode': 200, + 'headers': { + 'Content-Type': 'application/json', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Origin': '*', // NOSONAR - javascript:S5122 - Domain not known at this point. + 'Access-Control-Allow-Credentials': 'true', + 'x-amz-testHeader': 'test-header-value' + }, + 'isBase64Encoded': false, + 'body': '{"test-body":"Test response"}' + }); + }); + + afterAll(() => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env.AWS_REGION; + }); +}); + +describe('When formatting error responses as HTTP responses', () => { + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0999/v9.9.9" }'; + process.env.AWS_REGION = 'us-east-1'; + }); + + it('Should format the error into a default response correctly', () => { + const response = formatError({ + message: 'Test Error' + }); + expect(response).toEqual({ + 'statusCode': 400, + 'headers': { + 'Content-Type': 'application/json', + 'x-amzn-ErrorType': 'CustomExecutionError', + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET' + }, + 'isBase64Encoded': false, + 'body': '{"message":"Test Error"}' + }); + }); + + it('Should format a custom error response correctly', () => { + expect( + formatError({ + message: 'Test Error', + originalStatusCode: 417, + extraHeaders: { mockHeader: 'mockValue' } + }) + ).toEqual({ + 'statusCode': 400, + 'headers': { + 'Content-Type': 'application/json', + 'x-amzn-ErrorType': 'CustomExecutionError', + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'mockHeader': 'mockValue' + }, + 'isBase64Encoded': false, + 'body': '{"message":"Test Error","originalStatusCode":417}' + }); + }); + + afterAll(() => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env.AWS_REGION; + }); +}); diff --git a/source/lambda/files-metadata-management/test/utils/metadata-validator.test.ts b/source/lambda/files-metadata-management/test/utils/metadata-validator.test.ts new file mode 100644 index 00000000..0dce8064 --- /dev/null +++ b/source/lambda/files-metadata-management/test/utils/metadata-validator.test.ts @@ -0,0 +1,416 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { S3Client, HeadObjectCommand } from '@aws-sdk/client-s3'; +import { mockClient } from 'aws-sdk-client-mock'; +import { MetadataValidator } from '../../utils/metadata-validator'; +import { logger as mockLogger, metrics as mockMetrics } from '../../power-tools-init'; +import { retryWithBackoff } from '../../utils/utils'; +import { CloudWatchMetrics, ERROR_MESSAGES, VALIDATION_CONSTANTS } from '../../utils/constants'; + +const mockRetryWithBackoff = retryWithBackoff as jest.MockedFunction; + +import { ValidationResult } from '../../models/types'; + +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + warn: jest.fn(), + debug: jest.fn() + }, + tracer: { + captureAWSv3Client: jest.fn((client) => client), + captureMethod: jest.fn(() => (_target: any, _propertyKey: string, descriptor: PropertyDescriptor) => descriptor) + }, + metrics: { + addMetric: jest.fn(), + addDimension: jest.fn(), + setDefaultDimensions: jest.fn() + } +})); + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + +jest.mock('../../utils/utils', () => ({ + retryWithBackoff: jest.fn(async (operation, retrySettings) => { + let lastError: any; + + for (let attempt = 0; attempt <= retrySettings.maxRetries; attempt++) { + try { + return await operation(); + } catch (error) { + lastError = error instanceof Error ? error : new Error(String(error)); + if (attempt === retrySettings.maxRetries) { + break; + } + } + } + throw lastError; + }) +})); + +const s3Mock = mockClient(S3Client); + +describe('MetadataValidator', () => { + let validator: MetadataValidator; + const mockBucketName = 'test-bucket'; + const mockObjectKey = 'useCase1/user123/conv456/msg789/test-file.jpg'; + + beforeEach(() => { + jest.clearAllMocks(); + s3Mock.reset(); + + mockRetryWithBackoff.mockImplementation(async (operation: () => Promise) => { + return await operation(); + }); + + validator = new MetadataValidator(); + }); + + afterEach(() => { + s3Mock.restore(); + }); + + describe('Constructor', () => { + it('should initialize MetadataValidator with S3 client', () => { + expect(validator).toBeInstanceOf(MetadataValidator); + expect(mockLogger.info).toHaveBeenCalledWith( + `MetadataValidator initialized - component: MetadataValidator, requiredMetadataKey: ${VALIDATION_CONSTANTS.REQUIRED_TAG_KEY}, requiredMetadataValue: ${VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE}` + ); + }); + }); + + describe('validateMetadata - Successful Validation', () => { + it('should successfully validate metadata with correct required tag', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: { + [VALIDATION_CONSTANTS.REQUIRED_TAG_KEY]: VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE, + 'filename': 'original-file.jpg' + } + }); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(true); + expect(result.error).toBeUndefined(); + expect(result.originalFileName).toBe('original-file.jpg'); + + expect(s3Mock.commandCalls(HeadObjectCommand)).toHaveLength(1); + const headObjectCall = s3Mock.commandCalls(HeadObjectCommand)[0]; + expect(headObjectCall.args[0].input).toEqual({ + Bucket: mockBucketName, + Key: mockObjectKey + }); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.METADATA_VALIDATION_SUCCESS, + 'Count', + 1 + ); + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.METADATA_S3_HEAD_OBJECT_CALLS, + 'Count', + 1 + ); + + expect(mockLogger.debug).toHaveBeenCalledWith( + `${ERROR_MESSAGES.METADATA_VALIDATION_SUCCESS} - component: MetadataValidator, fileKey: ${mockObjectKey}, isValid: true, fileSize: 0, metadataValid: true, originalFileName: original-file.jpg` + ); + }); + + it('should handle metadata with additional fields beyond required tag', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: { + [VALIDATION_CONSTANTS.REQUIRED_TAG_KEY]: VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE, + 'filename': 'test-file.png', + 'additional-field': 'some-value', + 'another-field': 'another-value' + } + }); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(true); + expect(result.error).toBeUndefined(); + expect(result.originalFileName).toBe('test-file.png'); + }); + }); + + describe('validateMetadata - Validation Failures', () => { + it('should fail validation when required metadata tag is missing', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: { + 'other-tag': 'some-value', + 'filename': 'test-file.jpg' + } + }); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('Metadata validation failed'); + expect(result.originalFileName).toBe('test-file.jpg'); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.METADATA_VALIDATION_FAILURE, + 'Count', + 1 + ); + + expect(mockLogger.warn).toHaveBeenCalledWith( + `${ERROR_MESSAGES.SECURITY_VIOLATION_DETECTED} - component: MetadataValidator, requiredKey: ${VALIDATION_CONSTANTS.REQUIRED_TAG_KEY}, requiredValue: ${VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE}, actualValue: undefined, violationType: missing-metadata` + ); + + expect(mockLogger.warn).toHaveBeenCalledWith( + `${ERROR_MESSAGES.VALIDATION_FAILED} - component: MetadataValidator, fileKey: ${mockObjectKey}, isValid: false, fileSize: 0, metadataValid: false, failureReasons: metadata validation failed` + ); + }); + + it('should fail validation when required metadata tag has wrong value', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: { + [VALIDATION_CONSTANTS.REQUIRED_TAG_KEY]: 'wrong-value' + } + }); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('Metadata validation failed'); + + expect(mockLogger.warn).toHaveBeenCalledWith( + `${ERROR_MESSAGES.SECURITY_VIOLATION_DETECTED} - component: MetadataValidator, requiredKey: ${VALIDATION_CONSTANTS.REQUIRED_TAG_KEY}, requiredValue: ${VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE}, actualValue: wrong-value, violationType: invalid-value` + ); + }); + + it('should fail validation when metadata is completely empty', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: {} + }); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('Metadata validation failed'); + + expect(mockLogger.warn).toHaveBeenCalledWith( + `${ERROR_MESSAGES.SECURITY_VIOLATION_DETECTED} - component: MetadataValidator, requiredKey: ${VALIDATION_CONSTANTS.REQUIRED_TAG_KEY}, requiredValue: ${VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE}, actualValue: undefined, violationType: missing-metadata` + ); + }); + + it('should fail validation when HeadObject returns no metadata', async () => { + s3Mock.on(HeadObjectCommand).resolves({}); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('Metadata validation failed'); + }); + }); + + describe('validateMetadata - Error Handling', () => { + it('should handle S3 HeadObject NoSuchKey error', async () => { + const s3Error = new Error('NoSuchKey'); + s3Error.name = 'NoSuchKey'; + s3Mock.on(HeadObjectCommand).rejects(s3Error); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('NoSuchKey'); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.METADATA_VALIDATION_FAILURE, + 'Count', + 1 + ); + + expect(mockLogger.error).toHaveBeenCalledWith( + `${ERROR_MESSAGES.SYSTEM_ERROR} - component: MetadataValidator, bucketName: ${mockBucketName}, objectKey: ${mockObjectKey}, error: NoSuchKey, systemError: true` + ); + }); + + it('should handle S3 AccessDenied error', async () => { + const s3Error = new Error('AccessDenied'); + s3Error.name = 'AccessDenied'; + s3Mock.on(HeadObjectCommand).rejects(s3Error); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('AccessDenied'); + + expect(mockLogger.error).toHaveBeenCalledWith( + `${ERROR_MESSAGES.SYSTEM_ERROR} - component: MetadataValidator, bucketName: ${mockBucketName}, objectKey: ${mockObjectKey}, error: AccessDenied, systemError: true` + ); + }); + + it('should handle timeout errors', async () => { + const timeoutError = new Error('Request timeout'); + timeoutError.name = 'TimeoutError'; + s3Mock.on(HeadObjectCommand).rejects(timeoutError); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('Request timeout'); + }); + + it('should handle throttling errors', async () => { + const throttleError = new Error('ThrottlingException'); + throttleError.name = 'ThrottlingException'; + s3Mock.on(HeadObjectCommand).rejects(throttleError); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('ThrottlingException'); + }); + + it('should handle unknown errors gracefully', async () => { + const unknownError = new Error('Unknown system error'); + s3Mock.on(HeadObjectCommand).rejects(unknownError); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('Unknown system error'); + }); + + it('should handle non-Error objects thrown', async () => { + s3Mock.on(HeadObjectCommand).rejects('String error'); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('String error'); + }); + }); + + describe('Retry Logic Integration', () => { + it('should use retry logic for S3 HeadObject calls', async () => { + mockRetryWithBackoff.mockImplementation(async (operation: () => Promise, retrySettings: any) => { + expect(retrySettings).toEqual({ + maxRetries: VALIDATION_CONSTANTS.MAX_RETRIES, + backOffRate: VALIDATION_CONSTANTS.BACKOFF_MULTIPLIER, + initialDelayMs: VALIDATION_CONSTANTS.INITIAL_RETRY_DELAY_MS + }); + return await operation(); + }); + + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: { + [VALIDATION_CONSTANTS.REQUIRED_TAG_KEY]: VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE + } + }); + + await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(mockRetryWithBackoff).toHaveBeenCalledWith(expect.any(Function), { + maxRetries: VALIDATION_CONSTANTS.MAX_RETRIES, + backOffRate: VALIDATION_CONSTANTS.BACKOFF_MULTIPLIER, + initialDelayMs: VALIDATION_CONSTANTS.INITIAL_RETRY_DELAY_MS + }); + }); + + it('should handle retry failures gracefully', async () => { + const retryError = new Error('Max retries exceeded'); + mockRetryWithBackoff.mockRejectedValue(retryError); + + const result: ValidationResult = await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(result.isValid).toBe(false); + expect(result.error).toBe('Max retries exceeded'); + }); + }); + + describe('Metrics Recording', () => { + it('should record S3 HeadObject call metrics', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: { + [VALIDATION_CONSTANTS.REQUIRED_TAG_KEY]: VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE + } + }); + + await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.METADATA_S3_HEAD_OBJECT_CALLS, + 'Count', + 1 + ); + }); + + it('should record validation success metrics', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: { + [VALIDATION_CONSTANTS.REQUIRED_TAG_KEY]: VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE + } + }); + + await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.METADATA_VALIDATION_SUCCESS, + 'Count', + 1 + ); + }); + + it('should record validation failure metrics', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: {} + }); + + await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.METADATA_VALIDATION_FAILURE, + 'Count', + 1 + ); + }); + + it('should record system error metrics', async () => { + const s3Error = new Error('S3 system error'); + s3Mock.on(HeadObjectCommand).rejects(s3Error); + + await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(mockMetrics.addMetric).toHaveBeenCalledWith( + CloudWatchMetrics.METADATA_VALIDATION_FAILURE, + 'Count', + 1 + ); + }); + }); + + describe('Logging Behavior', () => { + it('should log debug information during validation process', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: { + [VALIDATION_CONSTANTS.REQUIRED_TAG_KEY]: VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE + } + }); + + await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(mockLogger.debug).toHaveBeenCalledWith( + `Starting metadata validation - component: MetadataValidator, bucketName: ${mockBucketName}, objectKey: ${mockObjectKey}, requiredMetadataKey: ${VALIDATION_CONSTANTS.REQUIRED_TAG_KEY}, requiredMetadataValue: ${VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE}` + ); + }); + + it('should use JSON.stringify for all log messages', async () => { + s3Mock.on(HeadObjectCommand).resolves({ + Metadata: {} + }); + + await validator.validateMetadata(mockBucketName, mockObjectKey); + + expect(mockLogger.warn).toHaveBeenCalledWith(expect.stringContaining('MetadataValidator')); + }); + }); +}); diff --git a/source/lambda/files-metadata-management/test/utils/utils.test.ts b/source/lambda/files-metadata-management/test/utils/utils.test.ts new file mode 100644 index 00000000..b1a1b67f --- /dev/null +++ b/source/lambda/files-metadata-management/test/utils/utils.test.ts @@ -0,0 +1,263 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + handleLambdaError, + getRetrySettings, + checkEnv, + retryWithBackoff, + delay, + calculateTTL, + extractFileExtension, + categorizeProcessingError +} from '../../utils/utils'; +import RequestValidationError from '../../utils/error'; + +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn() + }, + metrics: { + setDefaultDimensions: jest.fn() + } +})); + +jest.mock('../../utils/http-response-formatters', () => ({ + formatError: jest.fn().mockReturnValue({ + statusCode: 500, + body: JSON.stringify({ message: 'Error' }) + }) +})); + +describe('Utils Functions', () => { + describe('getRetrySettings', () => { + it('should return default retry settings', () => { + const settings = getRetrySettings(); + expect(settings).toHaveProperty('maxRetries'); + expect(settings).toHaveProperty('backOffRate'); + expect(settings).toHaveProperty('initialDelayMs'); + expect(typeof settings.maxRetries).toBe('number'); + expect(typeof settings.backOffRate).toBe('number'); + expect(typeof settings.initialDelayMs).toBe('number'); + }); + }); + + describe('handleLambdaError', () => { + it('should handle RequestValidationError', () => { + const error = new RequestValidationError('Test validation error'); + const result = handleLambdaError(error, 'testAction', 'TestContext'); + + expect(result).toBeDefined(); + expect(result.statusCode).toBe(500); + }); + + it('should handle generic errors', () => { + const error = new Error('Generic error'); + const result = handleLambdaError(error, 'testAction'); + + expect(result).toBeDefined(); + expect(result.statusCode).toBe(500); + }); + }); + + describe('retryWithBackoff', () => { + it('should succeed on first attempt', async () => { + const mockOperation = jest.fn().mockResolvedValue('success'); + const retrySettings = { maxRetries: 3, backOffRate: 2, initialDelayMs: 100 }; + + const result = await retryWithBackoff(mockOperation, retrySettings); + + expect(result).toBe('success'); + expect(mockOperation).toHaveBeenCalledTimes(1); + }); + + it('should retry on failure and eventually succeed', async () => { + const mockOperation = jest + .fn() + .mockRejectedValueOnce(new Error('First failure')) + .mockRejectedValueOnce(new Error('Second failure')) + .mockResolvedValue('success'); + const retrySettings = { maxRetries: 3, backOffRate: 2, initialDelayMs: 10 }; + + const result = await retryWithBackoff(mockOperation, retrySettings); + + expect(result).toBe('success'); + expect(mockOperation).toHaveBeenCalledTimes(3); + }); + + it('should throw proper Error object after all retries exhausted', async () => { + const mockOperation = jest.fn().mockRejectedValue(new Error('Persistent failure')); + const retrySettings = { maxRetries: 2, backOffRate: 2, initialDelayMs: 10 }; + + await expect(retryWithBackoff(mockOperation, retrySettings)).rejects.toThrow('Persistent failure'); + expect(mockOperation).toHaveBeenCalledTimes(3); // Initial + 2 retries + }); + + it('should handle non-Error objects and convert them to Error', async () => { + const mockOperation = jest.fn().mockRejectedValue('string error'); + const retrySettings = { maxRetries: 1, backOffRate: 2, initialDelayMs: 10 }; + + await expect(retryWithBackoff(mockOperation, retrySettings)).rejects.toThrow('string error'); + expect(mockOperation).toHaveBeenCalledTimes(2); // Initial + 1 retry + }); + }); + + describe('checkEnv', () => { + const originalEnv = process.env; + + beforeEach(() => { + jest.resetModules(); + process.env = { ...originalEnv }; + }); + + afterAll(() => { + process.env = originalEnv; + }); + + it('should not throw when all required environment variables are set', () => { + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-bucket'; + + expect(() => { + checkEnv(); + }).not.toThrow(); + }); + + it('should throw error when required environment variables are missing', () => { + delete process.env.MULTIMODAL_METADATA_TABLE_NAME; + delete process.env.MULTIMODAL_DATA_BUCKET; + + expect(() => { + checkEnv(); + }).toThrow('Missing required environment variables'); + }); + }); + + describe('delay', () => { + it('should delay execution for specified milliseconds', async () => { + const startTime = Date.now(); + await delay(50); + const endTime = Date.now(); + + expect(endTime - startTime).toBeGreaterThanOrEqual(45); // Allow some tolerance + }); + + it('should handle zero delay', async () => { + const startTime = Date.now(); + await delay(0); + const endTime = Date.now(); + + expect(endTime - startTime).toBeLessThan(10); + }); + }); + + describe('calculateTTL', () => { + it('should calculate TTL correctly', () => { + const uploadTimestamp = 1640995200000; // Jan 1, 2022 00:00:00 UTC + const ttlMS = 48 * 60 * 60 * 1000; // 48 hours in milliseconds + + const result = calculateTTL(uploadTimestamp, ttlMS); + + expect(result).toBe(Math.floor((uploadTimestamp + ttlMS) / 1000)); + }); + + it('should handle different TTL values', () => { + const uploadTimestamp = 1640995200000; + const ttlMS = 24 * 60 * 60 * 1000; // 24 hours + + const result = calculateTTL(uploadTimestamp, ttlMS); + + expect(result).toBe(1641081600); // Expected TTL timestamp + }); + }); + + describe('extractFileExtension', () => { + it('should extract file extension correctly', () => { + expect(extractFileExtension('test.jpg')).toBe('jpg'); + expect(extractFileExtension('document.pdf')).toBe('pdf'); + expect(extractFileExtension('image.PNG')).toBe('png'); + expect(extractFileExtension('file.tar.gz')).toBe('gz'); + }); + + it('should handle files without extensions', () => { + expect(extractFileExtension('README')).toBe('unknown'); + expect(extractFileExtension('file.')).toBe('unknown'); + expect(extractFileExtension('')).toBe('unknown'); + }); + + it('should handle edge cases', () => { + expect(extractFileExtension('.hidden')).toBe('hidden'); + expect(extractFileExtension('path/to/file.txt')).toBe('txt'); + }); + }); + + describe('categorizeProcessingError', () => { + it('should categorize system errors correctly', () => { + const timeoutError = new Error('Request timeout'); + timeoutError.name = 'TimeoutError'; + expect(categorizeProcessingError(timeoutError)).toBe('system-error'); + + const serviceError = new Error('ServiceUnavailable'); + expect(categorizeProcessingError(serviceError)).toBe('system-error'); + + const internalError = new Error('InternalError occurred'); + expect(categorizeProcessingError(internalError)).toBe('system-error'); + + const throttlingError = new Error('Throttling detected'); + expect(categorizeProcessingError(throttlingError)).toBe('system-error'); + }); + + it('should categorize DynamoDB errors correctly', () => { + const conditionalError = new Error('Condition failed'); + conditionalError.name = 'ConditionalCheckFailedException'; + expect(categorizeProcessingError(conditionalError)).toBe('dynamodb-error'); + + const resourceError = new Error('Resource not found'); + resourceError.name = 'ResourceNotFoundException'; + expect(categorizeProcessingError(resourceError)).toBe('dynamodb-error'); + + const throughputError = new Error('Throughput exceeded'); + throughputError.name = 'ProvisionedThroughputExceededException'; + expect(categorizeProcessingError(throughputError)).toBe('dynamodb-error'); + }); + + it('should categorize S3 errors correctly', () => { + const noKeyError = new Error('NoSuchKey: The specified key does not exist'); + expect(categorizeProcessingError(noKeyError)).toBe('s3-error'); + + const accessError = new Error('AccessDenied: Access denied'); + expect(categorizeProcessingError(accessError)).toBe('s3-error'); + + const bucketError = new Error('NoSuchBucket: The specified bucket does not exist'); + expect(categorizeProcessingError(bucketError)).toBe('s3-error'); + }); + + it('should categorize validation errors correctly', () => { + const formatError = new Error('Invalid file key format'); + expect(categorizeProcessingError(formatError)).toBe('validation-error'); + + const validationError = new Error('Validation failed'); + expect(categorizeProcessingError(validationError)).toBe('validation-error'); + + const validationException = new Error('Invalid input'); + validationException.name = 'ValidationException'; + expect(categorizeProcessingError(validationException)).toBe('validation-error'); + }); + + it('should default to application error for unknown errors', () => { + const unknownError = new Error('Unknown error occurred'); + expect(categorizeProcessingError(unknownError)).toBe('application-error'); + + const customError = new Error('Custom business logic error'); + expect(categorizeProcessingError(customError)).toBe('application-error'); + }); + }); +}); diff --git a/source/lambda/files-metadata-management/tsconfig.json b/source/lambda/files-metadata-management/tsconfig.json new file mode 100644 index 00000000..030c85e7 --- /dev/null +++ b/source/lambda/files-metadata-management/tsconfig.json @@ -0,0 +1,76 @@ +{ + "compilerOptions": { + "target": "ES2018", + "module": "commonjs", + "lib": [ + "es2018", + "dom" + ], + "declaration": true, + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "noImplicitThis": true, + "alwaysStrict": true, + "noUnusedLocals": false, + "noUnusedParameters": false, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": false, + "inlineSourceMap": true, + "inlineSources": true, + "experimentalDecorators": true, + "strictPropertyInitialization": false, + "typeRoots": [ + "./node_modules/@types" + ], + "esModuleInterop": true, + "resolveJsonModule": true, + "outDir": "./dist", + "moduleResolution": "Node", + "rootDir": ".", + "paths": { + "aws-sdk-lib": [ + "../layers/aws-sdk-lib/dist" + ], + "aws-node-user-agent-config": [ + "../layers/aws-node-user-agent-config/dist" + ], + "aws-lambda": [ + "../layers/aws-sdk-lib/node_modules/@types/aws-lambda" + ], + "@aws-lambda-powertools/logger": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/logger" + ], + "@aws-lambda-powertools/logger/middleware": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/logger/lib/cjs/middleware/middy.d.ts" + ], + "@aws-lambda-powertools/tracer": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/tracer" + ], + "@aws-lambda-powertools/tracer/middleware": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/tracer/lib/cjs/middleware/middy.d.ts" + ], + "@aws-lambda-powertools/metrics": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/metrics" + ], + "@aws-lambda-powertools/metrics/middleware": [ + "../layers/aws-node-user-agent-config/node_modules/@aws-lambda-powertools/metrics/lib/cjs/middleware/middy.d.ts" + ], + "@aws-sdk/client-dynamodb": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/client-dynamodb" + ], + "@aws-sdk/client-s3": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/client-s3" + ], + "@middy/core": [ + "../layers/aws-node-user-agent-config/node_modules/@middy/core" + ], + "@smithy/types": [ + "../layers/aws-sdk-lib/node_modules/@smithy/types" + ], + "@aws-sdk/util-dynamodb": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/util-dynamodb" + ] + } + } + } diff --git a/source/lambda/files-metadata-management/utils/constants.ts b/source/lambda/files-metadata-management/utils/constants.ts new file mode 100644 index 00000000..7f8e8bbe --- /dev/null +++ b/source/lambda/files-metadata-management/utils/constants.ts @@ -0,0 +1,104 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export const RETRY_CONFIG = { + maxRetries: 3, + backOffRate: 2, + initialDelayMs: 1000 +}; + +export enum CloudWatchNamespace { + FILE_HANDLING = 'Solution/FileHandling' +} + +export enum CloudWatchMetrics { + METADATA_UPDATE_SUCCESS = 'MetadataUpdateSuccess', + METADATA_UPDATE_FAILURE = 'MetadataUpdateFailure', + FILE_VALIDATION_SUCCESS = 'FileValidationSuccess', + FILE_VALIDATION_FAILURE = 'FileValidationFailure', + FILES_UPLOADED = 'FilesUploaded', + FILES_UPLOADED_WITH_EXTENSION = 'FilesExtUploaded', + FILE_EXTENSION = 'FileExtension', + FILE_SIZE = 'FileSize', + METADATA_VALIDATION_SUCCESS = 'MetadataValidationSuccess', + METADATA_VALIDATION_FAILURE = 'MetadataValidationFailure', + METADATA_S3_HEAD_OBJECT_CALLS = 'MetadataS3HeadObjectCalls' +} + +export const MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR = 'MULTIMODAL_METADATA_TABLE_NAME'; +export const MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR = 'MULTIMODAL_DATA_BUCKET'; +export const REQUIRED_ENV_VARS = [MULTIMODAL_FILES_METADATA_TABLE_NAME_ENV_VAR, MULTIMODAL_FILES_BUCKET_NAME_ENV_VAR]; + +// TTL configuration - 48 hours from upload time (in ms) +export const MULTIMODAL_FILE_TTL_MS = 172800000; + +// File key validation regex pattern +export const FILE_KEY_PATTERN = /^([^\/]+)\/([^\/]+)\/([^\/]+)\/([^\/]+)\/(.+)$/; + +// Event validation constants +export const EVENT_VALIDATION = { + EXPECTED_SOURCE: 'aws.s3', + EXPECTED_DETAIL_TYPE: 'Object Created' +}; + +// Error Messages +export const ERROR_MESSAGES = { + INVALID_EVENT_TYPE: 'Invalid event type', + UNEXPECTED_BUCKET: 'Event from unexpected bucket', + INVALID_FILE_KEY_FORMAT: 'Invalid file key format', + SECURITY_VIOLATION_DETECTED: 'Missing or invalid source metadata - security violation detected', + SYSTEM_ERROR: 'Metadata validation system error', + VALIDATION_FAILED: 'Metadata validation failed', + DYNAMODB_UPDATE_FAILED: 'DynamoDB update attempt failed', + METADATA_UPDATE_FAILED_AFTER_RETRIES: 'Failed to update metadata after retries', + FILE_MARKED_INVALID_METADATA: 'File marked invalid due to metadata validation failure', + METADATA_UPDATE_SUCCESS: 'Updated metadata for file', + PROCESSING_SUCCESS: 'Successfully processed event for file', + PROCESSING_FAILED: 'Failed to process event for object', + SYSTEM_ERROR_PROCESSING: 'System error processing event for object', + METADATA_VALIDATION_SUCCESS: 'Metadata validation successful' +}; + +// Validation Constants +export const VALIDATION_CONSTANTS = { + REQUIRED_TAG_KEY: 'source', + REQUIRED_TAG_VALUE: 'gaab', + TIMEOUT_MS: 5000, + MAX_RETRIES: 3, + INITIAL_RETRY_DELAY_MS: 1000, + BACKOFF_MULTIPLIER: 2 +}; + +export const MAGIC_NUMBER_BUFFER_SIZE = 4096; // 4KB + +export const EXTENSION_TO_MIME_TYPE: Record = { + 'png': 'image/png', + 'jpg': 'image/jpeg', + 'jpeg': 'image/jpeg', + 'gif': 'image/gif', + 'webp': 'image/webp', + 'pdf': 'application/pdf', + 'csv': 'text/csv', + 'doc': 'application/msword', + 'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + 'xls': 'application/vnd.ms-excel', + 'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + 'html': 'text/html', + 'htm': 'text/html', + 'txt': 'text/plain', + 'md': 'text/markdown' +}; + +// Extensions that can have multiple valid MIME types +export const EXTENSION_MIME_VARIANTS: Record = { + 'doc': ['application/msword', 'application/x-cfb', 'application/vnd.ms-office'], + 'xls': ['application/vnd.ms-excel', 'application/x-cfb', 'application/vnd.ms-office', 'application/msexcel', 'application/x-msexcel', 'application/x-ms-excel'] +}; + +// File extensions that don't have magic numbers +export const NO_MAGIC_NUMBER_EXTENSIONS = new Set(['txt', 'md', 'csv', 'html', 'htm']); + +export const ALL_SUPPORTED_FILE_TYPES = new Set([ + ...Object.values(EXTENSION_TO_MIME_TYPE), + ...Object.values(EXTENSION_MIME_VARIANTS).flat() +]); diff --git a/source/lambda/files-metadata-management/utils/error.ts b/source/lambda/files-metadata-management/utils/error.ts new file mode 100644 index 00000000..fa794c69 --- /dev/null +++ b/source/lambda/files-metadata-management/utils/error.ts @@ -0,0 +1,12 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +/** + * Custom error class for request validation errors + */ +export default class RequestValidationError extends Error { + constructor(message: string) { + super(message); + this.name = 'CustomHttpError'; + } +} diff --git a/source/lambda/files-metadata-management/utils/eventbridge-processor.ts b/source/lambda/files-metadata-management/utils/eventbridge-processor.ts new file mode 100644 index 00000000..2733e73b --- /dev/null +++ b/source/lambda/files-metadata-management/utils/eventbridge-processor.ts @@ -0,0 +1,215 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { DynamoDBClient, UpdateItemCommand, UpdateItemCommandInput } from '@aws-sdk/client-dynamodb'; +import { EventBridgeEvent } from 'aws-lambda'; +import { AWSClientManager } from 'aws-sdk-lib'; +import { logger, tracer, metrics } from '../power-tools-init'; +import { + retryWithBackoff, + getRetrySettings, + calculateTTL, + extractFileExtension, + categorizeProcessingError +} from './utils'; +import { FILE_KEY_PATTERN, MULTIMODAL_FILE_TTL_MS, CloudWatchMetrics, ERROR_MESSAGES } from './constants'; +import { FileKeyComponents, MetadataUpdateResult, FileStatus, ValidationResult } from '../models/types'; + +export class EventBridgeProcessor { + private readonly dynamoClient: DynamoDBClient; + private readonly tableName: string; + private readonly bucketName: string; + + constructor(tableName: string, bucketName: string) { + this.tableName = tableName; + this.bucketName = bucketName; + this.dynamoClient = AWSClientManager.getServiceClient('dynamodb', tracer); + } + + /** + * Processes a single EventBridge S3 event and updates file metadata + * @param event - EventBridge S3 event + * @param validationResult - Result from metadata validation + * @returns Processing result + */ + async processEvent( + event: EventBridgeEvent, + validationResult: ValidationResult + ): Promise { + try { + if (event.detail.bucket.name !== this.bucketName) { + const errorMessage = `${ERROR_MESSAGES.UNEXPECTED_BUCKET}: ${event.detail.bucket.name}`; + logger.error( + `${errorMessage} - actualBucket: ${event.detail.bucket.name}, expectedBucket: ${this.bucketName}` + ); + throw new Error(errorMessage); + } + + const objectKey = event.detail.object.key; + const fileSize = event.detail.object.size || 0; + const fileKeyComponents = this.parseFileKey(objectKey); + + await this.updateFileMetadata(fileKeyComponents, fileSize, event.time, validationResult); + + const displayFileName = validationResult.originalFileName; + const fileExtension = extractFileExtension(displayFileName); + + // Set UseCaseId as default dimension for all metrics + metrics.setDefaultDimensions({ UseCaseId: fileKeyComponents.useCaseId }); + metrics.addMetric(CloudWatchMetrics.METADATA_UPDATE_SUCCESS, 'Count', 1); + metrics.addMetric(CloudWatchMetrics.FILES_UPLOADED, 'Count', 1); + metrics.addMetric(CloudWatchMetrics.FILE_SIZE, 'Bytes', fileSize); + + // Publish metrics before adding FileExtension dimension + metrics.publishStoredMetrics(); + + // Add FileExtension dimension for extension-specific tracking + metrics.addDimension(CloudWatchMetrics.FILE_EXTENSION, fileExtension); + metrics.addMetric(CloudWatchMetrics.FILES_UPLOADED_WITH_EXTENSION, 'Count', 1); + + logger.info( + `${ERROR_MESSAGES.PROCESSING_SUCCESS} - fileKey: ${fileKeyComponents.fileKey}, fileName: ${displayFileName}` + ); + + return { + success: true, + fileKey: fileKeyComponents.fileKey, + fileName: displayFileName + }; + } catch (error) { + const errorObj = error instanceof Error ? error : new Error('Unknown error'); + const objectKey = event.detail?.object?.key || 'unknown'; + + const errorCategory = categorizeProcessingError(errorObj); + + metrics.addMetric(CloudWatchMetrics.METADATA_UPDATE_FAILURE, 'Count', 1); + if (errorCategory === 'system-error') { + logger.warn( + `${ERROR_MESSAGES.SYSTEM_ERROR_PROCESSING} - objectKey: ${objectKey}, error: ${errorObj.message}, errorCategory: ${errorCategory}, stack: ${errorObj.stack}` + ); + } else { + logger.error( + `${ERROR_MESSAGES.PROCESSING_FAILED} - objectKey: ${objectKey}, error: ${errorObj.message}, errorCategory: ${errorCategory}, stack: ${errorObj.stack}` + ); + } + + return { + success: false, + fileKey: objectKey, + fileName: 'unknown', + error: errorObj.message + }; + } + } + + /** + * Parses and validates the S3 object key to extract file key components + * @param objectKey - S3 object key + * @returns Parsed file key components + */ + private parseFileKey(objectKey: string): FileKeyComponents { + const match = objectKey.match(FILE_KEY_PATTERN); + + if (!match) { + const errorMessage = `${ERROR_MESSAGES.INVALID_FILE_KEY_FORMAT}: ${objectKey}. Expected format: useCaseId/userId/conversationId/messageId/fileName.ext`; + logger.error(`${errorMessage} - objectKey: ${objectKey}`); + throw new Error(errorMessage); + } + + const [, useCaseId, userId, conversationId, messageId, fileName] = match; + + return { + useCaseId, + userId, + conversationId, + messageId, + fileName, + fileKey: `${useCaseId}/${userId}/${conversationId}/${messageId}` + }; + } + + /** + * Updates file metadata in DynamoDB based on validation result + * @param fileKeyComponents - Parsed file key components + * @param fileSize - File size from EventBridge event + * @param eventTime - EventBridge event timestamp + * @param validationResult - Metadata validation result + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###updateFileMetadata' }) + private async updateFileMetadata( + fileKeyComponents: FileKeyComponents, + fileSize: number, + eventTime: string, + validationResult: ValidationResult + ): Promise { + const retrySettings = getRetrySettings(); + + const operation = async (): Promise => { + try { + const uploadTimestamp = new Date(eventTime).getTime(); + const ttl = calculateTTL(uploadTimestamp, MULTIMODAL_FILE_TTL_MS); + + // Determine final status based on validation result + const finalStatus = validationResult.isValid ? FileStatus.UPLOADED : FileStatus.INVALID; + + // Build update expression - only update status, uploadTimestamp and TTL + const updateExpression = 'SET #status = :status, #uploadTimestamp = :uploadTimestamp, #ttl = :ttl'; + const expressionAttributeNames: Record = { + '#status': 'status', + '#uploadTimestamp': 'uploadTimestamp', + '#ttl': 'ttl' + }; + const expressionAttributeValues: Record = { + ':status': { S: finalStatus }, + ':uploadTimestamp': { N: uploadTimestamp.toString() }, + ':ttl': { N: ttl.toString() }, + ':pendingStatus': { S: FileStatus.PENDING } + }; + + const updateParams: UpdateItemCommandInput = { + TableName: this.tableName, + Key: { + fileKey: { S: fileKeyComponents.fileKey }, + fileName: { S: validationResult.originalFileName } + }, + UpdateExpression: updateExpression, + ConditionExpression: + 'attribute_exists(fileKey) AND attribute_exists(fileName) AND #status = :pendingStatus', + ExpressionAttributeNames: expressionAttributeNames, + ExpressionAttributeValues: expressionAttributeValues + }; + + const command = new UpdateItemCommand(updateParams); + await this.dynamoClient.send(command); + + if (finalStatus === FileStatus.INVALID) { + logger.info( + `${ERROR_MESSAGES.FILE_MARKED_INVALID_METADATA} - fileKey: ${fileKeyComponents.fileKey}, fileName: ${validationResult.originalFileName}, reason: metadata-validation-failure, error: ${validationResult.error}` + ); + } + + logger.debug( + `${ERROR_MESSAGES.METADATA_UPDATE_SUCCESS} - fileKey: ${fileKeyComponents.fileKey}, fileName: ${validationResult.originalFileName}, status: ${finalStatus}, fileSize: ${fileSize}, uploadTimestamp: ${uploadTimestamp}, ttl: ${ttl}` + ); + } catch (error) { + const errorObj = error instanceof Error ? error : new Error('Unknown error'); + + logger.warn( + `${ERROR_MESSAGES.DYNAMODB_UPDATE_FAILED} - error: ${errorObj.message}, fileKey: ${fileKeyComponents.fileKey}, fileName: ${validationResult.originalFileName}` + ); + + throw errorObj; + } + }; + + try { + await retryWithBackoff(operation, retrySettings); + } catch (error) { + const errorObj = error instanceof Error ? error : new Error('Unknown error'); + logger.error( + `${ERROR_MESSAGES.METADATA_UPDATE_FAILED_AFTER_RETRIES} - maxRetries: ${retrySettings.maxRetries + 1}, lastError: ${errorObj.message}` + ); + throw new Error(`${ERROR_MESSAGES.METADATA_UPDATE_FAILED_AFTER_RETRIES}: ${errorObj.message}`); + } + } +} diff --git a/source/lambda/files-metadata-management/utils/file-validator.ts b/source/lambda/files-metadata-management/utils/file-validator.ts new file mode 100644 index 00000000..73ab2ae6 --- /dev/null +++ b/source/lambda/files-metadata-management/utils/file-validator.ts @@ -0,0 +1,242 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { S3Client, GetObjectCommand } from '@aws-sdk/client-s3'; +import * as FileType from 'file-type'; +import { logger, tracer } from '../power-tools-init'; +import { customAwsConfig } from 'aws-node-user-agent-config'; +import { FileValidationResult } from '../models/types'; +import { MAGIC_NUMBER_BUFFER_SIZE, ALL_SUPPORTED_FILE_TYPES, EXTENSION_TO_MIME_TYPE, EXTENSION_MIME_VARIANTS, NO_MAGIC_NUMBER_EXTENSIONS } from './constants'; +import { extractFileExtension, extractContentTypeFromFileName } from './utils'; + +/** + * Service for validating uploaded files using magic numbers and content type verification + */ +export class FileValidator { + private readonly s3Client: S3Client; + + constructor() { + const awsConfig = customAwsConfig(); + this.s3Client = tracer.captureAWSv3Client(new S3Client(awsConfig)); + } + + /** + * Validates a file by downloading it from S3 and checking its magic numbers + * @param input - File validation input parameters + * @returns Validation result with detected file type and any errors + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateFile' }) + async validateFile(bucketName: string, objectKey: string): Promise { + let validationErrors: string = ''; + + // Extract filename from object key and get declared content type + const fileName = objectKey.split('/').pop() || objectKey; + const declaredContentType = extractContentTypeFromFileName(fileName); + + try { + logger.info( + `Starting file validation for: ${objectKey}`, + JSON.stringify({ + fileName: fileName, + declaredContentType: declaredContentType + }) + ); + + // Check if declared content type is supported (no S3 calls needed) + if (!ALL_SUPPORTED_FILE_TYPES.has(declaredContentType)) { + validationErrors += `Unsupported declared content type: ${declaredContentType}; `; + return { + isValid: false, + validationErrors + }; + } + + // Download only the first few KB for magic number detection (optimization) + const magicNumberBuffer = await this.downloadPartialFileFromS3( + bucketName, + objectKey, + MAGIC_NUMBER_BUFFER_SIZE + ); + + // Detect actual file type using magic numbers + const detectedType = await FileType.fromBuffer(magicNumberBuffer); + + // Handle files that don't have magic numbers + if (!detectedType) { + const extension = extractFileExtension(fileName); + if (NO_MAGIC_NUMBER_EXTENSIONS.has(extension)) { + // skip magic number validation and just validate the declared content type + const isValid = validationErrors.length === 0; + + logger.info( + `File validation completed (no magic numbers)`, + JSON.stringify({ + fileName: fileName, + isValid, + extension: extension, + declaredContentType: declaredContentType + }) + ); + + return { + isValid, + validationErrors + }; + } else { + validationErrors += 'Unable to detect file type from magic numbers; '; + return { + isValid: false, + validationErrors + }; + } + } + + logger.debug( + `File type detection results`, + JSON.stringify({ + fileName: fileName, + detectedMimeType: detectedType.mime, + detectedExtension: detectedType.ext, + declaredContentType: declaredContentType + }) + ); + + // Validate detected type is supported + if (!ALL_SUPPORTED_FILE_TYPES.has(detectedType.mime)) { + validationErrors += `Unsupported file type detected: ${detectedType.mime}; `; + } + + const expectedExtension = extractFileExtension(fileName); + + // Validate content type mismatch, but allow known variants for specific extensions + if (!this.isValidMimeTypeForExtension(expectedExtension, declaredContentType, detectedType.mime)) { + validationErrors += `Content type mismatch: declared '${declaredContentType}' but detected '${detectedType.mime}'; `; + } + + // Validate file extension matches detected type + if (expectedExtension !== 'unknown' && !this.areExtensionsEquivalent(expectedExtension, detectedType.ext)) { + validationErrors += `File extension mismatch: filename suggests '${expectedExtension}' but detected '${detectedType.ext}'; `; + } + + const isValid = validationErrors.length === 0; + + logger.info( + `File validation completed`, + JSON.stringify({ + fileName: fileName, + isValid, + detectedMimeType: detectedType.mime, + detectedExtension: detectedType.ext, + validationErrorCount: validationErrors.length, + }) + ); + + return { + isValid, + validationErrors + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : 'Unknown validation error'; + logger.error( + `File validation failed for: ${objectKey}`, + JSON.stringify({ + error: errorMessage, + fileName: fileName + }) + ); + + validationErrors += `Validation process failed: ${errorMessage}; `; + + return { + isValid: false, + validationErrors + }; + } + } + + /** + * Downloads partial file from S3 for magic number detection (optimization) + * @param bucketName - S3 bucket name + * @param objectKey - S3 object key + * @param maxBytes - Maximum bytes to download + * @returns Partial file buffer for magic number detection + */ + @tracer.captureMethod({ captureResponse: false, subSegmentName: '###downloadPartialFileFromS3' }) + private async downloadPartialFileFromS3(bucketName: string, objectKey: string, maxBytes: number): Promise { + try { + const command = new GetObjectCommand({ + Bucket: bucketName, + Key: objectKey, + Range: `bytes=0-${maxBytes - 1}` // Download only first maxBytes + }); + + const response = await this.s3Client.send(command); + + if (!response.Body) { + throw new Error('Empty response body from S3'); + } + + // Convert stream to buffer + const chunks: Uint8Array[] = []; + const stream = response.Body as any; + + for await (const chunk of stream) { + chunks.push(chunk); + } + + return Buffer.concat(chunks); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : 'Unknown S3 error'; + logger.error( + `Failed to download partial file from S3: ${bucketName}/${objectKey}`, + JSON.stringify({ + error: errorMessage + }) + ); + throw new Error(`S3 partial download failed: ${errorMessage}`); + } + } + + /** + * Checks if two file extensions are equivalent (e.g., 'jpeg' and 'jpg') + * @param extension1 - First extension to compare + * @param extension2 - Second extension to compare + * @returns True if extensions are equivalent + */ + private areExtensionsEquivalent(extension1: string, extension2: string): boolean { + if (extension1 === extension2) { + return true; + } + + const equivalentGroups = [ + new Set(['jpeg', 'jpg']), + new Set(['html', 'htm']), + new Set(['doc', 'docx', 'cfb']), + new Set(['xls', 'xlsx', 'cfb']) + ]; + + return equivalentGroups.some(group => group.has(extension1) && group.has(extension2)); + } + + /** + * Checks if the detected MIME type is valid for the given extension and declared type + * @param extension - File extension + * @param declaredMimeType - MIME type declared based on file extension + * @param detectedMimeType - MIME type detected by file-type library + * @returns True if the MIME type combination is valid + */ + private isValidMimeTypeForExtension(extension: string, declaredMimeType: string, detectedMimeType: string): boolean { + if (declaredMimeType === detectedMimeType) { + return true; + } + + const variants = EXTENSION_MIME_VARIANTS[extension]; + if (variants) { + // Both declared and detected types must be in the allowed variants + const expectedDeclaredType = EXTENSION_TO_MIME_TYPE[extension]; + return declaredMimeType === expectedDeclaredType && variants.includes(detectedMimeType); + } + + return false; + } +} \ No newline at end of file diff --git a/source/lambda/files-metadata-management/utils/http-response-formatters.ts b/source/lambda/files-metadata-management/utils/http-response-formatters.ts new file mode 100644 index 00000000..35698e7b --- /dev/null +++ b/source/lambda/files-metadata-management/utils/http-response-formatters.ts @@ -0,0 +1,57 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export interface ErrorResponse { + message: string; + originalStatusCode?: number; + extraHeaders?: Record; +} + +/** + * Formats success responses for Lambda functions + * @param body - Response body (string or object) + * @param statusCode - HTTP status code (default: 200) + * @param extraHeaders - Additional headers + * @returns Formatted success response + */ +export const formatResponse = (body: any, statusCode: number = 200, extraHeaders?: Record): any => { + return { + statusCode, + headers: { + 'Content-Type': 'application/json', + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Credentials': 'true', + 'Access-Control-Allow-Origin': '*', // NOSONAR - javascript:S5122 - Domain not known at this point. + ...extraHeaders + }, + isBase64Encoded: false, + body: typeof body === 'string' ? body : JSON.stringify(body) + }; +}; + +/** + * Formats error responses for Lambda functions + * @param error - Error response object + * @returns Formatted error response + */ +export const formatError = (error: ErrorResponse): any => { + const responseBody: any = { message: error.message }; + if (error.originalStatusCode) { + responseBody.originalStatusCode = error.originalStatusCode; + } + + return { + statusCode: 400, + headers: { + 'Content-Type': 'application/json', + 'x-amzn-ErrorType': 'CustomExecutionError', + 'Access-Control-Allow-Origin': '*', // NOSONAR - javascript:S5122 - Domain not known at this point. + 'Access-Control-Allow-Headers': 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + ...error.extraHeaders + }, + isBase64Encoded: false, + body: JSON.stringify(responseBody) + }; +}; diff --git a/source/lambda/files-metadata-management/utils/metadata-validator.ts b/source/lambda/files-metadata-management/utils/metadata-validator.ts new file mode 100644 index 00000000..9e9a4e1e --- /dev/null +++ b/source/lambda/files-metadata-management/utils/metadata-validator.ts @@ -0,0 +1,124 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { S3Client, HeadObjectCommand } from '@aws-sdk/client-s3'; +import { MetricUnit } from '@aws-lambda-powertools/metrics'; +import { customAwsConfig } from 'aws-node-user-agent-config'; + +import { logger, tracer, metrics } from '../power-tools-init'; +import { retryWithBackoff } from './utils'; +import { CloudWatchMetrics, VALIDATION_CONSTANTS, ERROR_MESSAGES } from './constants'; +import { RetrySettings, ValidationResult } from '../models/types'; + +export class MetadataValidator { + private readonly s3Client: S3Client; + + constructor() { + const awsConfig = customAwsConfig(); + this.s3Client = tracer.captureAWSv3Client(new S3Client(awsConfig)); + + logger.info( + `MetadataValidator initialized - component: MetadataValidator, requiredMetadataKey: ${VALIDATION_CONSTANTS.REQUIRED_TAG_KEY}, requiredMetadataValue: ${VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE}` + ); + } + + /** + * Validates metadata for an S3 object + * @param bucketName - S3 bucket name + * @param objectKey - S3 object key + * @returns Validation result + */ + async validateMetadata(bucketName: string, objectKey: string): Promise { + try { + logger.debug( + `Starting metadata validation - component: MetadataValidator, bucketName: ${bucketName}, objectKey: ${objectKey}, requiredMetadataKey: ${VALIDATION_CONSTANTS.REQUIRED_TAG_KEY}, requiredMetadataValue: ${VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE}` + ); + + const headObjectResponse = await this.fetchMetadataAndSize(bucketName, objectKey); + const metadata = headObjectResponse.Metadata || {}; + const fileSize = headObjectResponse.ContentLength || 0; + const originalFileName = metadata['filename'] || '' + + const metadataValid = this.validateSourceTag(metadata); + const isValid = metadataValid; + + if (isValid) { + metrics.addMetric(CloudWatchMetrics.METADATA_VALIDATION_SUCCESS, MetricUnit.Count, 1); + logger.debug(`${ERROR_MESSAGES.METADATA_VALIDATION_SUCCESS} - component: MetadataValidator, fileKey: ${objectKey}, isValid: true, fileSize: ${fileSize}, metadataValid: ${metadataValid}, originalFileName: ${originalFileName}`); + } else { + metrics.addMetric(CloudWatchMetrics.METADATA_VALIDATION_FAILURE, MetricUnit.Count, 1); + + logger.warn( + `${ERROR_MESSAGES.VALIDATION_FAILED} - component: MetadataValidator, fileKey: ${objectKey}, isValid: false, fileSize: ${fileSize}, metadataValid: ${metadataValid}, failureReasons: metadata validation failed` + ); + } + + const errorMessage = !isValid ? 'Metadata validation failed' : undefined; + + return { + isValid, + error: errorMessage, + originalFileName + }; + } catch (error) { + const errorObj = error instanceof Error ? error : new Error('Unknown error'); + metrics.addMetric(CloudWatchMetrics.METADATA_VALIDATION_FAILURE, MetricUnit.Count, 1); + + logger.error( + `${ERROR_MESSAGES.SYSTEM_ERROR} - component: MetadataValidator, bucketName: ${bucketName}, objectKey: ${objectKey}, error: ${errorObj.message}, systemError: true` + ); + + return { + isValid: false, + error: errorObj.message, + originalFileName: '' + }; + } + } + + /** + * Fetches metadata and size from S3 object using HeadObject API with retry logic + * @param bucketName - S3 bucket name + * @param objectKey - S3 object key + * @returns S3 HeadObject response including metadata and content length + */ + private async fetchMetadataAndSize(bucketName: string, objectKey: string) { + const operation = async () => { + const command = new HeadObjectCommand({ + Bucket: bucketName, + Key: objectKey + }); + + const response = await this.s3Client.send(command); + metrics.addMetric(CloudWatchMetrics.METADATA_S3_HEAD_OBJECT_CALLS, MetricUnit.Count, 1); + + return response; + }; + + const retrySettings: RetrySettings = { + maxRetries: VALIDATION_CONSTANTS.MAX_RETRIES, + backOffRate: VALIDATION_CONSTANTS.BACKOFF_MULTIPLIER, + initialDelayMs: VALIDATION_CONSTANTS.INITIAL_RETRY_DELAY_MS + }; + + return await retryWithBackoff(operation, retrySettings); + } + + /** + * Validates source metadata against required values + * @param metadata - S3 object metadata + * @returns True if source metadata is valid + */ + private validateSourceTag(metadata: Record): boolean { + const sourceValue = metadata[VALIDATION_CONSTANTS.REQUIRED_TAG_KEY]; + + if (!sourceValue || sourceValue !== VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE) { + logger.warn( + `${ERROR_MESSAGES.SECURITY_VIOLATION_DETECTED} - component: MetadataValidator, requiredKey: ${VALIDATION_CONSTANTS.REQUIRED_TAG_KEY}, requiredValue: ${VALIDATION_CONSTANTS.REQUIRED_TAG_VALUE}, actualValue: ${sourceValue}, violationType: ${sourceValue ? 'invalid-value' : 'missing-metadata'}` + ); + return false; + } + + return true; + } +} diff --git a/source/lambda/files-metadata-management/utils/utils.ts b/source/lambda/files-metadata-management/utils/utils.ts new file mode 100644 index 00000000..0b56585e --- /dev/null +++ b/source/lambda/files-metadata-management/utils/utils.ts @@ -0,0 +1,240 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { logger, tracer } from '../power-tools-init'; +import { formatError } from './http-response-formatters'; +import RequestValidationError from './error'; +import { RETRY_CONFIG, REQUIRED_ENV_VARS, EXTENSION_TO_MIME_TYPE } from './constants'; +import { RetrySettings } from '../models/types'; + +/** + * Validates that required environment variables are set + * @param requiredVars - Array of required environment variable names + */ +export const checkEnv = () => { + let missingVars: string[] = []; + for (let envVar of REQUIRED_ENV_VARS) { + if (!process.env[envVar]) { + missingVars.push(envVar); + } + } + if (missingVars.length > 0) { + const errMsg = `Missing required environment variables: ${missingVars.join( + ', ' + )}. This should not happen and indicates an issue with your deployment.`; + logger.error(errMsg); + throw new Error(errMsg); + } +}; + +/** + * Gets retry settings for DynamoDB operations + * @returns RetrySettings object with default values + */ +export function getRetrySettings(): RetrySettings { + return { + maxRetries: RETRY_CONFIG.maxRetries, + backOffRate: RETRY_CONFIG.backOffRate, + initialDelayMs: RETRY_CONFIG.initialDelayMs + }; +} + +/** + * Generic error handler for Lambda operations + * @param error - The error that occurred + * @param action - The action that was being performed + * @param context - Optional context for error messages (e.g., 'Files Handler') + * @returns Formatted error response + */ +export const handleLambdaError = (error: unknown, action: string, context: string = ''): any => { + const rootTraceId = tracer.getRootXrayTraceId(); + let errorMessage; + const contextPrefix = context ? `${context} ` : ''; + + if (error instanceof RequestValidationError) { + logger.error(`Validation of ${contextPrefix} request failed with error: ${error}`); + logger.error( + `Error while validating ${contextPrefix} request for action: ${action}, root trace id: ${rootTraceId}` + ); + errorMessage = `Request Validation Error - Please contact support and quote the following trace id: ${rootTraceId}`; + } else { + logger.error(`${contextPrefix} Management Error: ${error}`); + logger.error(`Error while executing ${contextPrefix} action: ${action}, root trace id: ${rootTraceId}`); + errorMessage = `Internal Error - Please contact support and quote the following trace id: ${rootTraceId}`; + } + + return formatError({ + message: errorMessage, + extraHeaders: { '_X_AMZN_TRACE_ID': rootTraceId as string } + }); +}; + +/** + * Delays execution for the specified number of milliseconds + * @param delayMillis - Number of milliseconds to delay + * @returns Promise that resolves after the delay + */ +export function delay(delayMillis: number): Promise { + return new Promise((resolve) => setTimeout(resolve, delayMillis)); +} + +/** + * Retry function with exponential backoff + * @param operation - The operation to retry + * @param retrySettings - Retry configuration + * @returns Promise with operation result + */ +export const retryWithBackoff = async (operation: () => Promise, retrySettings: RetrySettings): Promise => { + let lastError: Error | undefined; + let delayMs = retrySettings.initialDelayMs; + + for (let attempt = 0; attempt <= retrySettings.maxRetries; attempt++) { + try { + return await operation(); + } catch (error) { + lastError = error instanceof Error ? error : new Error(String(error)); + + if (attempt === retrySettings.maxRetries) { + break; + } + + logger.warn( + `Operation failed, retrying in ${delayMs}ms (attempt ${attempt + 1}/${retrySettings.maxRetries + 1}): ${lastError.message}` + ); + + await delay(delayMs); + delayMs *= retrySettings.backOffRate; + } + } + + // Ensure we always throw a proper Error object + throw lastError || new Error('Operation failed after all retry attempts'); +}; + +/** + * Calculates TTL timestamp for file cleanup + * @param uploadTimestamp - The upload timestamp in milliseconds (UTC) + * @param ttlMS - TTL duration in milliseconds + * @returns TTL timestamp in seconds (Unix epoch UTC) for DynamoDB TTL + */ +export const calculateTTL = (uploadTimestamp: number, ttlMS: number): number => { + return Math.floor((uploadTimestamp + ttlMS) / 1000); +}; + +/** + * Extracts file extension from filename + * @param fileName - The filename to extract extension from + * @returns File extension (without dot) or 'unknown' if no extension + */ +export const extractFileExtension = (fileName: string): string => { + const lastDotIndex = fileName.lastIndexOf('.'); + if (lastDotIndex === -1 || lastDotIndex === fileName.length - 1) { + return 'unknown'; + } + return fileName.substring(lastDotIndex + 1).toLowerCase(); +}; + +/** + * Maps file extension to expected MIME type for validation + * @param fileName - The filename to extract content type from + * @returns Expected MIME type based on file extension + */ +export const extractContentTypeFromFileName = (fileName: string): string => { + const extension = extractFileExtension(fileName); + + return EXTENSION_TO_MIME_TYPE[extension] || 'application/octet-stream'; +}; + +/** + * Determines if an error is a system error (not a security violation) + * @param error - Error message + * @param systemErrorPatterns - Array of system error patterns to check against + * @returns True if this is a system error + */ +export const isSystemError = (error?: string, systemErrorPatterns: string[] = []): boolean => { + if (!error) return false; + return systemErrorPatterns.some((pattern) => error.toLowerCase().includes(pattern.toLowerCase())); +}; + +/** + * Determines if an error is a security violation (missing/invalid metadata) + * @param error - Error message + * @param securityViolationPatterns - Array of security violation patterns to check against + * @returns True if this is a security violation + */ +export const isSecurityViolation = (error?: string, securityViolationPatterns: string[] = []): boolean => { + if (!error) return false; + return securityViolationPatterns.some((pattern) => error.toLowerCase().includes(pattern.toLowerCase())); +}; + +/** + * Determines if an error is transient and should be retried + * @param error - Error message + * @param transientErrorPatterns - Array of transient error patterns to check against + * @returns True if this is a transient error + */ +export const isTransientError = (error?: string, transientErrorPatterns: string[] = []): boolean => { + if (!error) return false; + return transientErrorPatterns.some((pattern) => error.toLowerCase().includes(pattern.toLowerCase())); +}; + +/** + * Determines if an error should not be retried + * @param error - The error to check + * @param nonRetryableErrors - Array of non-retryable error types + * @returns True if the error should not be retried + */ +export const isNonRetryableError = (error: Error, nonRetryableErrors: string[] = []): boolean => { + return nonRetryableErrors.some((errorType) => error.name === errorType || error.message.includes(errorType)); +}; + +/** + * Categorizes processing errors for better error handling and metrics + * @param error - Error object + * @returns Error category + */ +export const categorizeProcessingError = (error: Error): string => { + const errorMessage = error.message.toLowerCase(); + const errorName = error.name; + + // System/infrastructure errors + if ( + errorMessage.includes('timeout') || + errorMessage.includes('serviceunavailable') || + errorMessage.includes('internalerror') || + errorMessage.includes('throttling') || + errorName === 'TimeoutError' + ) { + return 'system-error'; + } + + // DynamoDB specific errors + if ( + errorName === 'ConditionalCheckFailedException' || + errorName === 'ResourceNotFoundException' || + errorName === 'ProvisionedThroughputExceededException' + ) { + return 'dynamodb-error'; + } + + // S3 specific errors + if ( + errorMessage.includes('nosuchkey') || + errorMessage.includes('accessdenied') || + errorMessage.includes('nosuchbucket') + ) { + return 's3-error'; + } + + // Validation errors + if ( + errorMessage.includes('invalid file key format') || + errorMessage.includes('validation') || + errorName === 'ValidationException' + ) { + return 'validation-error'; + } + + // Default to application error + return 'application-error'; +}; diff --git a/source/lambda/invoke-agent/poetry.lock b/source/lambda/invoke-agent/poetry.lock index bb323b29..0e6bf2c3 100644 --- a/source/lambda/invoke-agent/poetry.lock +++ b/source/lambda/invoke-agent/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "aws-lambda-powertools" @@ -47,27 +47,27 @@ wrapt = "*" [[package]] name = "boto3" -version = "1.40.15" +version = "1.40.53" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "boto3-1.40.15-py3-none-any.whl", hash = "sha256:52b8aa78c9906c4e49dcec6817c041df33c9825073bf66e7df8fc00afbe47b4b"}, - {file = "boto3-1.40.15.tar.gz", hash = "sha256:271b379ce5ad35ca82f1009e917528a182eed0e2de197ccffb0c51acadec5c79"}, + {file = "boto3-1.40.53-py3-none-any.whl", hash = "sha256:65ded2738de259bd9030feb4772ec7b53d5b661befa88ce836117c3df8265309"}, + {file = "boto3-1.40.53.tar.gz", hash = "sha256:3f8cf56034cfde20dd0abca01349f64ab65734d90c3fbf7357e8a84cb64a62ee"}, ] [package.dependencies] -botocore = ">=1.40.15,<1.41.0" +botocore = ">=1.40.53,<1.41.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.13.0,<0.14.0" +s3transfer = ">=0.14.0,<0.15.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-layer" -version = "3.0.7" +version = "4.0.0" description = "Layer for AWS Boto3 python SDK" optional = false python-versions = "^3.13" @@ -76,8 +76,8 @@ files = [] develop = true [package.dependencies] -boto3 = "1.40.15" -botocore = "1.40.15" +boto3 = "1.40.53" +botocore = "1.40.53" urllib3 = "2.5.0" [package.source] @@ -86,14 +86,14 @@ url = "../layers/aws_boto3" [[package]] name = "botocore" -version = "1.40.15" +version = "1.40.53" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "botocore-1.40.15-py3-none-any.whl", hash = "sha256:b364e039d2b67e509cfb089cb39b295251e48a60cc68fd591defbe10b44d83f9"}, - {file = "botocore-1.40.15.tar.gz", hash = "sha256:4960800e4c5a7b43db22550979c22f5a324cbaf75ef494bbb2cf400ef1e6aca7"}, + {file = "botocore-1.40.53-py3-none-any.whl", hash = "sha256:840322b0af4be7a6e2effddb4eb388053c25af0618f627f37d8b03cc1edbc928"}, + {file = "botocore-1.40.53.tar.gz", hash = "sha256:4ebb9e6648c4896d3f0cdda5ff30b5de9a83aeb591be89a16f98cc5ee3cd371c"}, ] [package.dependencies] @@ -462,7 +462,7 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "custom-boto3-init" -version = "3.0.7" +version = "4.0.0" description = "Initialize boto config for AWS Python SDK with custom configuration" optional = false python-versions = "^3.13" @@ -953,14 +953,14 @@ tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asy [[package]] name = "s3transfer" -version = "0.13.1" +version = "0.14.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724"}, - {file = "s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf"}, + {file = "s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456"}, + {file = "s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125"}, ] [package.dependencies] diff --git a/source/lambda/invoke-agent/pyproject.toml b/source/lambda/invoke-agent/pyproject.toml index 0d4f00a8..960cc086 100644 --- a/source/lambda/invoke-agent/pyproject.toml +++ b/source/lambda/invoke-agent/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "invoke-agent" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Lambda implementation for chat feature" packages = [ diff --git a/source/lambda/layers/aws-node-user-agent-config/.gitignore b/source/lambda/layers/aws-node-user-agent-config/.gitignore deleted file mode 100644 index d94f489e..00000000 --- a/source/lambda/layers/aws-node-user-agent-config/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.d.ts -*.js \ No newline at end of file diff --git a/source/lambda/layers/aws-node-user-agent-config/jest.config.js b/source/lambda/layers/aws-node-user-agent-config/jest.config.js new file mode 100644 index 00000000..31a70a4a --- /dev/null +++ b/source/lambda/layers/aws-node-user-agent-config/jest.config.js @@ -0,0 +1,26 @@ +/********************************************************************************************************************** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * + * * + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance * + * with the License. A copy of the License is located at * + * * + * http://www.apache.org/licenses/LICENSE-2.0 * + * * + * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES * + * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions * + * and limitations under the License. * + *********************************************************************************************************************/ + +module.exports = { + modulePaths: [ + '/../layers/', + '/../layers/aws-sdk-lib/node_modules/', + '/../layers/common-node-lib/' + ], + modulePathIgnorePatterns: ['/dist/'], + testMatch: ['test/**/*.[t]s?(x)', '**/?(*.)+(spec|test).[t]s?(x)'], + collectCoverage: true, + collectCoverageFrom: ['**/*.ts', '!**/test/*.ts', '!**/dist'], + coverageReporters: ['text', ['lcov', { projectRoot: '../../../' }]], + preset: 'ts-jest' +}; diff --git a/source/lambda/layers/aws-node-user-agent-config/package-lock.json b/source/lambda/layers/aws-node-user-agent-config/package-lock.json index aa57b057..ed7c688c 100644 --- a/source/lambda/layers/aws-node-user-agent-config/package-lock.json +++ b/source/lambda/layers/aws-node-user-agent-config/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/aws-node-user-agent-config", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@amzn/aws-node-user-agent-config", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "dependencies": { "@aws-lambda-powertools/logger": "^2.11.0", @@ -21,7 +21,7 @@ "@types/node": "^22.10.1", "eslint": "^9.16.0", "jest": "^29.7.0", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2" } @@ -847,10 +847,11 @@ } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -3514,9 +3515,9 @@ "dev": true }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -4021,9 +4022,9 @@ } }, "node_modules/prettier": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", - "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, "bin": { "prettier": "bin/prettier.cjs" @@ -5317,9 +5318,9 @@ } }, "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "requires": { "argparse": "^1.0.7", @@ -7333,9 +7334,9 @@ "dev": true }, "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "requires": { "argparse": "^2.0.1" @@ -7716,9 +7717,9 @@ "dev": true }, "prettier": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", - "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true }, "pretty-format": { diff --git a/source/lambda/layers/aws-node-user-agent-config/package.json b/source/lambda/layers/aws-node-user-agent-config/package.json index ee28fef2..8eccb523 100644 --- a/source/lambda/layers/aws-node-user-agent-config/package.json +++ b/source/lambda/layers/aws-node-user-agent-config/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/aws-node-user-agent-config", - "version": "3.0.7", + "version": "4.0.0", "description": "AWS Nodejs SDK Config initialization layer", "main": "index.js", "scripts": { @@ -8,7 +8,7 @@ "test": "jest --coverage --silent --verbose", "test-debug": "jest --coverage", "clean": "rm -rf node_modules", - "code-formatter": "./node_modules/prettier/bin-prettier.js --config ../../../.prettierrc.yml '**/*.{js,json,css,md}' !package*.json --write", + "code-formatter": "prettier --config ../../../../.prettierrc.yml --ignore-path ../../../../.prettierignore --write '**/*.{js,ts,json,css,md}'", "code-linter": "./node_modules/eslint/bin/eslint.js . -c ../../.eslintrc.js --ext .js" }, "devDependencies": { @@ -16,7 +16,7 @@ "@types/node": "^22.10.1", "eslint": "^9.16.0", "jest": "^29.7.0", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2" }, diff --git a/source/lambda/layers/aws-sdk-lib/aws-client-manager.test.ts b/source/lambda/layers/aws-sdk-lib/aws-client-manager.test.ts new file mode 100644 index 00000000..035e0e46 --- /dev/null +++ b/source/lambda/layers/aws-sdk-lib/aws-client-manager.test.ts @@ -0,0 +1,76 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + +import { AWSClientManager } from './aws-client-manager'; +import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import { S3Client } from '@aws-sdk/client-s3'; + +describe('AWSClientManager', () => { + beforeEach(() => { + AWSClientManager.resetClients(); + process.env.AWS_SDK_USER_AGENT = '{"customUserAgent": "AWSSOLUTION/SO0276/v2.0.0"}'; + }); + + afterEach(() => { + delete process.env.AWS_SDK_USER_AGENT; + }); + + it('should return singleton DynamoDB client', () => { + const client1 = AWSClientManager.getServiceClient('dynamodb'); + const client2 = AWSClientManager.getServiceClient('dynamodb'); + + expect(client1).toBe(client2); + expect(client1).toBeInstanceOf(DynamoDBClient); + }); + + it('should return singleton S3 client', () => { + const client1 = AWSClientManager.getServiceClient('s3'); + const client2 = AWSClientManager.getServiceClient('s3'); + + expect(client1).toBe(client2); + expect(client1).toBeInstanceOf(S3Client); + }); + + it('should return different clients for different services', () => { + const ddbClient = AWSClientManager.getServiceClient('dynamodb'); + const s3Client = AWSClientManager.getServiceClient('s3'); + + expect(ddbClient).not.toBe(s3Client); + }); + + it('should capture client with tracer when provided', () => { + const mockTracer = { captureAWSv3Client: jest.fn((client) => client) }; + + AWSClientManager.getServiceClient('dynamodb', mockTracer); + + expect(mockTracer.captureAWSv3Client).toHaveBeenCalledTimes(1); + }); + + it('should not call tracer on subsequent calls', () => { + const mockTracer = { captureAWSv3Client: jest.fn((client) => client) }; + + AWSClientManager.getServiceClient('dynamodb', mockTracer); + AWSClientManager.getServiceClient('dynamodb', mockTracer); + + expect(mockTracer.captureAWSv3Client).toHaveBeenCalledTimes(1); + }); + + it('should reset clients', () => { + const client1 = AWSClientManager.getServiceClient('dynamodb'); + + AWSClientManager.resetClients(); + + const client2 = AWSClientManager.getServiceClient('dynamodb'); + expect(client1).not.toBe(client2); + }); + + it('should throw error for unsupported service', () => { + expect(() => { + AWSClientManager.getServiceClient('unsupported' as any); + }).toThrow("No client factory found for service 'unsupported'"); + }); +}); diff --git a/source/lambda/layers/aws-sdk-lib/aws-client-manager.ts b/source/lambda/layers/aws-sdk-lib/aws-client-manager.ts new file mode 100644 index 00000000..3cee3178 --- /dev/null +++ b/source/lambda/layers/aws-sdk-lib/aws-client-manager.ts @@ -0,0 +1,50 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import { S3Client } from '@aws-sdk/client-s3'; +import { CloudFormationClient } from '@aws-sdk/client-cloudformation'; +import { APIGatewayClient } from '@aws-sdk/client-api-gateway'; +import { CognitoIdentityProviderClient } from '@aws-sdk/client-cognito-identity-provider'; +import { SSMClient } from '@aws-sdk/client-ssm'; +import { customAwsConfig } from 'aws-node-user-agent-config'; + +type AWSClientType = DynamoDBClient | S3Client | CloudFormationClient | APIGatewayClient | CognitoIdentityProviderClient | SSMClient; +type ServiceName = 'dynamodb' | 's3' | 'cloudformation' | 'apigateway' | 'cognito' | 'ssm'; + +const CLIENT_FACTORIES: Record AWSClientType> = { + dynamodb: () => new DynamoDBClient(customAwsConfig()), + s3: () => new S3Client(customAwsConfig()), + cloudformation: () => new CloudFormationClient(customAwsConfig()), + apigateway: () => new APIGatewayClient(customAwsConfig()), + cognito: () => new CognitoIdentityProviderClient(customAwsConfig()), + ssm: () => new SSMClient(customAwsConfig()) +}; + +class AWSClientManager { + private static clientInstances = new Map(); + + public static getServiceClient( + serviceName: ServiceName, + tracer?: { captureAWSv3Client: (client: any) => any } + ): T { + if (!AWSClientManager.clientInstances.has(serviceName)) { + const clientFactory = CLIENT_FACTORIES[serviceName]; + if (!clientFactory) { + throw new Error(`No client factory found for service '${serviceName}'.`); + } + const client = clientFactory(); + if (tracer) { + tracer.captureAWSv3Client(client); + } + AWSClientManager.clientInstances.set(serviceName, client); + } + return AWSClientManager.clientInstances.get(serviceName) as T; + } + + public static resetClients(): void { + AWSClientManager.clientInstances.clear(); + } +} + +export { AWSClientManager }; diff --git a/source/lambda/layers/aws-sdk-lib/index.ts b/source/lambda/layers/aws-sdk-lib/index.ts new file mode 100644 index 00000000..1e3b5b93 --- /dev/null +++ b/source/lambda/layers/aws-sdk-lib/index.ts @@ -0,0 +1,4 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { AWSClientManager } from './aws-client-manager'; diff --git a/source/lambda/layers/aws-sdk-lib/jest.config.js b/source/lambda/layers/aws-sdk-lib/jest.config.js new file mode 100644 index 00000000..c3dc38f5 --- /dev/null +++ b/source/lambda/layers/aws-sdk-lib/jest.config.js @@ -0,0 +1,9 @@ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + collectCoverageFrom: ['*.ts', '!*.test.ts', '!jest.config.js'], + coveragePathIgnorePatterns: ['/node_modules/', '/dist/'], + moduleNameMapper: { + '^aws-node-user-agent-config$': '/../aws-node-user-agent-config/dist' + } +}; diff --git a/source/lambda/layers/aws-sdk-lib/package-lock.json b/source/lambda/layers/aws-sdk-lib/package-lock.json index ff07aea9..d80b3675 100644 --- a/source/lambda/layers/aws-sdk-lib/package-lock.json +++ b/source/lambda/layers/aws-sdk-lib/package-lock.json @@ -1,35 +1,42 @@ { "name": "@amzn/aws-sdk-layer", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/aws-sdk-layer", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "dependencies": { - "@aws-sdk/client-api-gateway": "^3.775.0", - "@aws-sdk/client-cloudformation": "^3.699.0", - "@aws-sdk/client-cognito-identity-provider": "^3.708.0", - "@aws-sdk/client-dynamodb": "^3.705.0", - "@aws-sdk/client-kendra": "^3.706.0", - "@aws-sdk/client-s3": "^3.705.0", - "@aws-sdk/client-secrets-manager": "^3.699.0", - "@aws-sdk/client-ssm": "^3.699.0", - "@aws-sdk/lib-dynamodb": "^3.705.0", - "@aws-sdk/types": "^3.696.0", - "@aws-sdk/util-arn-parser": "^3.693.0", - "@aws-sdk/util-dynamodb": "^3.705.0", - "@smithy/types": "^3.7.2", + "@aws-sdk/client-api-gateway": "^3.875.0", + "@aws-sdk/client-bedrock-agentcore-control": "^3.875.0", + "@aws-sdk/client-cloudformation": "^3.875.0", + "@aws-sdk/client-cognito-identity-provider": "^3.875.0", + "@aws-sdk/client-dynamodb": "^3.875.0", + "@aws-sdk/client-kendra": "^3.875.0", + "@aws-sdk/client-s3": "^3.875.0", + "@aws-sdk/client-secrets-manager": "^3.875.0", + "@aws-sdk/client-ssm": "^3.875.0", + "@aws-sdk/lib-dynamodb": "^3.875.0", + "@aws-sdk/s3-presigned-post": "^3.875.0", + "@aws-sdk/s3-request-presigner": "^3.875.0", + "@aws-sdk/types": "^3.875.0", + "@aws-sdk/util-arn-parser": "^3.873.0", + "@aws-sdk/util-dynamodb": "^3.875.0", + "@smithy/types": "^4.3.2", "@types/aws-lambda": "^8.10.146" + }, + "devDependencies": { + "@types/jest": "^30.0.0", + "jest": "^29.7.0", + "ts-jest": "^29.4.5" } }, "node_modules/@aws-crypto/crc32": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/@aws-crypto/crc32/-/crc32-5.2.0.tgz", "integrity": "sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg==", - "license": "Apache-2.0", "dependencies": { "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", @@ -43,7 +50,6 @@ "version": "5.2.0", "resolved": "https://registry.npmjs.org/@aws-crypto/crc32c/-/crc32c-5.2.0.tgz", "integrity": "sha512-+iWb8qaHLYKrNvGRbiYRHSdKRWhto5XlZUEBwDjYNf+ly5SVYG6zEoYIdxvf5R3zyeP16w4PLBn3rH1xc74Rag==", - "license": "Apache-2.0", "dependencies": { "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", @@ -228,975 +234,1196 @@ } }, "node_modules/@aws-sdk/client-api-gateway": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-api-gateway/-/client-api-gateway-3.775.0.tgz", - "integrity": "sha512-6B+eJgEU13EMuZ3gmwbyaQN6HO24be9gGtVYfGQUdrTZivLhGjW2hkAxaUZdP1YkwXmSiQFKThZ347HW75ryZQ==", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-api-gateway/-/client-api-gateway-3.887.0.tgz", + "integrity": "sha512-eodlzzEXU9L2vll/fisqE61gxw00oH07VDR8ea3VvDJAYa0ys31a5kmFHnbHsU8t5lrK8JhvDMePxHC3VjV5uA==", + "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-node": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-sdk-api-gateway": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-stream": "^4.3.1", + "@smithy/util-utf8": "^4.1.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@aws-sdk/client-bedrock-agentcore-control": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-bedrock-agentcore-control/-/client-bedrock-agentcore-control-3.879.0.tgz", + "integrity": "sha512-EZGOjq1LNa/jf2JObNl621difWr8RZmTnklC3SwPVGnPnp97oUBqhUt2JX0ZRLC2iHRb2rW8HvxxhGC8y1JNXw==", "license": "Apache-2.0", "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-node": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-sdk-api-gateway": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", + "@aws-sdk/core": "3.879.0", + "@aws-sdk/credential-provider-node": "3.879.0", + "@aws-sdk/middleware-host-header": "3.873.0", + "@aws-sdk/middleware-logger": "3.876.0", + "@aws-sdk/middleware-recursion-detection": "3.873.0", + "@aws-sdk/middleware-user-agent": "3.879.0", + "@aws-sdk/region-config-resolver": "3.873.0", + "@aws-sdk/types": "3.862.0", + "@aws-sdk/util-endpoints": "3.879.0", + "@aws-sdk/util-user-agent-browser": "3.873.0", + "@aws-sdk/util-user-agent-node": "3.879.0", + "@smithy/config-resolver": "^4.1.5", + "@smithy/core": "^3.9.0", + "@smithy/fetch-http-handler": "^5.1.1", + "@smithy/hash-node": "^4.0.5", + "@smithy/invalid-dependency": "^4.0.5", + "@smithy/middleware-content-length": "^4.0.5", + "@smithy/middleware-endpoint": "^4.1.19", + "@smithy/middleware-retry": "^4.1.20", + "@smithy/middleware-serde": "^4.0.9", + "@smithy/middleware-stack": "^4.0.5", + "@smithy/node-config-provider": "^4.1.4", + "@smithy/node-http-handler": "^4.1.1", + "@smithy/protocol-http": "^5.1.3", + "@smithy/smithy-client": "^4.5.0", + "@smithy/types": "^4.3.2", + "@smithy/url-parser": "^4.0.5", "@smithy/util-base64": "^4.0.0", "@smithy/util-body-length-browser": "^4.0.0", "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "@smithy/util-stream": "^4.2.0", + "@smithy/util-defaults-mode-browser": "^4.0.27", + "@smithy/util-defaults-mode-node": "^4.0.27", + "@smithy/util-endpoints": "^3.0.7", + "@smithy/util-middleware": "^4.0.5", + "@smithy/util-retry": "^4.0.7", "@smithy/util-utf8": "^4.0.0", - "tslib": "^2.6.2" + "@smithy/util-waiter": "^4.0.7", + "@types/uuid": "^9.0.1", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-api-gateway/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/client-sso": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-sso/-/client-sso-3.879.0.tgz", + "integrity": "sha512-+Pc3OYFpRYpKLKRreovPM63FPPud1/SF9vemwIJfz6KwsBCJdvg7vYD1xLSIp5DVZLeetgf4reCyAA5ImBfZuw==", "license": "Apache-2.0", "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.879.0", + "@aws-sdk/middleware-host-header": "3.873.0", + "@aws-sdk/middleware-logger": "3.876.0", + "@aws-sdk/middleware-recursion-detection": "3.873.0", + "@aws-sdk/middleware-user-agent": "3.879.0", + "@aws-sdk/region-config-resolver": "3.873.0", + "@aws-sdk/types": "3.862.0", + "@aws-sdk/util-endpoints": "3.879.0", + "@aws-sdk/util-user-agent-browser": "3.873.0", + "@aws-sdk/util-user-agent-node": "3.879.0", + "@smithy/config-resolver": "^4.1.5", + "@smithy/core": "^3.9.0", + "@smithy/fetch-http-handler": "^5.1.1", + "@smithy/hash-node": "^4.0.5", + "@smithy/invalid-dependency": "^4.0.5", + "@smithy/middleware-content-length": "^4.0.5", + "@smithy/middleware-endpoint": "^4.1.19", + "@smithy/middleware-retry": "^4.1.20", + "@smithy/middleware-serde": "^4.0.9", + "@smithy/middleware-stack": "^4.0.5", + "@smithy/node-config-provider": "^4.1.4", + "@smithy/node-http-handler": "^4.1.1", + "@smithy/protocol-http": "^5.1.3", + "@smithy/smithy-client": "^4.5.0", + "@smithy/types": "^4.3.2", + "@smithy/url-parser": "^4.0.5", + "@smithy/util-base64": "^4.0.0", + "@smithy/util-body-length-browser": "^4.0.0", + "@smithy/util-body-length-node": "^4.0.0", + "@smithy/util-defaults-mode-browser": "^4.0.27", + "@smithy/util-defaults-mode-node": "^4.0.27", + "@smithy/util-endpoints": "^3.0.7", + "@smithy/util-middleware": "^4.0.5", + "@smithy/util-retry": "^4.0.7", + "@smithy/util-utf8": "^4.0.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-cloudformation": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-cloudformation/-/client-cloudformation-3.775.0.tgz", - "integrity": "sha512-Vs3T8ooDh0zujfLFMBSSX/2unuwrjcwin0UKfZO4Eda5s1DT1Hhx4pGRDH5ob9gTAWtv76a4Rsdf7QddDjc75A==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/core": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/core/-/core-3.879.0.tgz", + "integrity": "sha512-AhNmLCrx980LsK+SfPXGh7YqTyZxsK0Qmy18mWmkfY0TSq7WLaSDB5zdQbgbnQCACCHy8DUYXbi4KsjlIhv3PA==", "license": "Apache-2.0", "dependencies": { - "@aws-crypto/sha256-browser": "5.2.0", - "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-node": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", + "@aws-sdk/types": "3.862.0", + "@aws-sdk/xml-builder": "3.873.0", + "@smithy/core": "^3.9.0", + "@smithy/node-config-provider": "^4.1.4", + "@smithy/property-provider": "^4.0.5", + "@smithy/protocol-http": "^5.1.3", + "@smithy/signature-v4": "^5.1.3", + "@smithy/smithy-client": "^4.5.0", + "@smithy/types": "^4.3.2", "@smithy/util-base64": "^4.0.0", "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", + "@smithy/util-middleware": "^4.0.5", "@smithy/util-utf8": "^4.0.0", - "@smithy/util-waiter": "^4.0.3", - "@types/uuid": "^9.0.1", - "tslib": "^2.6.2", - "uuid": "^9.0.1" + "fast-xml-parser": "5.2.5", + "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-cloudformation/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/credential-provider-env": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-env/-/credential-provider-env-3.879.0.tgz", + "integrity": "sha512-JgG7A8SSbr5IiCYL8kk39Y9chdSB5GPwBorDW8V8mr19G9L+qd6ohED4fAocoNFaDnYJ5wGAHhCfSJjzcsPBVQ==", "license": "Apache-2.0", "dependencies": { + "@aws-sdk/core": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-cognito-identity-provider": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-cognito-identity-provider/-/client-cognito-identity-provider-3.775.0.tgz", - "integrity": "sha512-hGH8F84SChSW6G6YTRwaewiOKyFWYj4CbAIK8WH4Z1Veg67csIQ8K6rlYuhn+nEMd6F+cMdUtn0EGeI7VDdNWg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/credential-provider-http": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-http/-/credential-provider-http-3.879.0.tgz", + "integrity": "sha512-2hM5ByLpyK+qORUexjtYyDZsgxVCCUiJQZRMGkNXFEGz6zTpbjfTIWoh3zRgWHEBiqyPIyfEy50eIF69WshcuA==", "license": "Apache-2.0", "dependencies": { - "@aws-crypto/sha256-browser": "5.2.0", - "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-node": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "@smithy/util-base64": "^4.0.0", - "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "@smithy/util-utf8": "^4.0.0", + "@aws-sdk/core": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/fetch-http-handler": "^5.1.1", + "@smithy/node-http-handler": "^4.1.1", + "@smithy/property-provider": "^4.0.5", + "@smithy/protocol-http": "^5.1.3", + "@smithy/smithy-client": "^4.5.0", + "@smithy/types": "^4.3.2", + "@smithy/util-stream": "^4.2.4", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-cognito-identity-provider/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/credential-provider-ini": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.879.0.tgz", + "integrity": "sha512-07M8zfb73KmMBqVO5/V3Ea9kqDspMX0fO0kaI1bsjWI6ngnMye8jCE0/sIhmkVAI0aU709VA0g+Bzlopnw9EoQ==", "license": "Apache-2.0", "dependencies": { + "@aws-sdk/core": "3.879.0", + "@aws-sdk/credential-provider-env": "3.879.0", + "@aws-sdk/credential-provider-http": "3.879.0", + "@aws-sdk/credential-provider-process": "3.879.0", + "@aws-sdk/credential-provider-sso": "3.879.0", + "@aws-sdk/credential-provider-web-identity": "3.879.0", + "@aws-sdk/nested-clients": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/credential-provider-imds": "^4.0.7", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-dynamodb": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-dynamodb/-/client-dynamodb-3.775.0.tgz", - "integrity": "sha512-OoZKPUIyGVnEMkA6GuEu2vB6ZS5Q4GyJZLhELv8+srwB0OVQ2i2jchExvUySQyvxAkGXbeKGGRKAw3IpVGfKcA==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/credential-provider-node": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-node/-/credential-provider-node-3.879.0.tgz", + "integrity": "sha512-FYaAqJbnSTrVL2iZkNDj2hj5087yMv2RN2GA8DJhe7iOJjzhzRojrtlfpWeJg6IhK0sBKDH+YXbdeexCzUJvtA==", "license": "Apache-2.0", "dependencies": { - "@aws-crypto/sha256-browser": "5.2.0", - "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-node": "3.775.0", - "@aws-sdk/middleware-endpoint-discovery": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "@smithy/util-base64": "^4.0.0", - "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "@smithy/util-utf8": "^4.0.0", - "@smithy/util-waiter": "^4.0.3", - "@types/uuid": "^9.0.1", - "tslib": "^2.6.2", - "uuid": "^9.0.1" + "@aws-sdk/credential-provider-env": "3.879.0", + "@aws-sdk/credential-provider-http": "3.879.0", + "@aws-sdk/credential-provider-ini": "3.879.0", + "@aws-sdk/credential-provider-process": "3.879.0", + "@aws-sdk/credential-provider-sso": "3.879.0", + "@aws-sdk/credential-provider-web-identity": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/credential-provider-imds": "^4.0.7", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.3.2", + "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-dynamodb/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/credential-provider-process": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-process/-/credential-provider-process-3.879.0.tgz", + "integrity": "sha512-7r360x1VyEt35Sm1JFOzww2WpnfJNBbvvnzoyLt7WRfK0S/AfsuWhu5ltJ80QvJ0R3AiSNbG+q/btG2IHhDYPQ==", "license": "Apache-2.0", "dependencies": { + "@aws-sdk/core": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-kendra": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-kendra/-/client-kendra-3.775.0.tgz", - "integrity": "sha512-xLQDJXmZJVEaeJarbMYJQsyon7HU542J/Cisl0uTwSajgt/BlLZ6QpN2tfCNxNGPWFM2kciFx37BIIA1oKGXXA==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/credential-provider-sso": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.879.0.tgz", + "integrity": "sha512-gd27B0NsgtKlaPNARj4IX7F7US5NuU691rGm0EUSkDsM7TctvJULighKoHzPxDQlrDbVI11PW4WtKS/Zg5zPlQ==", "license": "Apache-2.0", "dependencies": { - "@aws-crypto/sha256-browser": "5.2.0", - "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-node": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "@smithy/util-base64": "^4.0.0", - "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "@smithy/util-utf8": "^4.0.0", - "@types/uuid": "^9.0.1", - "tslib": "^2.6.2", - "uuid": "^9.0.1" + "@aws-sdk/client-sso": "3.879.0", + "@aws-sdk/core": "3.879.0", + "@aws-sdk/token-providers": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.3.2", + "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-kendra/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/credential-provider-web-identity": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.879.0.tgz", + "integrity": "sha512-Jy4uPFfGzHk1Mxy+/Wr43vuw9yXsE2yiF4e4598vc3aJfO0YtA2nSfbKD3PNKRORwXbeKqWPfph9SCKQpWoxEg==", "license": "Apache-2.0", "dependencies": { + "@aws-sdk/core": "3.879.0", + "@aws-sdk/nested-clients": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-s3": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-s3/-/client-s3-3.775.0.tgz", - "integrity": "sha512-Z/BeVmYc3nj4FNE46MtvBYeCVvBZwlujMEvr5UOChP14899QWkBfOvf74RwQY9qy5/DvhVFkHlA8en1L6+0NrA==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/middleware-host-header": { + "version": "3.873.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-host-header/-/middleware-host-header-3.873.0.tgz", + "integrity": "sha512-KZ/W1uruWtMOs7D5j3KquOxzCnV79KQW9MjJFZM/M0l6KI8J6V3718MXxFHsTjUE4fpdV6SeCNLV1lwGygsjJA==", "license": "Apache-2.0", "dependencies": { - "@aws-crypto/sha1-browser": "5.2.0", - "@aws-crypto/sha256-browser": "5.2.0", - "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-node": "3.775.0", - "@aws-sdk/middleware-bucket-endpoint": "3.775.0", - "@aws-sdk/middleware-expect-continue": "3.775.0", - "@aws-sdk/middleware-flexible-checksums": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-location-constraint": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-sdk-s3": "3.775.0", - "@aws-sdk/middleware-ssec": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/signature-v4-multi-region": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@aws-sdk/xml-builder": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/eventstream-serde-browser": "^4.0.2", - "@smithy/eventstream-serde-config-resolver": "^4.1.0", - "@smithy/eventstream-serde-node": "^4.0.2", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-blob-browser": "^4.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/hash-stream-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/md5-js": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "@smithy/util-base64": "^4.0.0", - "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "@smithy/util-stream": "^4.2.0", - "@smithy/util-utf8": "^4.0.0", - "@smithy/util-waiter": "^4.0.3", + "@aws-sdk/types": "3.862.0", + "@smithy/protocol-http": "^5.1.3", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-s3/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/middleware-logger": { + "version": "3.876.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-logger/-/middleware-logger-3.876.0.tgz", + "integrity": "sha512-cpWJhOuMSyz9oV25Z/CMHCBTgafDCbv7fHR80nlRrPdPZ8ETNsahwRgltXP1QJJ8r3X/c1kwpOR7tc+RabVzNA==", "license": "Apache-2.0", "dependencies": { + "@aws-sdk/types": "3.862.0", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-secrets-manager": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-secrets-manager/-/client-secrets-manager-3.775.0.tgz", - "integrity": "sha512-/ne/Mz+rKJvGkYbyy5ouQhNq/yPERz5hMfwKVJM1mAPtmDbExel2Y8IZNQ5j18Q64RMkWo6Rzj4ET2mhA5UKPw==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/middleware-recursion-detection": { + "version": "3.873.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.873.0.tgz", + "integrity": "sha512-OtgY8EXOzRdEWR//WfPkA/fXl0+WwE8hq0y9iw2caNyKPtca85dzrrZWnPqyBK/cpImosrpR1iKMYr41XshsCg==", "license": "Apache-2.0", "dependencies": { - "@aws-crypto/sha256-browser": "5.2.0", - "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-node": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "@smithy/util-base64": "^4.0.0", - "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "@smithy/util-utf8": "^4.0.0", - "@types/uuid": "^9.0.1", - "tslib": "^2.6.2", - "uuid": "^9.0.1" + "@aws-sdk/types": "3.862.0", + "@smithy/protocol-http": "^5.1.3", + "@smithy/types": "^4.3.2", + "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-secrets-manager/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/middleware-user-agent": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.879.0.tgz", + "integrity": "sha512-DDSV8228lQxeMAFKnigkd0fHzzn5aauZMYC3CSj6e5/qE7+9OwpkUcjHfb7HZ9KWG6L2/70aKZXHqiJ4xKhOZw==", "license": "Apache-2.0", "dependencies": { + "@aws-sdk/core": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@aws-sdk/util-endpoints": "3.879.0", + "@smithy/core": "^3.9.0", + "@smithy/protocol-http": "^5.1.3", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-ssm": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-ssm/-/client-ssm-3.775.0.tgz", - "integrity": "sha512-F2bLhFFAU4Ls7YKFBSyNNKFeSIPwBO02eVYjgLFxrgaY9Kk6Tlx6DnwOE+Z2m3Q2tqavy8AQpRzyO+PI8cay2A==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/nested-clients": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/nested-clients/-/nested-clients-3.879.0.tgz", + "integrity": "sha512-7+n9NpIz9QtKYnxmw1fHi9C8o0GrX8LbBR4D50c7bH6Iq5+XdSuL5AFOWWQ5cMD0JhqYYJhK/fJsVau3nUtC4g==", "license": "Apache-2.0", "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-node": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", + "@aws-sdk/core": "3.879.0", + "@aws-sdk/middleware-host-header": "3.873.0", + "@aws-sdk/middleware-logger": "3.876.0", + "@aws-sdk/middleware-recursion-detection": "3.873.0", + "@aws-sdk/middleware-user-agent": "3.879.0", + "@aws-sdk/region-config-resolver": "3.873.0", + "@aws-sdk/types": "3.862.0", + "@aws-sdk/util-endpoints": "3.879.0", + "@aws-sdk/util-user-agent-browser": "3.873.0", + "@aws-sdk/util-user-agent-node": "3.879.0", + "@smithy/config-resolver": "^4.1.5", + "@smithy/core": "^3.9.0", + "@smithy/fetch-http-handler": "^5.1.1", + "@smithy/hash-node": "^4.0.5", + "@smithy/invalid-dependency": "^4.0.5", + "@smithy/middleware-content-length": "^4.0.5", + "@smithy/middleware-endpoint": "^4.1.19", + "@smithy/middleware-retry": "^4.1.20", + "@smithy/middleware-serde": "^4.0.9", + "@smithy/middleware-stack": "^4.0.5", + "@smithy/node-config-provider": "^4.1.4", + "@smithy/node-http-handler": "^4.1.1", + "@smithy/protocol-http": "^5.1.3", + "@smithy/smithy-client": "^4.5.0", + "@smithy/types": "^4.3.2", + "@smithy/url-parser": "^4.0.5", "@smithy/util-base64": "^4.0.0", "@smithy/util-body-length-browser": "^4.0.0", "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", + "@smithy/util-defaults-mode-browser": "^4.0.27", + "@smithy/util-defaults-mode-node": "^4.0.27", + "@smithy/util-endpoints": "^3.0.7", + "@smithy/util-middleware": "^4.0.5", + "@smithy/util-retry": "^4.0.7", "@smithy/util-utf8": "^4.0.0", - "@smithy/util-waiter": "^4.0.3", - "@types/uuid": "^9.0.1", - "tslib": "^2.6.2", - "uuid": "^9.0.1" + "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-ssm/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/region-config-resolver": { + "version": "3.873.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/region-config-resolver/-/region-config-resolver-3.873.0.tgz", + "integrity": "sha512-q9sPoef+BBG6PJnc4x60vK/bfVwvRWsPgcoQyIra057S/QGjq5VkjvNk6H8xedf6vnKlXNBwq9BaANBXnldUJg==", "license": "Apache-2.0", "dependencies": { + "@aws-sdk/types": "3.862.0", + "@smithy/node-config-provider": "^4.1.4", + "@smithy/types": "^4.3.2", + "@smithy/util-config-provider": "^4.0.0", + "@smithy/util-middleware": "^4.0.5", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-sso": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-sso/-/client-sso-3.775.0.tgz", - "integrity": "sha512-vqG1S2ap77WP4D5qt4bEPE0duQ4myN+cDr1NeP8QpSTajetbkDGVo7h1VViYMcUoFUVWBj6Qf1X1VfOq+uaxbA==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/token-providers": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/token-providers/-/token-providers-3.879.0.tgz", + "integrity": "sha512-47J7sCwXdnw9plRZNAGVkNEOlSiLb/kR2slnDIHRK9NB/ECKsoqgz5OZQJ9E2f0yqOs8zSNJjn3T01KxpgW8Qw==", "license": "Apache-2.0", "dependencies": { - "@aws-crypto/sha256-browser": "5.2.0", - "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "@smithy/util-base64": "^4.0.0", - "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "@smithy/util-utf8": "^4.0.0", + "@aws-sdk/core": "3.879.0", + "@aws-sdk/nested-clients": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/client-sso/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/types": { + "version": "3.862.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/types/-/types-3.862.0.tgz", + "integrity": "sha512-Bei+RL0cDxxV+lW2UezLbCYYNeJm6Nzee0TpW0FfyTRBhH9C1XQh4+x+IClriXvgBnRquTMMYsmJfvx8iyLKrg==", "license": "Apache-2.0", "dependencies": { + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/core": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/core/-/core-3.775.0.tgz", - "integrity": "sha512-8vpW4WihVfz0DX+7WnnLGm3GuQER++b0IwQG35JlQMlgqnc44M//KbJPsIHA0aJUJVwJAEShgfr5dUbY8WUzaA==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/util-endpoints": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-endpoints/-/util-endpoints-3.879.0.tgz", + "integrity": "sha512-aVAJwGecYoEmbEFju3127TyJDF9qJsKDUUTRMDuS8tGn+QiWQFnfInmbt+el9GU1gEJupNTXV+E3e74y51fb7A==", "license": "Apache-2.0", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/core": "^3.2.0", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/property-provider": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/signature-v4": "^5.0.2", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/util-middleware": "^4.0.2", - "fast-xml-parser": "4.4.1", + "@aws-sdk/types": "3.862.0", + "@smithy/types": "^4.3.2", + "@smithy/url-parser": "^4.0.5", + "@smithy/util-endpoints": "^3.0.7", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/core/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/util-user-agent-browser": { + "version": "3.873.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.873.0.tgz", + "integrity": "sha512-AcRdbK6o19yehEcywI43blIBhOCSo6UgyWcuOJX5CFF8k39xm1ILCjQlRRjchLAxWrm0lU0Q7XV90RiMMFMZtA==", "license": "Apache-2.0", "dependencies": { + "@aws-sdk/types": "3.862.0", + "@smithy/types": "^4.3.2", + "bowser": "^2.11.0", "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-env": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-env/-/credential-provider-env-3.775.0.tgz", - "integrity": "sha512-6ESVxwCbGm7WZ17kY1fjmxQud43vzJFoLd4bmlR+idQSWdqlzGDYdcfzpjDKTcivdtNrVYmFvcH1JBUwCRAZhw==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/util-user-agent-node": { + "version": "3.879.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.879.0.tgz", + "integrity": "sha512-A5KGc1S+CJRzYnuxJQQmH1BtGsz46AgyHkqReKfGiNQA8ET/9y9LQ5t2ABqnSBHHIh3+MiCcQSkUZ0S3rTodrQ==", "license": "Apache-2.0", "dependencies": { - "@aws-sdk/core": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/property-provider": "^4.0.2", - "@smithy/types": "^4.2.0", + "@aws-sdk/middleware-user-agent": "3.879.0", + "@aws-sdk/types": "3.862.0", + "@smithy/node-config-provider": "^4.1.4", + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" + }, + "peerDependencies": { + "aws-crt": ">=1.0.0" + }, + "peerDependenciesMeta": { + "aws-crt": { + "optional": true + } } }, - "node_modules/@aws-sdk/credential-provider-env/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", + "node_modules/@aws-sdk/client-bedrock-agentcore-control/node_modules/@aws-sdk/xml-builder": { + "version": "3.873.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/xml-builder/-/xml-builder-3.873.0.tgz", + "integrity": "sha512-kLO7k7cGJ6KaHiExSJWojZurF7SnGMDHXRuQunFnEoD0n1yB6Lqy/S/zHiQ7oJnBhPr9q0TW9qFkrsZb1Uc54w==", "license": "Apache-2.0", "dependencies": { + "@smithy/types": "^4.3.2", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-http": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-http/-/credential-provider-http-3.775.0.tgz", - "integrity": "sha512-PjDQeDH/J1S0yWV32wCj2k5liRo0ssXMseCBEkCsD3SqsU8o5cU82b0hMX4sAib/RkglCSZqGO0xMiN0/7ndww==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/client-cloudformation": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-cloudformation/-/client-cloudformation-3.887.0.tgz", + "integrity": "sha512-EKaczPYFs9A1C/3g17Y76ekn79ItEYS/DyTHW2rwmkBB62Bu3kUTEVD1zzm56HySC0ENlXzjnFhjrBA0247YsA==", "dependencies": { - "@aws-sdk/core": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/property-provider": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/util-stream": "^4.2.0", - "tslib": "^2.6.2" + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-node": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", + "@smithy/util-waiter": "^4.1.1", + "@types/uuid": "^9.0.1", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-http/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/client-cognito-identity-provider": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-cognito-identity-provider/-/client-cognito-identity-provider-3.887.0.tgz", + "integrity": "sha512-/kfAIb0e9uqSbyNS6T0qgNPo9qdXsTL6M7yOlSJHTObC7McX+nyBtQnFqiROK6vRFrdosgqCr6Jik4BIsH8v/g==", "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-node": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-ini": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.775.0.tgz", - "integrity": "sha512-0gJc6cALsgrjeC5U3qDjbz4myIC/j49+gPz9nkvY+C0OYWt1KH1tyfiZUuCRGfuFHhQ+3KMMDSL229TkBP3E7g==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/client-dynamodb": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-dynamodb/-/client-dynamodb-3.887.0.tgz", + "integrity": "sha512-fBFRuYRztEHCFIly1tBU2inklUcsJtal1a6wTancKiodKwSvsmLdebpT2aVkv4p3Yy4VkKIV0iBYFD15BOa0nQ==", "dependencies": { - "@aws-sdk/core": "3.775.0", - "@aws-sdk/credential-provider-env": "3.775.0", - "@aws-sdk/credential-provider-http": "3.775.0", - "@aws-sdk/credential-provider-process": "3.775.0", - "@aws-sdk/credential-provider-sso": "3.775.0", - "@aws-sdk/credential-provider-web-identity": "3.775.0", - "@aws-sdk/nested-clients": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/credential-provider-imds": "^4.0.2", - "@smithy/property-provider": "^4.0.2", - "@smithy/shared-ini-file-loader": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-node": "3.887.0", + "@aws-sdk/middleware-endpoint-discovery": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", + "@smithy/util-waiter": "^4.1.1", + "@types/uuid": "^9.0.1", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-ini/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/client-kendra": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-kendra/-/client-kendra-3.887.0.tgz", + "integrity": "sha512-omrJyVY/f+uZhqjQz/xcPI9PyiycKDuzAsLD7pPd11xRR6aODcs0N3KyzrfYhkKaGE6liN0+Y8t5bbTrLlIXhQ==", "dependencies": { - "tslib": "^2.6.2" + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-node": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", + "@types/uuid": "^9.0.1", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-node": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-node/-/credential-provider-node-3.775.0.tgz", - "integrity": "sha512-D8Zre5W2sXC/ANPqCWPqwYpU1cKY9DF6ckFZyDrqlcBC0gANgpY6fLrBtYo2fwJsbj+1A24iIpBINV7erdprgA==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/client-s3": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-s3/-/client-s3-3.887.0.tgz", + "integrity": "sha512-WEFiYbCgUBhd3OMj6Q3SCoJ5ekZduLPMnkLQ6czz3UGDuK2GCtdpscEGlbOyKSxm+BdLSV30+vU3gwjdtWUhCg==", "dependencies": { - "@aws-sdk/credential-provider-env": "3.775.0", - "@aws-sdk/credential-provider-http": "3.775.0", - "@aws-sdk/credential-provider-ini": "3.775.0", - "@aws-sdk/credential-provider-process": "3.775.0", - "@aws-sdk/credential-provider-sso": "3.775.0", - "@aws-sdk/credential-provider-web-identity": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/credential-provider-imds": "^4.0.2", - "@smithy/property-provider": "^4.0.2", - "@smithy/shared-ini-file-loader": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@aws-crypto/sha1-browser": "5.2.0", + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-node": "3.887.0", + "@aws-sdk/middleware-bucket-endpoint": "3.887.0", + "@aws-sdk/middleware-expect-continue": "3.887.0", + "@aws-sdk/middleware-flexible-checksums": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-location-constraint": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-sdk-s3": "3.887.0", + "@aws-sdk/middleware-ssec": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/signature-v4-multi-region": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@aws-sdk/xml-builder": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/eventstream-serde-browser": "^4.1.1", + "@smithy/eventstream-serde-config-resolver": "^4.2.1", + "@smithy/eventstream-serde-node": "^4.1.1", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-blob-browser": "^4.1.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/hash-stream-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/md5-js": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-stream": "^4.3.1", + "@smithy/util-utf8": "^4.1.0", + "@smithy/util-waiter": "^4.1.1", + "@types/uuid": "^9.0.1", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-node/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/client-secrets-manager": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-secrets-manager/-/client-secrets-manager-3.887.0.tgz", + "integrity": "sha512-yGxML5pGTWZ3yY0ocYkD5F1y3kiLQ80zLuBfAjhCoAW3hnP9/ng8vGxga/vOpVKIX30DXNzHrcEnQB4oRcn8cA==", "dependencies": { - "tslib": "^2.6.2" + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-node": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", + "@types/uuid": "^9.0.1", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-process": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-process/-/credential-provider-process-3.775.0.tgz", - "integrity": "sha512-A6k68H9rQp+2+7P7SGO90Csw6nrUEm0Qfjpn9Etc4EboZhhCLs9b66umUsTsSBHus4FDIe5JQxfCUyt1wgNogg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/client-ssm": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-ssm/-/client-ssm-3.887.0.tgz", + "integrity": "sha512-KiXWxJqkjQ42SkRROQTnZ1Oe8WaiEGYc8tX7GaG1patXek84sOuRKzuFwBu01fuiimXyfNejWbSkk4km2SKtxQ==", "dependencies": { - "@aws-sdk/core": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/property-provider": "^4.0.2", - "@smithy/shared-ini-file-loader": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-node": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", + "@smithy/util-waiter": "^4.1.1", + "@types/uuid": "^9.0.1", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-process/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/client-sso": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-sso/-/client-sso-3.887.0.tgz", + "integrity": "sha512-ZKN8BxkRdC6vK6wlnuLSYBhj7uufg14GP5bxqiRaDEooN1y2WcuY95GP13I3brLvM0uboFGbObIVpVrbeHifng==", "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-sso": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.775.0.tgz", - "integrity": "sha512-du06V7u9HDmRuwZnRjf85shO3dffeKOkQplV5/2vf3LgTPNEI9caNomi/cCGyxKGOeSUHAKrQ1HvpPfOaI6t5Q==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/core": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/core/-/core-3.887.0.tgz", + "integrity": "sha512-oiBsWhuuj1Lzh+FHY+gE0PyYuiDxqFf98F9Pd2WruY5Gu/+/xvDFEPEkIEOae8gWRaLZ5Eh8u+OY9LS4DXZhuQ==", "dependencies": { - "@aws-sdk/client-sso": "3.775.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/token-providers": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/property-provider": "^4.0.2", - "@smithy/shared-ini-file-loader": "^4.0.2", - "@smithy/types": "^4.2.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/xml-builder": "3.887.0", + "@smithy/core": "^3.11.0", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/property-provider": "^4.0.5", + "@smithy/protocol-http": "^5.2.1", + "@smithy/signature-v4": "^5.1.3", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", + "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-sso/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/credential-provider-env": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-env/-/credential-provider-env-3.887.0.tgz", + "integrity": "sha512-kv7L5E8mxlWTMhCK639wrQnFEmwUDfKvKzTMDo2OboXZ0iSbD+hBPoT0gkb49qHNetYnsl63BVOxc0VNiOA04w==", "dependencies": { + "@aws-sdk/core": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-web-identity": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.775.0.tgz", - "integrity": "sha512-z4XLYui5aHsr78mbd5BtZfm55OM5V55qK/X17OPrEqjYDDk3GlI8Oe2ZjTmOVrKwMpmzXKhsakeFHKfDyOvv1A==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/credential-provider-http": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-http/-/credential-provider-http-3.887.0.tgz", + "integrity": "sha512-siLttHxSFgJ5caDgS+BHYs9GBDX7J3pgge4OmJvIQeGO+KaJC12TerBNPJOp+qRaRC3yuVw3T9RpSZa8mmaiyA==", "dependencies": { - "@aws-sdk/core": "3.775.0", - "@aws-sdk/nested-clients": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/property-provider": "^4.0.2", - "@smithy/types": "^4.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/property-provider": "^4.0.5", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/util-stream": "^4.3.1", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/credential-provider-web-identity/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/credential-provider-ini": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.887.0.tgz", + "integrity": "sha512-Na9IjKdPuSNU/mBcCQ49HiIgomq/O7kZAuRyGwAXiRPbf86AacKv4dsUyPZY6lCgVIvVniRWgYlVaPgq22EIig==", "dependencies": { + "@aws-sdk/core": "3.887.0", + "@aws-sdk/credential-provider-env": "3.887.0", + "@aws-sdk/credential-provider-http": "3.887.0", + "@aws-sdk/credential-provider-process": "3.887.0", + "@aws-sdk/credential-provider-sso": "3.887.0", + "@aws-sdk/credential-provider-web-identity": "3.887.0", + "@aws-sdk/nested-clients": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/credential-provider-imds": "^4.0.7", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/endpoint-cache": { - "version": "3.723.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/endpoint-cache/-/endpoint-cache-3.723.0.tgz", - "integrity": "sha512-2+a4WXRc+07uiPR+zJiPGKSOWaNJQNqitkks+6Hhm/haTLJqNVTgY2OWDh2PXvwMNpKB+AlGdhE65Oy6NzUgXg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/credential-provider-node": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-node/-/credential-provider-node-3.887.0.tgz", + "integrity": "sha512-iJdCq/brBWYpJzJcXY2UhEoW7aA28ixIpvLKjxh5QUBfjCj19cImpj1gGwTIs6/fVcjVUw1tNveTBfn1ziTzVg==", "dependencies": { - "mnemonist": "0.38.3", + "@aws-sdk/credential-provider-env": "3.887.0", + "@aws-sdk/credential-provider-http": "3.887.0", + "@aws-sdk/credential-provider-ini": "3.887.0", + "@aws-sdk/credential-provider-process": "3.887.0", + "@aws-sdk/credential-provider-sso": "3.887.0", + "@aws-sdk/credential-provider-web-identity": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/credential-provider-imds": "^4.0.7", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/lib-dynamodb": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/lib-dynamodb/-/lib-dynamodb-3.775.0.tgz", - "integrity": "sha512-zgVMRQDTXE2QrCcVePEQzYwvKP7z6Eal7SO7HZF5xVjoUrQpb5m2HjswB7nbX8gYF/b4ko4Hc7sloyuWuvlx0g==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/credential-provider-process": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-process/-/credential-provider-process-3.887.0.tgz", + "integrity": "sha512-J5TIrQ/DUiyR65gXt1j3TEbLUwMcgYVB/G68/AVgBptPvb9kj+6zFG67bJJHwxtqJxRLVLTtTi9u/YDXTqGBpQ==", "dependencies": { - "@aws-sdk/core": "3.775.0", - "@aws-sdk/util-dynamodb": "3.775.0", - "@smithy/core": "^3.2.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" - }, - "peerDependencies": { - "@aws-sdk/client-dynamodb": "^3.775.0" } }, - "node_modules/@aws-sdk/lib-dynamodb/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/credential-provider-sso": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.887.0.tgz", + "integrity": "sha512-Bv9wUActLu6Kn0MK2s72bgbbNxSLPVop/If4MVbCyJ3n+prJnm5RsTF3isoWQVyyXA5g4tIrS8mE5FpejSbyPQ==", "dependencies": { + "@aws-sdk/client-sso": "3.887.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/token-providers": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-bucket-endpoint": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-bucket-endpoint/-/middleware-bucket-endpoint-3.775.0.tgz", - "integrity": "sha512-qogMIpVChDYr4xiUNC19/RDSw/sKoHkAhouS6Skxiy6s27HBhow1L3Z1qVYXuBmOZGSWPU0xiyZCvOyWrv9s+Q==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/credential-provider-web-identity": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.887.0.tgz", + "integrity": "sha512-PRh0KRukY2euN9xvvQ3cqhCAlEkMDJIWDLIfxQ1hTbv7JA3hrcLVrV+Jg5FRWsStDhweHIvD/VzruSkhJQS80g==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-arn-parser": "3.723.0", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", - "@smithy/util-config-provider": "^4.0.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/nested-clients": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-bucket-endpoint/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/endpoint-cache": { + "version": "3.873.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/endpoint-cache/-/endpoint-cache-3.873.0.tgz", + "integrity": "sha512-EHd+5bSp/hZc78SMq9cUCIsX0B4ekZtFUVSSLEXyYv8x/nHFTnTqN9TsxV8bjlztR3aSUeoKSk5qxu/dVGgiQw==", "dependencies": { + "mnemonist": "0.38.3", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-endpoint-discovery": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-endpoint-discovery/-/middleware-endpoint-discovery-3.775.0.tgz", - "integrity": "sha512-L0PmjSg7t+wovRo/Lin1kpei3e7wBhrENWb1Bbccu3PWUIfxolGeWplOmNhSlXjuQe9GXjf3z8kJRYOGBMFOvw==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/lib-dynamodb": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/lib-dynamodb/-/lib-dynamodb-3.887.0.tgz", + "integrity": "sha512-/t9rrvhR+AplmHk0yyQBzZuyarJ82sy1564KIKt59+ZM7GMCYGj5Z6D9OkZ5nSHco1K7E9adpFjHhxE1qxj4eA==", "dependencies": { - "@aws-sdk/endpoint-cache": "3.723.0", - "@aws-sdk/types": "3.775.0", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/util-dynamodb": "3.887.0", + "@smithy/core": "^3.11.0", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-dynamodb": "^3.887.0" } }, - "node_modules/@aws-sdk/middleware-endpoint-discovery/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", - "dependencies": { + "node_modules/@aws-sdk/middleware-bucket-endpoint": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-bucket-endpoint/-/middleware-bucket-endpoint-3.887.0.tgz", + "integrity": "sha512-qRCte/3MtNiMhPh4ZEGk9cHfAXq6IDTflvi2t1tkOIVZFyshkSCvNQNJrrE2D/ljVbOK1f3XbBDaF43EoQzIRQ==", + "dependencies": { + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-arn-parser": "3.873.0", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", + "@smithy/util-config-provider": "^4.0.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-expect-continue": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-expect-continue/-/middleware-expect-continue-3.775.0.tgz", - "integrity": "sha512-Apd3owkIeUW5dnk3au9np2IdW2N0zc9NjTjHiH+Mx3zqwSrc+m+ANgJVgk9mnQjMzU/vb7VuxJ0eqdEbp5gYsg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-endpoint-discovery": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-endpoint-discovery/-/middleware-endpoint-discovery-3.887.0.tgz", + "integrity": "sha512-ik+0B5nEMILseILZiBiOomcBuBV+fPzp1+u+zD9CWFjPy2ZnGdXtEgKec2mIjJKD1PxHcJm/lFD8/mHP3Rm9RA==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/endpoint-cache": "3.873.0", + "@aws-sdk/types": "3.887.0", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-expect-continue/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-expect-continue": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-expect-continue/-/middleware-expect-continue-3.887.0.tgz", + "integrity": "sha512-AlrTZZScDTG9SYeT82BC5cK/6Q4N0miN5xqMW/pbBqP3fNXlsdJOWKB+EKD3V6DV41EV5GVKHKe/1065xKSQ4w==", "dependencies": { + "@aws-sdk/types": "3.887.0", + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { @@ -1204,321 +1431,331 @@ } }, "node_modules/@aws-sdk/middleware-flexible-checksums": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-flexible-checksums/-/middleware-flexible-checksums-3.775.0.tgz", - "integrity": "sha512-OmHLfRIb7IIXsf9/X/pMOlcSV3gzW/MmtPSZTkrz5jCTKzWXd7eRoyOJqewjsaC6KMAxIpNU77FoAd16jOZ21A==", - "license": "Apache-2.0", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-flexible-checksums/-/middleware-flexible-checksums-3.887.0.tgz", + "integrity": "sha512-QaRGWeeHNxRvY+OUuiQ+4A7H+4HPCWCtfTiQRPzILd3C968r7EFNg2ZWyjoqITW8cj3ZJZp3p8VcH08WBzAhcQ==", "dependencies": { "@aws-crypto/crc32": "5.2.0", "@aws-crypto/crc32c": "5.2.0", "@aws-crypto/util": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/types": "3.775.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/types": "3.887.0", "@smithy/is-array-buffer": "^4.0.0", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-stream": "^4.2.0", - "@smithy/util-utf8": "^4.0.0", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-stream": "^4.3.1", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-flexible-checksums/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-host-header": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-host-header/-/middleware-host-header-3.887.0.tgz", + "integrity": "sha512-ulzqXv6NNqdu/kr0sgBYupWmahISHY+azpJidtK6ZwQIC+vBUk9NdZeqQpy7KVhIk2xd4+5Oq9rxapPwPI21CA==", "dependencies": { + "@aws-sdk/types": "3.887.0", + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-host-header": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-host-header/-/middleware-host-header-3.775.0.tgz", - "integrity": "sha512-tkSegM0Z6WMXpLB8oPys/d+umYIocvO298mGvcMCncpRl77L9XkvSLJIFzaHes+o7djAgIduYw8wKIMStFss2w==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-location-constraint": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-location-constraint/-/middleware-location-constraint-3.887.0.tgz", + "integrity": "sha512-eU/9Cq4gg2sS32bOomxdx2YF43kb+o70pMhnEBBnVVeqzE8co78SO5FQdWfRTfhNJgTyQ6Vgosx//CNMPIfZPg==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/types": "3.887.0", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-host-header/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-logger": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-logger/-/middleware-logger-3.887.0.tgz", + "integrity": "sha512-YbbgLI6jKp2qSoAcHnXrQ5jcuc5EYAmGLVFgMVdk8dfCfJLfGGSaOLxF4CXC7QYhO50s+mPPkhBYejCik02Kug==", "dependencies": { + "@aws-sdk/types": "3.887.0", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-location-constraint": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-location-constraint/-/middleware-location-constraint-3.775.0.tgz", - "integrity": "sha512-8TMXEHZXZTFTckQLyBT5aEI8fX11HZcwZseRifvBKKpj0RZDk4F0EEYGxeNSPpUQ7n+PRWyfAEnnZNRdAj/1NQ==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-recursion-detection": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.887.0.tgz", + "integrity": "sha512-tjrUXFtQnFLo+qwMveq5faxP5MQakoLArXtqieHphSqZTXm21wDJM73hgT4/PQQGTwgYjDKqnqsE1hvk0hcfDw==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/types": "3.887.0", + "@aws/lambda-invoke-store": "^0.0.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-location-constraint/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-sdk-api-gateway": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-sdk-api-gateway/-/middleware-sdk-api-gateway-3.887.0.tgz", + "integrity": "sha512-ec2ql0zXJUo+kV+nKHtSl5124FmhxDRTCawLDPYMOS2vFXpKb9k3v12bUs/2yDwJDS/bYMq22KsXeiWXOeaqlw==", "dependencies": { + "@aws-sdk/types": "3.887.0", + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-logger": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-logger/-/middleware-logger-3.775.0.tgz", - "integrity": "sha512-FaxO1xom4MAoUJsldmR92nT1G6uZxTdNYOFYtdHfd6N2wcNaTuxgjIvqzg5y7QIH9kn58XX/dzf1iTjgqUStZw==", - "license": "Apache-2.0", - "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/types": "^4.2.0", + "node_modules/@aws-sdk/middleware-sdk-s3": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-sdk-s3/-/middleware-sdk-s3-3.887.0.tgz", + "integrity": "sha512-vWMfd8esmMX5YSenzgendh9OSIw7IcKLH46ajaNvDBdF/9X0h6eobgNX/liLzrnNHd6t7Lru2KZXSjrwYgu7pA==", + "dependencies": { + "@aws-sdk/core": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-arn-parser": "3.873.0", + "@smithy/core": "^3.11.0", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/signature-v4": "^5.1.3", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/util-config-provider": "^4.0.0", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-stream": "^4.3.1", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-logger/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-ssec": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-ssec/-/middleware-ssec-3.887.0.tgz", + "integrity": "sha512-1ixZks0IDkdac1hjPe4vdLSuD9HznkhblCEb4T0wNyw3Ee1fdXg+MlcPWywzG5zkPXLcIrULUzJg/OSYfaDXcQ==", "dependencies": { + "@aws-sdk/types": "3.887.0", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-recursion-detection": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.775.0.tgz", - "integrity": "sha512-GLCzC8D0A0YDG5u3F5U03Vb9j5tcOEFhr8oc6PDk0k0vm5VwtZOE6LvK7hcCSoAB4HXyOUM0sQuXrbaAh9OwXA==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/middleware-user-agent": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.887.0.tgz", + "integrity": "sha512-YjBz2J4l3uCeMv2g1natat5YSMRZYdEpEg60g3d7q6hoHUD10SmWy8M+Ca8djF0is70vPmF3Icm2cArK3mtoNA==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@smithy/core": "^3.11.0", + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-recursion-detection/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", - "dependencies": { - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@aws-sdk/middleware-sdk-api-gateway": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-sdk-api-gateway/-/middleware-sdk-api-gateway-3.775.0.tgz", - "integrity": "sha512-OGOOP7CjHGH302ynlwDe49+1zELuO5W0hlHEbXxHeM14EFv1Z4Zk/LhvQ/SLS7GCB38p19Lf1Di61C1ycJsSmA==", - "license": "Apache-2.0", - "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@aws-sdk/middleware-sdk-api-gateway/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/nested-clients": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/nested-clients/-/nested-clients-3.887.0.tgz", + "integrity": "sha512-h6/dHuAJhJnhSDihcQd0wfJBZoPmPajASVqGk8qDxYDBWxIU9/mYcKvM+kTrKw3f9Wf3S/eR5B/rYHHuxFheSw==", "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.887.0", + "@aws-sdk/middleware-host-header": "3.887.0", + "@aws-sdk/middleware-logger": "3.887.0", + "@aws-sdk/middleware-recursion-detection": "3.887.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/region-config-resolver": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-endpoints": "3.887.0", + "@aws-sdk/util-user-agent-browser": "3.887.0", + "@aws-sdk/util-user-agent-node": "3.887.0", + "@smithy/config-resolver": "^4.2.1", + "@smithy/core": "^3.11.0", + "@smithy/fetch-http-handler": "^5.2.1", + "@smithy/hash-node": "^4.1.1", + "@smithy/invalid-dependency": "^4.1.1", + "@smithy/middleware-content-length": "^4.1.1", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/middleware-retry": "^4.2.1", + "@smithy/middleware-serde": "^4.1.1", + "@smithy/middleware-stack": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/node-http-handler": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-base64": "^4.1.0", + "@smithy/util-body-length-browser": "^4.1.0", + "@smithy/util-body-length-node": "^4.1.0", + "@smithy/util-defaults-mode-browser": "^4.1.1", + "@smithy/util-defaults-mode-node": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-sdk-s3": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-sdk-s3/-/middleware-sdk-s3-3.775.0.tgz", - "integrity": "sha512-zsvcu7cWB28JJ60gVvjxPCI7ZU7jWGcpNACPiZGyVtjYXwcxyhXbYEVDSWKsSA6ERpz9XrpLYod8INQWfW3ECg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/region-config-resolver": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/region-config-resolver/-/region-config-resolver-3.887.0.tgz", + "integrity": "sha512-VdSMrIqJ3yjJb/fY+YAxrH/lCVv0iL8uA+lbMNfQGtO5tB3Zx6SU9LEpUwBNX8fPK1tUpI65CNE4w42+MY/7Mg==", "dependencies": { - "@aws-sdk/core": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-arn-parser": "3.723.0", - "@smithy/core": "^3.2.0", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/signature-v4": "^5.0.2", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/types": "3.887.0", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/types": "^4.5.0", "@smithy/util-config-provider": "^4.0.0", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-stream": "^4.2.0", - "@smithy/util-utf8": "^4.0.0", + "@smithy/util-middleware": "^4.1.1", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-sdk-s3/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-presigned-post": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/s3-presigned-post/-/s3-presigned-post-3.887.0.tgz", + "integrity": "sha512-lUzG9GOSmuzpIHV3ORIeatE/54lGv5wsius2TBx/NX8TV+G0dkaPYxmJXaEPisPvTbLRqSGtmazPneUTJM81tg==", "dependencies": { + "@aws-sdk/client-s3": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@aws-sdk/util-format-url": "3.887.0", + "@smithy/middleware-endpoint": "^4.2.1", + "@smithy/signature-v4": "^5.1.3", + "@smithy/types": "^4.5.0", + "@smithy/util-hex-encoding": "^4.0.0", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-ssec": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-ssec/-/middleware-ssec-3.775.0.tgz", - "integrity": "sha512-Iw1RHD8vfAWWPzBBIKaojO4GAvQkHOYIpKdAfis/EUSUmSa79QsnXnRqsdcE0mCB0Ylj23yi+ah4/0wh9FsekA==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-request-presigner": { + "version": "3.901.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/s3-request-presigner/-/s3-request-presigner-3.901.0.tgz", + "integrity": "sha512-G/0G5tL3beETs2zkI0YQuM2SkrAsYJSe2vN2XtouVSN5c9v6EiSvdSsHAqMhLebnSs2suUkq0JO9ZotbXkUfMQ==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/signature-v4-multi-region": "3.901.0", + "@aws-sdk/types": "3.901.0", + "@aws-sdk/util-format-url": "3.901.0", + "@smithy/middleware-endpoint": "^4.3.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/smithy-client": "^4.7.0", + "@smithy/types": "^4.6.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-ssec/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-request-presigner/node_modules/@aws-sdk/core": { + "version": "3.901.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/core/-/core-3.901.0.tgz", + "integrity": "sha512-brKAc3y64tdhyuEf+OPIUln86bRTqkLgb9xkd6kUdIeA5+qmp/N6amItQz+RN4k4O3kqkCPYnAd3LonTKluobw==", "dependencies": { + "@aws-sdk/types": "3.901.0", + "@aws-sdk/xml-builder": "3.901.0", + "@smithy/core": "^3.14.0", + "@smithy/node-config-provider": "^4.3.0", + "@smithy/property-provider": "^4.2.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/signature-v4": "^5.3.0", + "@smithy/smithy-client": "^4.7.0", + "@smithy/types": "^4.6.0", + "@smithy/util-base64": "^4.2.0", + "@smithy/util-middleware": "^4.2.0", + "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-user-agent": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.775.0.tgz", - "integrity": "sha512-7Lffpr1ptOEDE1ZYH1T78pheEY1YmeXWBfFt/amZ6AGsKSLG+JPXvof3ltporTGR2bhH/eJPo7UHCglIuXfzYg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-request-presigner/node_modules/@aws-sdk/middleware-sdk-s3": { + "version": "3.901.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/middleware-sdk-s3/-/middleware-sdk-s3-3.901.0.tgz", + "integrity": "sha512-prgjVC3fDT2VIlmQPiw/cLee8r4frTam9GILRUVQyDdNtshNwV3MiaSCLzzQJjKJlLgnBLNUHJCSmvUVtg+3iA==", "dependencies": { - "@aws-sdk/core": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@smithy/core": "^3.2.0", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/core": "3.901.0", + "@aws-sdk/types": "3.901.0", + "@aws-sdk/util-arn-parser": "3.893.0", + "@smithy/core": "^3.14.0", + "@smithy/node-config-provider": "^4.3.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/signature-v4": "^5.3.0", + "@smithy/smithy-client": "^4.7.0", + "@smithy/types": "^4.6.0", + "@smithy/util-config-provider": "^4.2.0", + "@smithy/util-middleware": "^4.2.0", + "@smithy/util-stream": "^4.4.0", + "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/middleware-user-agent/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-request-presigner/node_modules/@aws-sdk/signature-v4-multi-region": { + "version": "3.901.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/signature-v4-multi-region/-/signature-v4-multi-region-3.901.0.tgz", + "integrity": "sha512-2IWxbll/pRucp1WQkHi2W5E2SVPGBvk4Is923H7gpNksbVFws18ItjMM8ZpGm44cJEoy1zR5gjhLFklatpuoOw==", "dependencies": { + "@aws-sdk/middleware-sdk-s3": "3.901.0", + "@aws-sdk/types": "3.901.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/signature-v4": "^5.3.0", + "@smithy/types": "^4.6.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/nested-clients": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/nested-clients/-/nested-clients-3.775.0.tgz", - "integrity": "sha512-f37jmAzkuIhKyhtA6s0LGpqQvm218vq+RNMUDkGm1Zz2fxJ5pBIUTDtygiI3vXTcmt9DTIB8S6JQhjrgtboktw==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-request-presigner/node_modules/@aws-sdk/types": { + "version": "3.901.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/types/-/types-3.901.0.tgz", + "integrity": "sha512-FfEM25hLEs4LoXsLXQ/q6X6L4JmKkKkbVFpKD4mwfVHtRVQG6QxJiCPcrkcPISquiy6esbwK2eh64TWbiD60cg==", "dependencies": { - "@aws-crypto/sha256-browser": "5.2.0", - "@aws-crypto/sha256-js": "5.2.0", - "@aws-sdk/core": "3.775.0", - "@aws-sdk/middleware-host-header": "3.775.0", - "@aws-sdk/middleware-logger": "3.775.0", - "@aws-sdk/middleware-recursion-detection": "3.775.0", - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/region-config-resolver": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@aws-sdk/util-endpoints": "3.775.0", - "@aws-sdk/util-user-agent-browser": "3.775.0", - "@aws-sdk/util-user-agent-node": "3.775.0", - "@smithy/config-resolver": "^4.1.0", - "@smithy/core": "^3.2.0", - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/hash-node": "^4.0.2", - "@smithy/invalid-dependency": "^4.0.2", - "@smithy/middleware-content-length": "^4.0.2", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-retry": "^4.1.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/protocol-http": "^5.1.0", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "@smithy/util-base64": "^4.0.0", - "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-body-length-node": "^4.0.0", - "@smithy/util-defaults-mode-browser": "^4.0.8", - "@smithy/util-defaults-mode-node": "^4.0.8", - "@smithy/util-endpoints": "^3.0.2", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "@smithy/util-utf8": "^4.0.0", + "@smithy/types": "^4.6.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/nested-clients/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-request-presigner/node_modules/@aws-sdk/util-arn-parser": { + "version": "3.893.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-arn-parser/-/util-arn-parser-3.893.0.tgz", + "integrity": "sha512-u8H4f2Zsi19DGnwj5FSZzDMhytYF/bCh37vAtBsn3cNDL3YG578X5oc+wSX54pM3tOxS+NY7tvOAo52SW7koUA==", "dependencies": { "tslib": "^2.6.2" }, @@ -1526,29 +1763,27 @@ "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/region-config-resolver": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/region-config-resolver/-/region-config-resolver-3.775.0.tgz", - "integrity": "sha512-40iH3LJjrQS3LKUJAl7Wj0bln7RFPEvUYKFxtP8a+oKFDO0F65F52xZxIJbPn6sHkxWDAnZlGgdjZXM3p2g5wQ==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-request-presigner/node_modules/@aws-sdk/util-format-url": { + "version": "3.901.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-format-url/-/util-format-url-3.901.0.tgz", + "integrity": "sha512-GGUnJKrh3OF1F3YRSWtwPLbN904Fcfxf03gujyq1rcrDRPEkzoZB+2BzNkB27SsU6lAlwNq+4aRlZRVUloPiag==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/types": "^4.2.0", - "@smithy/util-config-provider": "^4.0.0", - "@smithy/util-middleware": "^4.0.2", + "@aws-sdk/types": "3.901.0", + "@smithy/querystring-builder": "^4.2.0", + "@smithy/types": "^4.6.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/region-config-resolver/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/s3-request-presigner/node_modules/@aws-sdk/xml-builder": { + "version": "3.901.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/xml-builder/-/xml-builder-3.901.0.tgz", + "integrity": "sha512-pxFCkuAP7Q94wMTNPAwi6hEtNrp/BdFf+HOrIEeFQsk4EoOmpKY3I6S+u6A9Wg295J80Kh74LqDWM22ux3z6Aw==", "dependencies": { + "@smithy/types": "^4.6.0", + "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" }, "engines": { @@ -1556,28 +1791,15 @@ } }, "node_modules/@aws-sdk/signature-v4-multi-region": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/signature-v4-multi-region/-/signature-v4-multi-region-3.775.0.tgz", - "integrity": "sha512-cnGk8GDfTMJ8p7+qSk92QlIk2bmTmFJqhYxcXZ9PysjZtx0xmfCMxnG3Hjy1oU2mt5boPCVSOptqtWixayM17g==", - "license": "Apache-2.0", - "dependencies": { - "@aws-sdk/middleware-sdk-s3": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/protocol-http": "^5.1.0", - "@smithy/signature-v4": "^5.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@aws-sdk/signature-v4-multi-region/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/signature-v4-multi-region/-/signature-v4-multi-region-3.887.0.tgz", + "integrity": "sha512-xAmoHzSow3692IFeAblZKRIABp4Iv96XGQKMIlHE1LugSl4KuR/6M9+UfbNMfSdyfhRt0RkG6kMZ/7GwlxqoAQ==", "dependencies": { + "@aws-sdk/middleware-sdk-s3": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/protocol-http": "^5.2.1", + "@smithy/signature-v4": "^5.1.3", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { @@ -1585,28 +1807,16 @@ } }, "node_modules/@aws-sdk/token-providers": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/token-providers/-/token-providers-3.775.0.tgz", - "integrity": "sha512-Q6MtbEhkOggVSz/dN89rIY/ry80U3v89o0Lrrc+Rpvaiaaz8pEN0DsfEcg0IjpzBQ8Owoa6lNWyglHbzPhaJpA==", - "license": "Apache-2.0", - "dependencies": { - "@aws-sdk/nested-clients": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/property-provider": "^4.0.2", - "@smithy/shared-ini-file-loader": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@aws-sdk/token-providers/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/token-providers/-/token-providers-3.887.0.tgz", + "integrity": "sha512-3e5fTPMPeJ5DphZ+OSqzw4ymCgDf8SQVBgrlKVo4Bch9ZwmmAoOHbuQrXVa9xQHklEHJg1Gz2pkjxNaIgx7quA==", "dependencies": { + "@aws-sdk/core": "3.887.0", + "@aws-sdk/nested-clients": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/property-provider": "^4.0.5", + "@smithy/shared-ini-file-loader": "^4.0.5", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { @@ -1614,24 +1824,11 @@ } }, "node_modules/@aws-sdk/types": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/types/-/types-3.775.0.tgz", - "integrity": "sha512-ZoGKwa4C9fC9Av6bdfqcW6Ix5ot05F/S4VxWR2nHuMv7hzfmAjTOcUiWT7UR4hM/U0whf84VhDtXN/DWAk52KA==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@aws-sdk/types/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/types/-/types-3.887.0.tgz", + "integrity": "sha512-fmTEJpUhsPsovQ12vZSpVTEP/IaRoJAMBGQXlQNjtCpkBp6Iq3KQDa/HDaPINE+3xxo6XvTdtibsNOd5zJLV9A==", "dependencies": { + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { @@ -1639,10 +1836,9 @@ } }, "node_modules/@aws-sdk/util-arn-parser": { - "version": "3.723.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-arn-parser/-/util-arn-parser-3.723.0.tgz", - "integrity": "sha512-ZhEfvUwNliOQROcAk34WJWVYTlTa4694kSVhDSjW6lE1bMataPnIN8A0ycukEzBXmd8ZSoBcQLn6lKGl7XIJ5w==", - "license": "Apache-2.0", + "version": "3.873.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-arn-parser/-/util-arn-parser-3.873.0.tgz", + "integrity": "sha512-qag+VTqnJWDn8zTAXX4wiVioa0hZDQMtbZcGRERVnLar4/3/VIKBhxX2XibNQXFu1ufgcRn4YntT/XEPecFWcg==", "dependencies": { "tslib": "^2.6.2" }, @@ -1651,10 +1847,9 @@ } }, "node_modules/@aws-sdk/util-dynamodb": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-dynamodb/-/util-dynamodb-3.775.0.tgz", - "integrity": "sha512-RawcbaM54+7zJ1ULncZAvSSIGZcSucnQ6OY4pMjuUczp6kGx0dLuUJofLyATcboDKPjZZg+luF/z9E/XQq1J3g==", - "license": "Apache-2.0", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-dynamodb/-/util-dynamodb-3.887.0.tgz", + "integrity": "sha512-ypjO5uDYkhpnehXlyLxAAUfyVHw0uWIt4bS8k8qucltdHYUFukWlyBycweUTPt8oDee5mBlnFk6AFuoviDQT2g==", "dependencies": { "tslib": "^2.6.2" }, @@ -1662,30 +1857,32 @@ "node": ">=18.0.0" }, "peerDependencies": { - "@aws-sdk/client-dynamodb": "^3.775.0" + "@aws-sdk/client-dynamodb": "^3.887.0" } }, "node_modules/@aws-sdk/util-endpoints": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-endpoints/-/util-endpoints-3.775.0.tgz", - "integrity": "sha512-yjWmUgZC9tUxAo8Uaplqmq0eUh0zrbZJdwxGRKdYxfm4RG6fMw1tj52+KkatH7o+mNZvg1GDcVp/INktxonJLw==", - "license": "Apache-2.0", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-endpoints/-/util-endpoints-3.887.0.tgz", + "integrity": "sha512-kpegvT53KT33BMeIcGLPA65CQVxLUL/C3gTz9AzlU/SDmeusBHX4nRApAicNzI/ltQ5lxZXbQn18UczzBuwF1w==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/types": "^4.2.0", - "@smithy/util-endpoints": "^3.0.2", + "@aws-sdk/types": "3.887.0", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", + "@smithy/util-endpoints": "^3.1.1", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/util-endpoints/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/util-format-url": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-format-url/-/util-format-url-3.887.0.tgz", + "integrity": "sha512-ABDSP6KsrdD+JC7qwMqUpLXqPidvfgT+Q+W8sGGuk/IBy7smgZDOdYSZLE4VBbQpH3N/zSJuslAWhL2x37Qwww==", "dependencies": { + "@aws-sdk/types": "3.887.0", + "@smithy/querystring-builder": "^4.1.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { @@ -1705,39 +1902,25 @@ } }, "node_modules/@aws-sdk/util-user-agent-browser": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.775.0.tgz", - "integrity": "sha512-txw2wkiJmZKVdDbscK7VBK+u+TJnRtlUjRTLei+elZg2ADhpQxfVAQl436FUeIv6AhB/oRHW6/K/EAGXUSWi0A==", - "license": "Apache-2.0", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.887.0.tgz", + "integrity": "sha512-X71UmVsYc6ZTH4KU6hA5urOzYowSXc3qvroagJNLJYU1ilgZ529lP4J9XOYfEvTXkLR1hPFSRxa43SrwgelMjA==", "dependencies": { - "@aws-sdk/types": "3.775.0", - "@smithy/types": "^4.2.0", + "@aws-sdk/types": "3.887.0", + "@smithy/types": "^4.5.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, - "node_modules/@aws-sdk/util-user-agent-browser/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", - "dependencies": { - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18.0.0" - } - }, "node_modules/@aws-sdk/util-user-agent-node": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.775.0.tgz", - "integrity": "sha512-N9yhTevbizTOMo3drH7Eoy6OkJ3iVPxhV7dwb6CMAObbLneS36CSfA6xQXupmHWcRvZPTz8rd1JGG3HzFOau+g==", - "license": "Apache-2.0", + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.887.0.tgz", + "integrity": "sha512-eqnx2FWAf40Nw6EyhXWjVT5WYYMz0rLrKEhZR3GdRQyOFzgnnEfq74TtG2Xji9k/ODqkcKqkiI52RYDEcdh8Jg==", "dependencies": { - "@aws-sdk/middleware-user-agent": "3.775.0", - "@aws-sdk/types": "3.775.0", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/types": "^4.2.0", + "@aws-sdk/middleware-user-agent": "3.887.0", + "@aws-sdk/types": "3.887.0", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { @@ -1752,1127 +1935,1680 @@ } } }, - "node_modules/@aws-sdk/util-user-agent-node/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@aws-sdk/xml-builder": { + "version": "3.887.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/xml-builder/-/xml-builder-3.887.0.tgz", + "integrity": "sha512-lMwgWK1kNgUhHGfBvO/5uLe7TKhycwOn3eRCqsKPT9aPCx/HWuTlpcQp8oW2pCRGLS7qzcxqpQulcD+bbUL7XQ==", "dependencies": { + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/xml-builder": { - "version": "3.775.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/xml-builder/-/xml-builder-3.775.0.tgz", - "integrity": "sha512-b9NGO6FKJeLGYnV7Z1yvcP1TNU4dkD5jNsLWOF1/sygZoASaQhNOlaiJ/1OH331YQ1R1oWk38nBb0frsYkDsOQ==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" - }, + "node_modules/@aws/lambda-invoke-store": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/@aws/lambda-invoke-store/-/lambda-invoke-store-0.0.1.tgz", + "integrity": "sha512-ORHRQ2tmvnBXc8t/X9Z8IcSbBA4xTLKuN873FopzklHMeqBst7YG0d+AX97inkvDX+NChYtSr+qGfcqGFaI8Zw==", "engines": { "node": ">=18.0.0" } }, - "node_modules/@aws-sdk/xml-builder/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/abort-controller": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/abort-controller/-/abort-controller-4.0.2.tgz", - "integrity": "sha512-Sl/78VDtgqKxN2+1qduaVE140XF+Xg+TafkncspwM4jFP/LHr76ZHmIY/y3V1M0mMLNk+Je6IGbzxy23RSToMw==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" - }, + "node_modules/@babel/compat-data": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz", + "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/abort-controller/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/core": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz", + "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/chunked-blob-reader": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/@smithy/chunked-blob-reader/-/chunked-blob-reader-5.0.0.tgz", - "integrity": "sha512-+sKqDBQqb036hh4NPaUiEkYFkTUGYzRsn3EuFhyfQfMy6oGHEUJDurLP9Ufb5dasr/XiAmPNMr6wa9afjQB+Gw==", - "license": "Apache-2.0", + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/chunked-blob-reader-native": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/chunked-blob-reader-native/-/chunked-blob-reader-native-4.0.0.tgz", - "integrity": "sha512-R9wM2yPmfEMsUmlMlIgSzOyICs0x9uu7UTHoccMyt7BWw8shcGM8HqB355+BZCPBcySvbTYMs62EgEQkNxz2ig==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/util-base64": "^4.0.0", - "tslib": "^2.6.2" - }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/config-resolver": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/@smithy/config-resolver/-/config-resolver-4.1.0.tgz", - "integrity": "sha512-8smPlwhga22pwl23fM5ew4T9vfLUCeFXlcqNOCD5M5h8VmNPNUE9j6bQSuRXpDSV11L/E/SwEBQuW8hr6+nS1A==", - "license": "Apache-2.0", + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/node-config-provider": "^4.0.2", - "@smithy/types": "^4.2.0", - "@smithy/util-config-provider": "^4.0.0", - "@smithy/util-middleware": "^4.0.2", - "tslib": "^2.6.2" + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/config-resolver/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@smithy/core": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/@smithy/core/-/core-3.2.0.tgz", - "integrity": "sha512-k17bgQhVZ7YmUvA8at4af1TDpl0NDMBuBKJl8Yg0nrefwmValU+CnA5l/AriVdQNthU/33H3nK71HrLgqOPr1Q==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/middleware-serde": "^4.0.3", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", - "@smithy/util-body-length-browser": "^4.0.0", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-stream": "^4.2.0", - "@smithy/util-utf8": "^4.0.0", - "tslib": "^2.6.2" - }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/core/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", - "dependencies": { - "tslib": "^2.6.2" - }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/credential-provider-imds": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/credential-provider-imds/-/credential-provider-imds-4.0.2.tgz", - "integrity": "sha512-32lVig6jCaWBHnY+OEQ6e6Vnt5vDHaLiydGrwYMW9tPqO688hPGTYRamYJ1EptxEC2rAwJrHWmPoKRBl4iTa8w==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/node-config-provider": "^4.0.2", - "@smithy/property-provider": "^4.0.2", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "tslib": "^2.6.2" - }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/credential-provider-imds/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", - "dependencies": { - "tslib": "^2.6.2" - }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/eventstream-codec": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-codec/-/eventstream-codec-4.0.2.tgz", - "integrity": "sha512-p+f2kLSK7ZrXVfskU/f5dzksKTewZk8pJLPvER3aFHPt76C2MxD9vNatSfLzzQSQB4FNO96RK4PSXfhD1TTeMQ==", - "license": "Apache-2.0", + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "license": "MIT", "dependencies": { - "@aws-crypto/crc32": "5.2.0", - "@smithy/types": "^4.2.0", - "@smithy/util-hex-encoding": "^4.0.0", - "tslib": "^2.6.2" + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/eventstream-codec/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/parser": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.0.0" } }, - "node_modules/@smithy/eventstream-serde-browser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-browser/-/eventstream-serde-browser-4.0.2.tgz", - "integrity": "sha512-CepZCDs2xgVUtH7ZZ7oDdZFH8e6Y2zOv8iiX6RhndH69nlojCALSKK+OXwZUgOtUZEUaZ5e1hULVCHYbCn7pug==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/eventstream-serde-universal": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/eventstream-serde-browser/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/eventstream-serde-config-resolver": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-config-resolver/-/eventstream-serde-config-resolver-4.1.0.tgz", - "integrity": "sha512-1PI+WPZ5TWXrfj3CIoKyUycYynYJgZjuQo8U+sphneOtjsgrttYybdqESFReQrdWJ+LKt6NEdbYzmmfDBmjX2A==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.12.13" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/eventstream-serde-config-resolver/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.14.5" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/eventstream-serde-node": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-node/-/eventstream-serde-node-4.0.2.tgz", - "integrity": "sha512-C5bJ/C6x9ENPMx2cFOirspnF9ZsBVnBMtP6BdPl/qYSuUawdGQ34Lq0dMcf42QTjUZgWGbUIZnz6+zLxJlb9aw==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/eventstream-serde-universal": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/eventstream-serde-node/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.10.4" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/eventstream-serde-universal": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-universal/-/eventstream-serde-universal-4.0.2.tgz", - "integrity": "sha512-St8h9JqzvnbB52FtckiHPN4U/cnXcarMniXRXTKn0r4b4XesZOGiAyUdj1aXbqqn1icSqBlzzUsCl6nPB018ng==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/eventstream-codec": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/eventstream-serde-universal/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/fetch-http-handler": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/@smithy/fetch-http-handler/-/fetch-http-handler-5.0.2.tgz", - "integrity": "sha512-+9Dz8sakS9pe7f2cBocpJXdeVjMopUDLgZs1yWeu7h++WqSbjUYv/JAJwKwXw1HV6gq1jyWjxuyn24E2GhoEcQ==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/protocol-http": "^5.1.0", - "@smithy/querystring-builder": "^4.0.2", - "@smithy/types": "^4.2.0", - "@smithy/util-base64": "^4.0.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.10.4" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/fetch-http-handler/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/hash-blob-browser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/hash-blob-browser/-/hash-blob-browser-4.0.2.tgz", - "integrity": "sha512-3g188Z3DyhtzfBRxpZjU8R9PpOQuYsbNnyStc/ZVS+9nVX1f6XeNOa9IrAh35HwwIZg+XWk8bFVtNINVscBP+g==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/chunked-blob-reader": "^5.0.0", - "@smithy/chunked-blob-reader-native": "^4.0.0", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.10.4" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/hash-blob-browser/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/hash-node": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/hash-node/-/hash-node-4.0.2.tgz", - "integrity": "sha512-VnTpYPnRUE7yVhWozFdlxcYknv9UN7CeOqSrMH+V877v4oqtVYuoqhIhtSjmGPvYrYnAkaM61sLMKHvxL138yg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "@smithy/util-buffer-from": "^4.0.0", - "@smithy/util-utf8": "^4.0.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/hash-node/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">=18.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/hash-stream-node": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/hash-stream-node/-/hash-stream-node-4.0.2.tgz", - "integrity": "sha512-POWDuTznzbIwlEXEvvXoPMS10y0WKXK790soe57tFRfvf4zBHyzE529HpZMqmDdwG9MfFflnyzndUQ8j78ZdSg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "@smithy/util-utf8": "^4.0.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.14.5" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/hash-stream-node/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.14.5" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/invalid-dependency": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/invalid-dependency/-/invalid-dependency-4.0.2.tgz", - "integrity": "sha512-GatB4+2DTpgWPday+mnUkoumP54u/MDM/5u44KF9hIu8jF0uafZtQLcdfIKkIcUNuF/fBojpLEHZS/56JqPeXQ==", - "license": "Apache-2.0", + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@smithy/invalid-dependency/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/is-array-buffer": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/is-array-buffer/-/is-array-buffer-4.0.0.tgz", - "integrity": "sha512-saYhF8ZZNoJDTvJBEWgeBccCg+yvp1CX+ed12yORU3NilJScfc6gfch2oVb4QgxZrGUx3/ZJlb+c/dJbyupxlw==", - "license": "Apache-2.0", + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/md5-js": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/md5-js/-/md5-js-4.0.2.tgz", - "integrity": "sha512-Hc0R8EiuVunUewCse2syVgA2AfSRco3LyAv07B/zCOMa+jpXI9ll+Q21Nc6FAlYPcpNcAXqBzMhNs1CD/pP2bA==", - "license": "Apache-2.0", + "node_modules/@babel/types": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "@smithy/util-utf8": "^4.0.0", - "tslib": "^2.6.2" + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" }, "engines": { - "node": ">=18.0.0" + "node": ">=6.9.0" } }, - "node_modules/@smithy/md5-js/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "license": "ISC", "dependencies": { - "tslib": "^2.6.2" + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" }, "engines": { - "node": ">=18.0.0" + "node": ">=8" } }, - "node_modules/@smithy/middleware-content-length": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/middleware-content-length/-/middleware-content-length-4.0.2.tgz", - "integrity": "sha512-hAfEXm1zU+ELvucxqQ7I8SszwQ4znWMbNv6PLMndN83JJN41EPuS93AIyh2N+gJ6x8QFhzSO6b7q2e6oClDI8A==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" - }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=8" } }, - "node_modules/@smithy/middleware-content-length/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/middleware-endpoint": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/@smithy/middleware-endpoint/-/middleware-endpoint-4.1.0.tgz", - "integrity": "sha512-xhLimgNCbCzsUppRTGXWkZywksuTThxaIB0HwbpsVLY5sceac4e1TZ/WKYqufQLaUy+gUSJGNdwD2jo3cXL0iA==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/core": "^3.2.0", - "@smithy/middleware-serde": "^4.0.3", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/shared-ini-file-loader": "^4.0.2", - "@smithy/types": "^4.2.0", - "@smithy/url-parser": "^4.0.2", - "@smithy/util-middleware": "^4.0.2", - "tslib": "^2.6.2" - }, + "node_modules/@jest/console/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/@smithy/middleware-endpoint/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/console/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/middleware-retry": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/@smithy/middleware-retry/-/middleware-retry-4.1.0.tgz", - "integrity": "sha512-2zAagd1s6hAaI/ap6SXi5T3dDwBOczOMCSkkYzktqN1+tzbk1GAsHNAdo/1uzxz3Ky02jvZQwbi/vmDA6z4Oyg==", - "license": "Apache-2.0", + "node_modules/@jest/console/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/node-config-provider": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/service-error-classification": "^4.0.2", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-retry": "^4.0.2", - "tslib": "^2.6.2", - "uuid": "^9.0.1" + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/middleware-retry/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/console/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/middleware-serde": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/@smithy/middleware-serde/-/middleware-serde-4.0.3.tgz", - "integrity": "sha512-rfgDVrgLEVMmMn0BI8O+8OVr6vXzjV7HZj57l0QxslhzbvVfikZbVfBVthjLHqib4BW44QhcIgJpvebHlRaC9A==", - "license": "Apache-2.0", + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/core/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/@smithy/middleware-serde/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/core/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/middleware-stack": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/middleware-stack/-/middleware-stack-4.0.2.tgz", - "integrity": "sha512-eSPVcuJJGVYrFYu2hEq8g8WWdJav3sdrI4o2c6z/rjnYDd3xH9j9E7deZQCzFn4QvGPouLngH3dQ+QVTxv5bOQ==", - "license": "Apache-2.0", + "node_modules/@jest/core/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/middleware-stack/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/core/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/node-config-provider": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/node-config-provider/-/node-config-provider-4.0.2.tgz", - "integrity": "sha512-WgCkILRZfJwJ4Da92a6t3ozN/zcvYyJGUTmfGbgS/FkCcoCjl7G4FJaCDN1ySdvLvemnQeo25FdkyMSTSwulsw==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/property-provider": "^4.0.2", - "@smithy/shared-ini-file-loader": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" - }, + "node_modules/@jest/diff-sequences": { + "version": "30.0.1", + "resolved": "https://registry.npmjs.org/@jest/diff-sequences/-/diff-sequences-30.0.1.tgz", + "integrity": "sha512-n5H8QLDJ47QqbCNn5SuFjCRDrOLEZ0h8vAHCK5RL9Ls7Xa8AQLa/YxAc9UjFqoEDM48muwtBGjtMY5cr0PLDCw==", + "dev": true, + "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" } }, - "node_modules/@smithy/node-config-provider/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/node-http-handler": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@smithy/node-http-handler/-/node-http-handler-4.0.4.tgz", - "integrity": "sha512-/mdqabuAT3o/ihBGjL94PUbTSPSRJ0eeVTdgADzow0wRJ0rN4A27EOrtlK56MYiO1fDvlO3jVTCxQtQmK9dZ1g==", - "license": "Apache-2.0", + "node_modules/@jest/environment/node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/abort-controller": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/querystring-builder": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/node-http-handler/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/environment/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/property-provider": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/property-provider/-/property-provider-4.0.2.tgz", - "integrity": "sha512-wNRoQC1uISOuNc2s4hkOYwYllmiyrvVXWMtq+TysNRVQaHm4yoafYQyjN/goYZS+QbYlPIbb/QRjaUZMuzwQ7A==", - "license": "Apache-2.0", + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/property-provider/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/expect-utils": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-30.2.0.tgz", + "integrity": "sha512-1JnRfhqpD8HGpOmQp180Fo9Zt69zNtC+9lR+kT7NVL05tNXIi+QC8Csz7lfidMoVLPD3FnOtcmp0CEFnxExGEA==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/get-type": "30.1.0" }, "engines": { - "node": ">=18.0.0" + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" } }, - "node_modules/@smithy/protocol-http": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@smithy/protocol-http/-/protocol-http-5.1.0.tgz", - "integrity": "sha512-KxAOL1nUNw2JTYrtviRRjEnykIDhxc84qMBzxvu1MUfQfHTuBlCG7PA6EdVwqpJjH7glw7FqQoFxUJSyBQgu7g==", - "license": "Apache-2.0", + "node_modules/@jest/expect/node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "jest-get-type": "^29.6.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/protocol-http/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/expect/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/expect/node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/querystring-builder": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/querystring-builder/-/querystring-builder-4.0.2.tgz", - "integrity": "sha512-NTOs0FwHw1vimmQM4ebh+wFQvOwkEf/kQL6bSM1Lock+Bv4I89B3hGYoUEPkmvYPkDKyp5UdXJYu+PoTQ3T31Q==", - "license": "Apache-2.0", + "node_modules/@jest/expect/node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "@smithy/util-uri-escape": "^4.0.0", - "tslib": "^2.6.2" + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/querystring-builder/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/expect/node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/querystring-parser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/querystring-parser/-/querystring-parser-4.0.2.tgz", - "integrity": "sha512-v6w8wnmZcVXjfVLjxw8qF7OwESD9wnpjp0Dqry/Pod0/5vcEA3qxCr+BhbOHlxS8O+29eLpT3aagxXGwIoEk7Q==", - "license": "Apache-2.0", + "node_modules/@jest/expect/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/querystring-parser/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/expect/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/service-error-classification": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/service-error-classification/-/service-error-classification-4.0.2.tgz", - "integrity": "sha512-LA86xeFpTKn270Hbkixqs5n73S+LVM0/VZco8dqd+JT75Dyx3Lcw/MraL7ybjmz786+160K8rPOmhsq0SocoJQ==", - "license": "Apache-2.0", + "node_modules/@jest/expect/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0" + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/service-error-classification/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/shared-ini-file-loader": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-4.0.2.tgz", - "integrity": "sha512-J9/gTWBGVuFZ01oVA6vdb4DAjf1XbDhK6sLsu3OS9qmLrS6KB5ygpeHiM3miIbj1qgSJ96GYszXFWv6ErJ8QEw==", - "license": "Apache-2.0", + "node_modules/@jest/fake-timers/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/fake-timers/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/shared-ini-file-loader/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/fake-timers/node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/signature-v4": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/@smithy/signature-v4/-/signature-v4-5.0.2.tgz", - "integrity": "sha512-Mz+mc7okA73Lyz8zQKJNyr7lIcHLiPYp0+oiqiMNc/t7/Kf2BENs5d63pEj7oPqdjaum6g0Fc8wC78dY1TgtXw==", - "license": "Apache-2.0", + "node_modules/@jest/fake-timers/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/is-array-buffer": "^4.0.0", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", - "@smithy/util-hex-encoding": "^4.0.0", - "@smithy/util-middleware": "^4.0.2", - "@smithy/util-uri-escape": "^4.0.0", - "@smithy/util-utf8": "^4.0.0", - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/signature-v4/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/fake-timers/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/smithy-client": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/smithy-client/-/smithy-client-4.2.0.tgz", - "integrity": "sha512-Qs65/w30pWV7LSFAez9DKy0Koaoh3iHhpcpCCJ4waj/iqwsuSzJna2+vYwq46yBaqO5ZbP9TjUsATUNxrKeBdw==", - "license": "Apache-2.0", + "node_modules/@jest/get-type": { + "version": "30.1.0", + "resolved": "https://registry.npmjs.org/@jest/get-type/-/get-type-30.1.0.tgz", + "integrity": "sha512-eMbZE2hUnx1WV0pmURZY9XoXPkUYjpc55mb0CrhtdWLtzMQPFvu/rZkTLZFTsdaVQa+Tr4eWAteqcUzoawq/uA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/core": "^3.2.0", - "@smithy/middleware-endpoint": "^4.1.0", - "@smithy/middleware-stack": "^4.0.2", - "@smithy/protocol-http": "^5.1.0", - "@smithy/types": "^4.2.0", - "@smithy/util-stream": "^4.2.0", - "tslib": "^2.6.2" + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/smithy-client/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@jest/globals/node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" }, "engines": { - "node": ">=18.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/types": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-3.7.2.tgz", - "integrity": "sha512-bNwBYYmN8Eh9RyjS1p2gW6MIhSO2rl7X9QeLM8iTdcGRP+eDiIWDt66c9IysCc22gefKszZv+ubV9qZc7hdESg==", - "license": "Apache-2.0", + "node_modules/@jest/globals/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", "dependencies": { - "tslib": "^2.6.2" + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" }, "engines": { - "node": ">=16.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@smithy/url-parser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/url-parser/-/url-parser-4.0.2.tgz", - "integrity": "sha512-Bm8n3j2ScqnT+kJaClSVCMeiSenK6jVAzZCNewsYWuZtnBehEz4r2qP0riZySZVfzB+03XZHJeqfmJDkeeSLiQ==", - "license": "Apache-2.0", + "node_modules/@jest/pattern": { + "version": "30.0.1", + "resolved": "https://registry.npmjs.org/@jest/pattern/-/pattern-30.0.1.tgz", + "integrity": "sha512-gWp7NfQW27LaBQz3TITS8L7ZCQ0TLvtmI//4OwlQRx4rnWxcPNIYjxZpDcN4+UlGxgm3jS5QPz8IPTCkb59wZA==", + "dev": true, + "license": "MIT", "dependencies": { - "@smithy/querystring-parser": "^4.0.2", - "@smithy/types": "^4.2.0", - "tslib": "^2.6.2" + "@types/node": "*", + "jest-regex-util": "30.0.1" }, "engines": { - "node": ">=18.0.0" + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/pattern/node_modules/jest-regex-util": { + "version": "30.0.1", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-30.0.1.tgz", + "integrity": "sha512-jHEQgBXAgc+Gh4g0p3bCevgRCVRkB4VB70zhoAE48gxeSr1hfUOsM/C2WoJgVL7Eyg//hudYENbm3Ne+/dRVVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/reporters/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@jest/reporters/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.0" } }, - "node_modules/@smithy/url-parser/node_modules/@smithy/types": { + "node_modules/@smithy/abort-controller": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "resolved": "https://registry.npmjs.org/@smithy/abort-controller/-/abort-controller-4.2.0.tgz", + "integrity": "sha512-PLUYa+SUKOEZtXFURBu/CNxlsxfaFGxSBPcStL13KpVeVWIfdezWyDqkz7iDLmwnxojXD0s5KzuB5HGHvt4Aeg==", "dependencies": { + "@smithy/types": "^4.6.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-base64": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/util-base64/-/util-base64-4.0.0.tgz", - "integrity": "sha512-CvHfCmO2mchox9kjrtzoHkWHxjHZzaFojLc8quxXY7WAAMAg43nuxwv95tATVgQFNDwd4M9S1qFzj40Ul41Kmg==", - "license": "Apache-2.0", + "node_modules/@smithy/chunked-blob-reader": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@smithy/chunked-blob-reader/-/chunked-blob-reader-5.1.0.tgz", + "integrity": "sha512-a36AtR7Q7XOhRPt6F/7HENmTWcB8kN7mDJcOFM/+FuKO6x88w8MQJfYCufMWh4fGyVkPjUh3Rrz/dnqFQdo6OQ==", "dependencies": { - "@smithy/util-buffer-from": "^4.0.0", - "@smithy/util-utf8": "^4.0.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-body-length-browser": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/util-body-length-browser/-/util-body-length-browser-4.0.0.tgz", - "integrity": "sha512-sNi3DL0/k64/LO3A256M+m3CDdG6V7WKWHdAiBBMUN8S3hK3aMPhwnPik2A/a2ONN+9doY9UxaLfgqsIRg69QA==", - "license": "Apache-2.0", + "node_modules/@smithy/chunked-blob-reader-native": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@smithy/chunked-blob-reader-native/-/chunked-blob-reader-native-4.1.0.tgz", + "integrity": "sha512-Bnv0B3nSlfB2mPO0WgM49I/prl7+kamF042rrf3ezJ3Z4C7csPYvyYgZfXTGXwXfj1mAwDWjE/ybIf49PzFzvA==", "dependencies": { + "@smithy/util-base64": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-body-length-node": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/util-body-length-node/-/util-body-length-node-4.0.0.tgz", - "integrity": "sha512-q0iDP3VsZzqJyje8xJWEJCNIu3lktUGVoSy1KB0UWym2CL1siV3artm+u1DFYTLejpsrdGyCSWBdGNjJzfDPjg==", - "license": "Apache-2.0", + "node_modules/@smithy/config-resolver": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@smithy/config-resolver/-/config-resolver-4.2.1.tgz", + "integrity": "sha512-FXil8q4QN7mgKwU2hCLm0ltab8NyY/1RiqEf25Jnf6WLS3wmb11zGAoLETqg1nur2Aoibun4w4MjeN9CMJ4G6A==", "dependencies": { + "@smithy/node-config-provider": "^4.2.1", + "@smithy/types": "^4.5.0", + "@smithy/util-config-provider": "^4.1.0", + "@smithy/util-middleware": "^4.1.1", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-buffer-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/util-buffer-from/-/util-buffer-from-4.0.0.tgz", - "integrity": "sha512-9TOQ7781sZvddgO8nxueKi3+yGvkY35kotA0Y6BWRajAv8jjmigQ1sBwz0UX47pQMYXJPahSKEKYFgt+rXdcug==", - "license": "Apache-2.0", + "node_modules/@smithy/core": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/@smithy/core/-/core-3.14.0.tgz", + "integrity": "sha512-XJ4z5FxvY/t0Dibms/+gLJrI5niRoY0BCmE02fwmPcRYFPI4KI876xaE79YGWIKnEslMbuQPsIEsoU/DXa0DoA==", "dependencies": { - "@smithy/is-array-buffer": "^4.0.0", + "@smithy/middleware-serde": "^4.2.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/types": "^4.6.0", + "@smithy/util-base64": "^4.2.0", + "@smithy/util-body-length-browser": "^4.2.0", + "@smithy/util-middleware": "^4.2.0", + "@smithy/util-stream": "^4.4.0", + "@smithy/util-utf8": "^4.2.0", + "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-config-provider": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/util-config-provider/-/util-config-provider-4.0.0.tgz", - "integrity": "sha512-L1RBVzLyfE8OXH+1hsJ8p+acNUSirQnWQ6/EgpchV88G6zGBTDPdXiiExei6Z1wR2RxYvxY/XLw6AMNCCt8H3w==", - "license": "Apache-2.0", + "node_modules/@smithy/credential-provider-imds": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/credential-provider-imds/-/credential-provider-imds-4.1.1.tgz", + "integrity": "sha512-1WdBfM9DwA59pnpIizxnUvBf/de18p4GP+6zP2AqrlFzoW3ERpZaT4QueBR0nS9deDMaQRkBlngpVlnkuuTisQ==", "dependencies": { + "@smithy/node-config-provider": "^4.2.1", + "@smithy/property-provider": "^4.1.1", + "@smithy/types": "^4.5.0", + "@smithy/url-parser": "^4.1.1", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-defaults-mode-browser": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-4.0.8.tgz", - "integrity": "sha512-ZTypzBra+lI/LfTYZeop9UjoJhhGRTg3pxrNpfSTQLd3AJ37r2z4AXTKpq1rFXiiUIJsYyFgNJdjWRGP/cbBaQ==", - "license": "Apache-2.0", + "node_modules/@smithy/eventstream-codec": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-codec/-/eventstream-codec-4.1.1.tgz", + "integrity": "sha512-PwkQw1hZwHTQB6X5hSUWz2OSeuj5Z6enWuAqke7DgWoP3t6vg3ktPpqPz3Erkn6w+tmsl8Oss6nrgyezoea2Iw==", "dependencies": { - "@smithy/property-provider": "^4.0.2", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", - "bowser": "^2.11.0", + "@aws-crypto/crc32": "5.2.0", + "@smithy/types": "^4.5.0", + "@smithy/util-hex-encoding": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-defaults-mode-browser/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@smithy/eventstream-serde-browser": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-browser/-/eventstream-serde-browser-4.1.1.tgz", + "integrity": "sha512-Q9QWdAzRaIuVkefupRPRFAasaG/droBqn1feiMnmLa+LLEUG45pqX1+FurHFmlqiCfobB3nUlgoJfeXZsr7MPA==", "dependencies": { + "@smithy/eventstream-serde-universal": "^4.1.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-defaults-mode-node": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-4.0.8.tgz", - "integrity": "sha512-Rgk0Jc/UDfRTzVthye/k2dDsz5Xxs9LZaKCNPgJTRyoyBoeiNCnHsYGOyu1PKN+sDyPnJzMOz22JbwxzBp9NNA==", - "license": "Apache-2.0", + "node_modules/@smithy/eventstream-serde-config-resolver": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-config-resolver/-/eventstream-serde-config-resolver-4.2.1.tgz", + "integrity": "sha512-oSUkF9zDN9zcOUBMtxp8RewJlh71E9NoHWU8jE3hU9JMYCsmW4assVTpgic/iS3/dM317j6hO5x18cc3XrfvEw==", "dependencies": { - "@smithy/config-resolver": "^4.1.0", - "@smithy/credential-provider-imds": "^4.0.2", - "@smithy/node-config-provider": "^4.0.2", - "@smithy/property-provider": "^4.0.2", - "@smithy/smithy-client": "^4.2.0", - "@smithy/types": "^4.2.0", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-defaults-mode-node/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@smithy/eventstream-serde-node": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-node/-/eventstream-serde-node-4.1.1.tgz", + "integrity": "sha512-tn6vulwf/ScY0vjhzptSJuDJJqlhNtUjkxJ4wiv9E3SPoEqTEKbaq6bfqRO7nvhTG29ALICRcvfFheOUPl8KNA==", "dependencies": { + "@smithy/eventstream-serde-universal": "^4.1.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-endpoints": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@smithy/util-endpoints/-/util-endpoints-3.0.2.tgz", - "integrity": "sha512-6QSutU5ZyrpNbnd51zRTL7goojlcnuOB55+F9VBD+j8JpRY50IGamsjlycrmpn8PQkmJucFW8A0LSfXj7jjtLQ==", - "license": "Apache-2.0", + "node_modules/@smithy/eventstream-serde-universal": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-universal/-/eventstream-serde-universal-4.1.1.tgz", + "integrity": "sha512-uLOAiM/Dmgh2CbEXQx+6/ssK7fbzFhd+LjdyFxXid5ZBCbLHTFHLdD/QbXw5aEDsLxQhgzDxLLsZhsftAYwHJA==", "dependencies": { - "@smithy/node-config-provider": "^4.0.2", - "@smithy/types": "^4.2.0", + "@smithy/eventstream-codec": "^4.1.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-endpoints/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@smithy/fetch-http-handler": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@smithy/fetch-http-handler/-/fetch-http-handler-5.3.0.tgz", + "integrity": "sha512-BG3KSmsx9A//KyIfw+sqNmWFr1YBUr+TwpxFT7yPqAk0yyDh7oSNgzfNH7pS6OC099EGx2ltOULvumCFe8bcgw==", "dependencies": { + "@smithy/protocol-http": "^5.3.0", + "@smithy/querystring-builder": "^4.2.0", + "@smithy/types": "^4.6.0", + "@smithy/util-base64": "^4.2.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-hex-encoding": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/util-hex-encoding/-/util-hex-encoding-4.0.0.tgz", - "integrity": "sha512-Yk5mLhHtfIgW2W2WQZWSg5kuMZCVbvhFmC7rV4IO2QqnZdbEFPmQnCcGMAX2z/8Qj3B9hYYNjZOhWym+RwhePw==", - "license": "Apache-2.0", + "node_modules/@smithy/hash-blob-browser": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/hash-blob-browser/-/hash-blob-browser-4.1.1.tgz", + "integrity": "sha512-avAtk++s1e/1VODf+rg7c9R2pB5G9y8yaJaGY4lPZI2+UIqVyuSDMikWjeWfBVmFZ3O7NpDxBbUCyGhThVUKWQ==", "dependencies": { + "@smithy/chunked-blob-reader": "^5.1.0", + "@smithy/chunked-blob-reader-native": "^4.1.0", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-middleware": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/util-middleware/-/util-middleware-4.0.2.tgz", - "integrity": "sha512-6GDamTGLuBQVAEuQ4yDQ+ti/YINf/MEmIegrEeg7DdB/sld8BX1lqt9RRuIcABOhAGTA50bRbPzErez7SlDtDQ==", - "license": "Apache-2.0", + "node_modules/@smithy/hash-node": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/hash-node/-/hash-node-4.1.1.tgz", + "integrity": "sha512-H9DIU9WBLhYrvPs9v4sYvnZ1PiAI0oc8CgNQUJ1rpN3pP7QADbTOUjchI2FB764Ub0DstH5xbTqcMJu1pnVqxA==", "dependencies": { - "@smithy/types": "^4.2.0", + "@smithy/types": "^4.5.0", + "@smithy/util-buffer-from": "^4.1.0", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-middleware/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@smithy/hash-stream-node": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/hash-stream-node/-/hash-stream-node-4.1.1.tgz", + "integrity": "sha512-3ztT4pV0Moazs3JAYFdfKk11kYFDo4b/3R3+xVjIm6wY9YpJf+xfz+ocEnNKcWAdcmSMqi168i2EMaKmJHbJMA==", "dependencies": { + "@smithy/types": "^4.5.0", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-retry": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@smithy/util-retry/-/util-retry-4.0.2.tgz", - "integrity": "sha512-Qryc+QG+7BCpvjloFLQrmlSd0RsVRHejRXd78jNO3+oREueCjwG1CCEH1vduw/ZkM1U9TztwIKVIi3+8MJScGg==", - "license": "Apache-2.0", + "node_modules/@smithy/invalid-dependency": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/invalid-dependency/-/invalid-dependency-4.1.1.tgz", + "integrity": "sha512-1AqLyFlfrrDkyES8uhINRlJXmHA2FkG+3DY8X+rmLSqmFwk3DJnvhyGzyByPyewh2jbmV+TYQBEfngQax8IFGg==", "dependencies": { - "@smithy/service-error-classification": "^4.0.2", - "@smithy/types": "^4.2.0", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-retry/node_modules/@smithy/types": { + "node_modules/@smithy/is-array-buffer": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "resolved": "https://registry.npmjs.org/@smithy/is-array-buffer/-/is-array-buffer-4.2.0.tgz", + "integrity": "sha512-DZZZBvC7sjcYh4MazJSGiWMI2L7E0oCiRHREDzIxi/M2LY79/21iXt6aPLHge82wi5LsuRF5A06Ds3+0mlh6CQ==", "dependencies": { "tslib": "^2.6.2" }, @@ -2880,139 +3616,3916 @@ "node": ">=18.0.0" } }, - "node_modules/@smithy/util-stream": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/util-stream/-/util-stream-4.2.0.tgz", - "integrity": "sha512-Vj1TtwWnuWqdgQI6YTUF5hQ/0jmFiOYsc51CSMgj7QfyO+RF4EnT2HNjoviNlOOmgzgvf3f5yno+EiC4vrnaWQ==", - "license": "Apache-2.0", + "node_modules/@smithy/md5-js": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/md5-js/-/md5-js-4.1.1.tgz", + "integrity": "sha512-MvWXKK743BuHjr/hnWuT6uStdKEaoqxHAQUvbKJPPZM5ZojTNFI5D+47BoQfBE5RgGlRRty05EbWA+NXDv+hIA==", "dependencies": { - "@smithy/fetch-http-handler": "^5.0.2", - "@smithy/node-http-handler": "^4.0.4", - "@smithy/types": "^4.2.0", - "@smithy/util-base64": "^4.0.0", - "@smithy/util-buffer-from": "^4.0.0", - "@smithy/util-hex-encoding": "^4.0.0", - "@smithy/util-utf8": "^4.0.0", + "@smithy/types": "^4.5.0", + "@smithy/util-utf8": "^4.1.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-stream/node_modules/@smithy/types": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "node_modules/@smithy/middleware-content-length": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/middleware-content-length/-/middleware-content-length-4.1.1.tgz", + "integrity": "sha512-9wlfBBgTsRvC2JxLJxv4xDGNBrZuio3AgSl0lSFX7fneW2cGskXTYpFxCdRYD2+5yzmsiTuaAJD1Wp7gWt9y9w==", "dependencies": { + "@smithy/protocol-http": "^5.2.1", + "@smithy/types": "^4.5.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-uri-escape": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/util-uri-escape/-/util-uri-escape-4.0.0.tgz", - "integrity": "sha512-77yfbCbQMtgtTylO9itEAdpPXSog3ZxMe09AEhm0dU0NLTalV70ghDZFR+Nfi1C60jnJoh/Re4090/DuZh2Omg==", - "license": "Apache-2.0", + "node_modules/@smithy/middleware-endpoint": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/@smithy/middleware-endpoint/-/middleware-endpoint-4.3.0.tgz", + "integrity": "sha512-jFVjuQeV8TkxaRlcCNg0GFVgg98tscsmIrIwRFeC74TIUyLE3jmY9xgc1WXrPQYRjQNK3aRoaIk6fhFRGOIoGw==", "dependencies": { + "@smithy/core": "^3.14.0", + "@smithy/middleware-serde": "^4.2.0", + "@smithy/node-config-provider": "^4.3.0", + "@smithy/shared-ini-file-loader": "^4.3.0", + "@smithy/types": "^4.6.0", + "@smithy/url-parser": "^4.2.0", + "@smithy/util-middleware": "^4.2.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-utf8": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@smithy/util-utf8/-/util-utf8-4.0.0.tgz", - "integrity": "sha512-b+zebfKCfRdgNJDknHCob3O7FpeYQN6ZG6YLExMcasDHsCXlsXCEuiPZeLnJLpwa5dvPetGlnGCiMHuLwGvFow==", - "license": "Apache-2.0", - "dependencies": { - "@smithy/util-buffer-from": "^4.0.0", - "tslib": "^2.6.2" + "node_modules/@smithy/middleware-retry": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@smithy/middleware-retry/-/middleware-retry-4.2.1.tgz", + "integrity": "sha512-JzfvjwSJXWRl7LkLgIRTUTd2Wj639yr3sQGpViGNEOjtb0AkAuYqRAHs+jSOI/LPC0ZTjmFVVtfrCICMuebexw==", + "dependencies": { + "@smithy/node-config-provider": "^4.2.1", + "@smithy/protocol-http": "^5.2.1", + "@smithy/service-error-classification": "^4.1.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "@smithy/util-middleware": "^4.1.1", + "@smithy/util-retry": "^4.1.1", + "@types/uuid": "^9.0.1", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-waiter": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/@smithy/util-waiter/-/util-waiter-4.0.3.tgz", - "integrity": "sha512-JtaY3FxmD+te+KSI2FJuEcfNC9T/DGGVf551babM7fAaXhjJUt7oSYurH1Devxd2+BOSUACCgt3buinx4UnmEA==", - "license": "Apache-2.0", + "node_modules/@smithy/middleware-serde": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/middleware-serde/-/middleware-serde-4.2.0.tgz", + "integrity": "sha512-rpTQ7D65/EAbC6VydXlxjvbifTf4IH+sADKg6JmAvhkflJO2NvDeyU9qsWUNBelJiQFcXKejUHWRSdmpJmEmiw==", "dependencies": { - "@smithy/abort-controller": "^4.0.2", - "@smithy/types": "^4.2.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/types": "^4.6.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@smithy/util-waiter/node_modules/@smithy/types": { + "node_modules/@smithy/middleware-stack": { "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.2.0.tgz", - "integrity": "sha512-7eMk09zQKCO+E/ivsjQv+fDlOupcFUCSC/L2YUPgwhvowVGWbPQHjEFcmjt7QQ4ra5lyowS92SV53Zc6XD4+fg==", - "license": "Apache-2.0", + "resolved": "https://registry.npmjs.org/@smithy/middleware-stack/-/middleware-stack-4.2.0.tgz", + "integrity": "sha512-G5CJ//eqRd9OARrQu9MK1H8fNm2sMtqFh6j8/rPozhEL+Dokpvi1Og+aCixTuwDAGZUkJPk6hJT5jchbk/WCyg==", "dependencies": { + "@smithy/types": "^4.6.0", "tslib": "^2.6.2" }, "engines": { "node": ">=18.0.0" } }, - "node_modules/@types/aws-lambda": { - "version": "8.10.148", - "resolved": "https://registry.npmjs.org/@types/aws-lambda/-/aws-lambda-8.10.148.tgz", - "integrity": "sha512-JL+2cfkY9ODQeE06hOxSFNkafjNk4JRBgY837kpoq1GHDttq2U3BA9IzKOWxS4DLjKoymGB4i9uBrlCkjUl1yg==", + "node_modules/@smithy/node-config-provider": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/@smithy/node-config-provider/-/node-config-provider-4.3.0.tgz", + "integrity": "sha512-5QgHNuWdT9j9GwMPPJCKxy2KDxZ3E5l4M3/5TatSZrqYVoEiqQrDfAq8I6KWZw7RZOHtVtCzEPdYz7rHZixwcA==", + "dependencies": { + "@smithy/property-provider": "^4.2.0", + "@smithy/shared-ini-file-loader": "^4.3.0", + "@smithy/types": "^4.6.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/node-http-handler": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/@smithy/node-http-handler/-/node-http-handler-4.3.0.tgz", + "integrity": "sha512-RHZ/uWCmSNZ8cneoWEVsVwMZBKy/8123hEpm57vgGXA3Irf/Ja4v9TVshHK2ML5/IqzAZn0WhINHOP9xl+Qy6Q==", + "dependencies": { + "@smithy/abort-controller": "^4.2.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/querystring-builder": "^4.2.0", + "@smithy/types": "^4.6.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/property-provider": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/property-provider/-/property-provider-4.2.0.tgz", + "integrity": "sha512-rV6wFre0BU6n/tx2Ztn5LdvEdNZ2FasQbPQmDOPfV9QQyDmsCkOAB0osQjotRCQg+nSKFmINhyda0D3AnjSBJw==", + "dependencies": { + "@smithy/types": "^4.6.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/protocol-http": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@smithy/protocol-http/-/protocol-http-5.3.0.tgz", + "integrity": "sha512-6POSYlmDnsLKb7r1D3SVm7RaYW6H1vcNcTWGWrF7s9+2noNYvUsm7E4tz5ZQ9HXPmKn6Hb67pBDRIjrT4w/d7Q==", + "dependencies": { + "@smithy/types": "^4.6.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/querystring-builder": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/querystring-builder/-/querystring-builder-4.2.0.tgz", + "integrity": "sha512-Q4oFD0ZmI8yJkiPPeGUITZj++4HHYCW3pYBYfIobUCkYpI6mbkzmG1MAQQ3lJYYWj3iNqfzOenUZu+jqdPQ16A==", + "dependencies": { + "@smithy/types": "^4.6.0", + "@smithy/util-uri-escape": "^4.2.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/querystring-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/querystring-parser/-/querystring-parser-4.2.0.tgz", + "integrity": "sha512-BjATSNNyvVbQxOOlKse0b0pSezTWGMvA87SvoFoFlkRsKXVsN3bEtjCxvsNXJXfnAzlWFPaT9DmhWy1vn0sNEA==", + "dependencies": { + "@smithy/types": "^4.6.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/service-error-classification": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/service-error-classification/-/service-error-classification-4.1.1.tgz", + "integrity": "sha512-Iam75b/JNXyDE41UvrlM6n8DNOa/r1ylFyvgruTUx7h2Uk7vDNV9AAwP1vfL1fOL8ls0xArwEGVcGZVd7IO/Cw==", + "dependencies": { + "@smithy/types": "^4.5.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/shared-ini-file-loader": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-4.3.0.tgz", + "integrity": "sha512-VCUPPtNs+rKWlqqntX0CbVvWyjhmX30JCtzO+s5dlzzxrvSfRh5SY0yxnkirvc1c80vdKQttahL71a9EsdolSQ==", + "dependencies": { + "@smithy/types": "^4.6.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/signature-v4": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@smithy/signature-v4/-/signature-v4-5.3.0.tgz", + "integrity": "sha512-MKNyhXEs99xAZaFhm88h+3/V+tCRDQ+PrDzRqL0xdDpq4gjxcMmf5rBA3YXgqZqMZ/XwemZEurCBQMfxZOWq/g==", + "dependencies": { + "@smithy/is-array-buffer": "^4.2.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/types": "^4.6.0", + "@smithy/util-hex-encoding": "^4.2.0", + "@smithy/util-middleware": "^4.2.0", + "@smithy/util-uri-escape": "^4.2.0", + "@smithy/util-utf8": "^4.2.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/smithy-client": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@smithy/smithy-client/-/smithy-client-4.7.0.tgz", + "integrity": "sha512-3BDx/aCCPf+kkinYf5QQhdQ9UAGihgOVqI3QO5xQfSaIWvUE4KYLtiGRWsNe1SR7ijXC0QEPqofVp5Sb0zC8xQ==", + "dependencies": { + "@smithy/core": "^3.14.0", + "@smithy/middleware-endpoint": "^4.3.0", + "@smithy/middleware-stack": "^4.2.0", + "@smithy/protocol-http": "^5.3.0", + "@smithy/types": "^4.6.0", + "@smithy/util-stream": "^4.4.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/types": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.6.0.tgz", + "integrity": "sha512-4lI9C8NzRPOv66FaY1LL1O/0v0aLVrq/mXP/keUa9mJOApEeae43LsLd2kZRUJw91gxOQfLIrV3OvqPgWz1YsA==", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/url-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/url-parser/-/url-parser-4.2.0.tgz", + "integrity": "sha512-AlBmD6Idav2ugmoAL6UtR6ItS7jU5h5RNqLMZC7QrLCoITA9NzIN3nx9GWi8g4z1pfWh2r9r96SX/jHiNwPJ9A==", + "dependencies": { + "@smithy/querystring-parser": "^4.2.0", + "@smithy/types": "^4.6.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-base64": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/util-base64/-/util-base64-4.2.0.tgz", + "integrity": "sha512-+erInz8WDv5KPe7xCsJCp+1WCjSbah9gWcmUXc9NqmhyPx59tf7jqFz+za1tRG1Y5KM1Cy1rWCcGypylFp4mvA==", + "dependencies": { + "@smithy/util-buffer-from": "^4.2.0", + "@smithy/util-utf8": "^4.2.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-body-length-browser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/util-body-length-browser/-/util-body-length-browser-4.2.0.tgz", + "integrity": "sha512-Fkoh/I76szMKJnBXWPdFkQJl2r9SjPt3cMzLdOB6eJ4Pnpas8hVoWPYemX/peO0yrrvldgCUVJqOAjUrOLjbxg==", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-body-length-node": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@smithy/util-body-length-node/-/util-body-length-node-4.1.0.tgz", + "integrity": "sha512-BOI5dYjheZdgR9XiEM3HJcEMCXSoqbzu7CzIgYrx0UtmvtC3tC2iDGpJLsSRFffUpy8ymsg2ARMP5fR8mtuUQQ==", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-buffer-from": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/util-buffer-from/-/util-buffer-from-4.2.0.tgz", + "integrity": "sha512-kAY9hTKulTNevM2nlRtxAG2FQ3B2OR6QIrPY3zE5LqJy1oxzmgBGsHLWTcNhWXKchgA0WHW+mZkQrng/pgcCew==", + "dependencies": { + "@smithy/is-array-buffer": "^4.2.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-config-provider": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/util-config-provider/-/util-config-provider-4.2.0.tgz", + "integrity": "sha512-YEjpl6XJ36FTKmD+kRJJWYvrHeUvm5ykaUS5xK+6oXffQPHeEM4/nXlZPe+Wu0lsgRUcNZiliYNh/y7q9c2y6Q==", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-defaults-mode-browser": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-4.1.1.tgz", + "integrity": "sha512-hA1AKIHFUMa9Tl6q6y8p0pJ9aWHCCG8s57flmIyLE0W7HcJeYrYtnqXDcGnftvXEhdQnSexyegXnzzTGk8bKLA==", + "dependencies": { + "@smithy/property-provider": "^4.1.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "bowser": "^2.11.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-defaults-mode-node": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-4.1.1.tgz", + "integrity": "sha512-RGSpmoBrA+5D2WjwtK7tto6Pc2wO9KSXKLpLONhFZ8VyuCbqlLdiDAfuDTNY9AJe4JoE+Cx806cpTQQoQ71zPQ==", + "dependencies": { + "@smithy/config-resolver": "^4.2.1", + "@smithy/credential-provider-imds": "^4.1.1", + "@smithy/node-config-provider": "^4.2.1", + "@smithy/property-provider": "^4.1.1", + "@smithy/smithy-client": "^4.6.1", + "@smithy/types": "^4.5.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-endpoints": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@smithy/util-endpoints/-/util-endpoints-3.1.1.tgz", + "integrity": "sha512-qB4R9kO0SetA11Rzu6MVGFIaGYX3p6SGGGfWwsKnC6nXIf0n/0AKVwRTsYsz9ToN8CeNNtNgQRwKFBndGJZdyw==", + "dependencies": { + "@smithy/node-config-provider": "^4.2.1", + "@smithy/types": "^4.5.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-hex-encoding": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/util-hex-encoding/-/util-hex-encoding-4.2.0.tgz", + "integrity": "sha512-CCQBwJIvXMLKxVbO88IukazJD9a4kQ9ZN7/UMGBjBcJYvatpWk+9g870El4cB8/EJxfe+k+y0GmR9CAzkF+Nbw==", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-middleware": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/util-middleware/-/util-middleware-4.2.0.tgz", + "integrity": "sha512-u9OOfDa43MjagtJZ8AapJcmimP+K2Z7szXn8xbty4aza+7P1wjFmy2ewjSbhEiYQoW1unTlOAIV165weYAaowA==", + "dependencies": { + "@smithy/types": "^4.6.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-retry": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/util-retry/-/util-retry-4.1.1.tgz", + "integrity": "sha512-jGeybqEZ/LIordPLMh5bnmnoIgsqnp4IEimmUp5c5voZ8yx+5kAlN5+juyr7p+f7AtZTgvhmInQk4Q0UVbrZ0Q==", + "dependencies": { + "@smithy/service-error-classification": "^4.1.1", + "@smithy/types": "^4.5.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-stream": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@smithy/util-stream/-/util-stream-4.4.0.tgz", + "integrity": "sha512-vtO7ktbixEcrVzMRmpQDnw/Ehr9UWjBvSJ9fyAbadKkC4w5Cm/4lMO8cHz8Ysb8uflvQUNRcuux/oNHKPXkffg==", + "dependencies": { + "@smithy/fetch-http-handler": "^5.3.0", + "@smithy/node-http-handler": "^4.3.0", + "@smithy/types": "^4.6.0", + "@smithy/util-base64": "^4.2.0", + "@smithy/util-buffer-from": "^4.2.0", + "@smithy/util-hex-encoding": "^4.2.0", + "@smithy/util-utf8": "^4.2.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-uri-escape": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/util-uri-escape/-/util-uri-escape-4.2.0.tgz", + "integrity": "sha512-igZpCKV9+E/Mzrpq6YacdTQ0qTiLm85gD6N/IrmyDvQFA4UnU3d5g3m8tMT/6zG/vVkWSU+VxeUyGonL62DuxA==", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-utf8": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@smithy/util-utf8/-/util-utf8-4.2.0.tgz", + "integrity": "sha512-zBPfuzoI8xyBtR2P6WQj63Rz8i3AmfAaJLuNG8dWsfvPe8lO4aCPYLn879mEgHndZH1zQ2oXmG8O1GGzzaoZiw==", + "dependencies": { + "@smithy/util-buffer-from": "^4.2.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/util-waiter": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/@smithy/util-waiter/-/util-waiter-4.1.1.tgz", + "integrity": "sha512-PJBmyayrlfxM7nbqjomF4YcT1sApQwZio0NHSsT0EzhJqljRmvhzqZua43TyEs80nJk2Cn2FGPg/N8phH6KeCQ==", + "dependencies": { + "@smithy/abort-controller": "^4.1.1", + "@smithy/types": "^4.5.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@smithy/uuid": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@smithy/uuid/-/uuid-1.1.0.tgz", + "integrity": "sha512-4aUIteuyxtBUhVdiQqcDhKFitwfd9hqoSDYY2KRXiWtgoWJ9Bmise+KfEPDiVHWeJepvF8xJO9/9+WDIciMFFw==", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@types/aws-lambda": { + "version": "8.10.148", + "resolved": "https://registry.npmjs.org/@types/aws-lambda/-/aws-lambda-8.10.148.tgz", + "integrity": "sha512-JL+2cfkY9ODQeE06hOxSFNkafjNk4JRBgY837kpoq1GHDttq2U3BA9IzKOWxS4DLjKoymGB4i9uBrlCkjUl1yg==", + "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "30.0.0", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-30.0.0.tgz", + "integrity": "sha512-XTYugzhuwqWjws0CVz8QpM36+T+Dz5mTEBKhNs/esGLnCIlGdRy+Dq78NRjd7ls7r8BC8ZRMOrKlkO1hU0JOwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^30.0.0", + "pretty-format": "^30.0.0" + } + }, + "node_modules/@types/node": { + "version": "24.10.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.10.0.tgz", + "integrity": "sha512-qzQZRBqkFsYyaSWXuEHc2WR9c0a0CXwiE5FWUvn7ZM+vdy1uZLfCunD38UzhuB7YN/J11ndbDBcTmOdxJo9Q7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/uuid": { + "version": "9.0.8", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.8.tgz", + "integrity": "sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==", + "license": "MIT" + }, + "node_modules/@types/yargs": { + "version": "17.0.34", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.34.tgz", + "integrity": "sha512-KExbHVa92aJpw9WDQvzBaGVE2/Pz+pLZQloT2hjL8IqsZnV62rlPOYvNnLmf/L2dyllfVUOVBj64M0z/46eR2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.8.25", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.25.tgz", + "integrity": "sha512-2NovHVesVF5TXefsGX1yzx1xgr7+m9JQenvz6FQY3qd+YXkKkYiv+vTCc7OriP9mcDZpTC5mAOYN4ocd29+erA==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/bowser": { + "version": "2.11.0", + "resolved": "https://registry.npmjs.org/bowser/-/bowser-2.11.0.tgz", + "integrity": "sha512-AlcaJBi/pqqJBIQ8U9Mcpc9i8Aqxn88Skv5d+xBX006BY5u8N3mGLHa5Lgppa7L/HfwgwLgZ6NYs+Ag6uUmJRA==", + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.27.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.27.0.tgz", + "integrity": "sha512-AXVQwdhot1eqLihwasPElhX2tAZiBjWdJ9i/Zcj2S6QYIjkx62OKSfnobkriB81C3l4w0rVy3Nt4jaTBltYEpw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.8.19", + "caniuse-lite": "^1.0.30001751", + "electron-to-chromium": "^1.5.238", + "node-releases": "^2.0.26", + "update-browserslist-db": "^1.1.4" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001753", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001753.tgz", + "integrity": "sha512-Bj5H35MD/ebaOV4iDLqPEtiliTN29qkGtEHCwawWn4cYm+bPJM2NsaP30vtZcnERClMzp52J4+aw2UNbK4o+zw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.3.tgz", + "integrity": "sha512-1L5aqIkwPfiodaMgQunkF1zRhNqifHBmtbbbxcr6yVxxBnliw4TDOW6NxpO8DJLgJ16OT+Y4ztZqP6p/FtXnAw==", + "dev": true, + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-jest/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dedent": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz", + "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.245", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.245.tgz", + "integrity": "sha512-rdmGfW47ZhL/oWEJAY4qxRtdly2B98ooTJ0pdEI4jhVLZ6tNf8fPtov2wS1IRKwFJT92le3x4Knxiwzl7cPPpQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-30.2.0.tgz", + "integrity": "sha512-u/feCi0GPsI+988gU2FLcsHyAHTU0MX1Wg68NhAnN7z/+C5wqG+CY8J53N9ioe8RXgaoz0nBR/TYMf3AycUuPw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/expect-utils": "30.2.0", + "@jest/get-type": "30.1.0", + "jest-matcher-utils": "30.2.0", + "jest-message-util": "30.2.0", + "jest-mock": "30.2.0", + "jest-util": "30.2.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-xml-parser": { + "version": "4.5.3", + "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-4.5.3.tgz", + "integrity": "sha512-RKihhV+SHsIUGXObeVy9AXiBbFwkVk7Syp8XgwN5U3JV416+Gwp/GO9i0JYKmikykgz/UHRrrV4ROuZEo/T0ig==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/NaturalIntelligence" + } + ], + "license": "MIT", + "dependencies": { + "strnum": "^1.1.1" + }, + "bin": { + "fxparser": "src/cli/cli.js" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-changed-files/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-circus/node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-cli/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-config/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-config/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-config/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-diff": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-30.2.0.tgz", + "integrity": "sha512-dQHFo3Pt4/NLlG5z4PxZ/3yZTZ1C7s9hveiOj+GCN+uT109NC2QgsoVZsVOAvbJ3RgKkvyLGXZV9+piDpWbm6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/diff-sequences": "30.0.1", + "@jest/get-type": "30.1.0", + "chalk": "^4.1.2", + "pretty-format": "30.2.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-each/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node/node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-haste-map/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-leak-detector/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-leak-detector/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-30.2.0.tgz", + "integrity": "sha512-dQ94Nq4dbzmUWkQ0ANAWS9tBRfqCrn0bV9AMYdOi/MHW726xn7eQmMeRTpX2ViC00bpNaWXq+7o4lIQ3AX13Hg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/get-type": "30.1.0", + "chalk": "^4.1.2", + "jest-diff": "30.2.0", + "pretty-format": "30.2.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.2.0.tgz", + "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@jest/types": "30.2.0", + "@types/stack-utils": "^2.0.3", + "chalk": "^4.1.2", + "graceful-fs": "^4.2.11", + "micromatch": "^4.0.8", + "pretty-format": "30.2.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.6" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-message-util/node_modules/@jest/schemas": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz", + "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.34.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-message-util/node_modules/@jest/types": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-30.2.0.tgz", + "integrity": "sha512-H9xg1/sfVvyfU7o3zMfBEjQ1gcsdeTMgqHoYdN79tuLqfTtuu7WckRA1R5whDwOzxaZAeMKTYWqP+WCAi0CHsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/pattern": "30.0.1", + "@jest/schemas": "30.0.5", + "@types/istanbul-lib-coverage": "^2.0.6", + "@types/istanbul-reports": "^3.0.4", + "@types/node": "*", + "@types/yargs": "^17.0.33", + "chalk": "^4.1.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-message-util/node_modules/@sinclair/typebox": { + "version": "0.34.41", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz", + "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-mock": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-30.2.0.tgz", + "integrity": "sha512-JNNNl2rj4b5ICpmAcq+WbLH83XswjPbjH4T7yvGzfAGCPh1rw+xVNbtk+FnRslvt9lkCcdn9i1oAoKUuFsOxRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.2.0", + "@types/node": "*", + "jest-util": "30.2.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-mock/node_modules/@jest/schemas": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz", + "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.34.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-mock/node_modules/@jest/types": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-30.2.0.tgz", + "integrity": "sha512-H9xg1/sfVvyfU7o3zMfBEjQ1gcsdeTMgqHoYdN79tuLqfTtuu7WckRA1R5whDwOzxaZAeMKTYWqP+WCAi0CHsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/pattern": "30.0.1", + "@jest/schemas": "30.0.5", + "@types/istanbul-lib-coverage": "^2.0.6", + "@types/istanbul-reports": "^3.0.4", + "@types/node": "*", + "@types/yargs": "^17.0.33", + "chalk": "^4.1.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-mock/node_modules/@sinclair/typebox": { + "version": "0.34.41", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz", + "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-runner/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-runtime/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-snapshot/node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-util": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.2.0.tgz", + "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.2.0", + "@types/node": "*", + "chalk": "^4.1.2", + "ci-info": "^4.2.0", + "graceful-fs": "^4.2.11", + "picomatch": "^4.0.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-util/node_modules/@jest/schemas": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz", + "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.34.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-util/node_modules/@jest/types": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-30.2.0.tgz", + "integrity": "sha512-H9xg1/sfVvyfU7o3zMfBEjQ1gcsdeTMgqHoYdN79tuLqfTtuu7WckRA1R5whDwOzxaZAeMKTYWqP+WCAi0CHsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/pattern": "30.0.1", + "@jest/schemas": "30.0.5", + "@types/istanbul-lib-coverage": "^2.0.6", + "@types/istanbul-reports": "^3.0.4", + "@types/node": "*", + "@types/yargs": "^17.0.33", + "chalk": "^4.1.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/jest-util/node_modules/@sinclair/typebox": { + "version": "0.34.41", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz", + "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-util/node_modules/ci-info": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.3.1.tgz", + "integrity": "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-util/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-validate/node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-watcher/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, "license": "MIT" }, - "node_modules/@types/uuid": { - "version": "9.0.8", - "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.8.tgz", - "integrity": "sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==", + "node_modules/js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, "license": "MIT" }, - "node_modules/bowser": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/bowser/-/bowser-2.11.0.tgz", - "integrity": "sha512-AlcaJBi/pqqJBIQ8U9Mcpc9i8Aqxn88Skv5d+xBX006BY5u8N3mGLHa5Lgppa7L/HfwgwLgZ6NYs+Ag6uUmJRA==", + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mnemonist": { + "version": "0.38.3", + "resolved": "https://registry.npmjs.org/mnemonist/-/mnemonist-0.38.3.tgz", + "integrity": "sha512-2K9QYubXx/NAjv4VLq1d1Ly8pWNC5L3BrixtdkyTegXWJIqY+zLNDhhX/A+ZwWt70tB1S8H4BE8FLYEFyNoOBw==", + "dependencies": { + "obliterator": "^1.6.1" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/obliterator": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/obliterator/-/obliterator-1.6.1.tgz", + "integrity": "sha512-9WXswnqINnnhOG/5SLimUlzuU1hFJUc8zkwyD59Sd+dPOMf05PmnYG/d6Q7HZ+KmgkZJa1PxRso6QdM3sTNHig==" + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pretty-format": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.2.0.tgz", + "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "30.0.5", + "ansi-styles": "^5.2.0", + "react-is": "^18.3.1" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/pretty-format/node_modules/@jest/schemas": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz", + "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.34.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/pretty-format/node_modules/@sinclair/typebox": { + "version": "0.34.41", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz", + "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==", + "dev": true, "license": "MIT" }, - "node_modules/fast-xml-parser": { - "version": "4.5.3", - "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-4.5.3.tgz", - "integrity": "sha512-RKihhV+SHsIUGXObeVy9AXiBbFwkVk7Syp8XgwN5U3JV416+Gwp/GO9i0JYKmikykgz/UHRrrV4ROuZEo/T0ig==", + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "dev": true, "funding": [ { - "type": "github", - "url": "https://github.com/sponsors/NaturalIntelligence" + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" } ], + "license": "MIT" + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, "license": "MIT", "dependencies": { - "strnum": "^1.1.1" + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { - "fxparser": "src/cli/cli.js" + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/mnemonist": { - "version": "0.38.3", - "resolved": "https://registry.npmjs.org/mnemonist/-/mnemonist-0.38.3.tgz", - "integrity": "sha512-2K9QYubXx/NAjv4VLq1d1Ly8pWNC5L3BrixtdkyTegXWJIqY+zLNDhhX/A+ZwWt70tB1S8H4BE8FLYEFyNoOBw==", + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, "license": "MIT", "dependencies": { - "obliterator": "^1.6.1" + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" } }, - "node_modules/obliterator": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/obliterator/-/obliterator-1.6.1.tgz", - "integrity": "sha512-9WXswnqINnnhOG/5SLimUlzuU1hFJUc8zkwyD59Sd+dPOMf05PmnYG/d6Q7HZ+KmgkZJa1PxRso6QdM3sTNHig==", + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true, "license": "MIT" }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/strnum": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/strnum/-/strnum-1.1.2.tgz", @@ -3025,12 +7538,242 @@ ], "license": "MIT" }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-jest": { + "version": "29.4.5", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.5.tgz", + "integrity": "sha512-HO3GyiWn2qvTQA4kTgjDcXiMwYQt68a1Y8+JuLRVpdIzm+UOLSHgl/XqR4c6nzJkq5rOkjc02O2I7P7l/Yof0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "bs-logger": "^0.2.6", + "fast-json-stable-stringify": "^2.1.0", + "handlebars": "^4.7.8", + "json5": "^2.2.3", + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.7.3", + "type-fest": "^4.41.0", + "yargs-parser": "^21.1.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0 || ^30.0.0", + "@jest/types": "^29.0.0 || ^30.0.0", + "babel-jest": "^29.0.0 || ^30.0.0", + "jest": "^29.0.0 || ^30.0.0", + "jest-util": "^29.0.0 || ^30.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/transform": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jest-util": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ts-jest/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "dev": true, + "license": "MIT" + }, + "node_modules/update-browserslist-db": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.4.tgz", + "integrity": "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, "node_modules/uuid": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", @@ -3043,6 +7786,152 @@ "bin": { "uuid": "dist/bin/uuid" } + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } } } } diff --git a/source/lambda/layers/aws-sdk-lib/package.json b/source/lambda/layers/aws-sdk-lib/package.json index 4e29ec7e..86abc632 100644 --- a/source/lambda/layers/aws-sdk-lib/package.json +++ b/source/lambda/layers/aws-sdk-lib/package.json @@ -1,21 +1,30 @@ { "name": "@amzn/aws-sdk-layer", - "version": "3.0.7", + "version": "4.0.0", "description": "AWS Javascript SDK v3 layer", + "main": "index.js", + "scripts": { + "build": "npx tsc", + "test": "jest --coverage", + "code-formatter": "prettier --config ../../../../.prettierrc.yml --ignore-path ../../../../.prettierignore --write '**/*.{js,ts,json,css,md}'" + }, "dependencies": { - "@aws-sdk/client-api-gateway": "^3.775.0", - "@aws-sdk/client-cloudformation": "^3.699.0", - "@aws-sdk/client-cognito-identity-provider": "^3.708.0", - "@aws-sdk/client-dynamodb": "^3.705.0", - "@aws-sdk/client-kendra": "^3.706.0", - "@aws-sdk/client-s3": "^3.705.0", - "@aws-sdk/client-secrets-manager": "^3.699.0", - "@aws-sdk/client-ssm": "^3.699.0", - "@aws-sdk/lib-dynamodb": "^3.705.0", - "@aws-sdk/types": "^3.696.0", - "@aws-sdk/util-arn-parser": "^3.693.0", - "@aws-sdk/util-dynamodb": "^3.705.0", - "@smithy/types": "^3.7.2", + "@aws-sdk/client-api-gateway": "^3.875.0", + "@aws-sdk/client-bedrock-agentcore-control": "^3.875.0", + "@aws-sdk/client-cloudformation": "^3.875.0", + "@aws-sdk/client-cognito-identity-provider": "^3.875.0", + "@aws-sdk/client-dynamodb": "^3.875.0", + "@aws-sdk/client-kendra": "^3.875.0", + "@aws-sdk/client-s3": "^3.875.0", + "@aws-sdk/client-secrets-manager": "^3.875.0", + "@aws-sdk/client-ssm": "^3.875.0", + "@aws-sdk/lib-dynamodb": "^3.875.0", + "@aws-sdk/s3-presigned-post": "^3.875.0", + "@aws-sdk/s3-request-presigner": "^3.875.0", + "@aws-sdk/types": "^3.875.0", + "@aws-sdk/util-arn-parser": "^3.873.0", + "@aws-sdk/util-dynamodb": "^3.875.0", + "@smithy/types": "^4.3.2", "@types/aws-lambda": "^8.10.146" }, "author": { @@ -25,5 +34,10 @@ "license": "Apache-2.0", "overrides": { "fast-xml-parser": "^4.4.1" + }, + "devDependencies": { + "@types/jest": "^30.0.0", + "jest": "^29.7.0", + "ts-jest": "^29.4.5" } } diff --git a/source/lambda/layers/aws-sdk-lib/tsconfig.json b/source/lambda/layers/aws-sdk-lib/tsconfig.json new file mode 100644 index 00000000..1d456dbc --- /dev/null +++ b/source/lambda/layers/aws-sdk-lib/tsconfig.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "target": "ES2018", + "module": "commonjs", + "lib": ["es2018"], + "declaration": true, + "outDir": "./dist", + "rootDir": ".", + "strict": true, + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "moduleResolution": "node", + "resolveJsonModule": true, + "paths": { + "aws-node-user-agent-config": [ + "../aws-node-user-agent-config/dist" + ] + } + }, + "include": ["*.ts"], + "exclude": ["node_modules", "dist"] +} diff --git a/source/lambda/layers/aws_boto3/poetry.lock b/source/lambda/layers/aws_boto3/poetry.lock index 8a69047b..6b1202a6 100644 --- a/source/lambda/layers/aws_boto3/poetry.lock +++ b/source/lambda/layers/aws_boto3/poetry.lock @@ -1,35 +1,35 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "boto3" -version = "1.40.15" +version = "1.40.53" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "boto3-1.40.15-py3-none-any.whl", hash = "sha256:52b8aa78c9906c4e49dcec6817c041df33c9825073bf66e7df8fc00afbe47b4b"}, - {file = "boto3-1.40.15.tar.gz", hash = "sha256:271b379ce5ad35ca82f1009e917528a182eed0e2de197ccffb0c51acadec5c79"}, + {file = "boto3-1.40.53-py3-none-any.whl", hash = "sha256:65ded2738de259bd9030feb4772ec7b53d5b661befa88ce836117c3df8265309"}, + {file = "boto3-1.40.53.tar.gz", hash = "sha256:3f8cf56034cfde20dd0abca01349f64ab65734d90c3fbf7357e8a84cb64a62ee"}, ] [package.dependencies] -botocore = ">=1.40.15,<1.41.0" +botocore = ">=1.40.53,<1.41.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.13.0,<0.14.0" +s3transfer = ">=0.14.0,<0.15.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.40.15" +version = "1.40.53" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "botocore-1.40.15-py3-none-any.whl", hash = "sha256:b364e039d2b67e509cfb089cb39b295251e48a60cc68fd591defbe10b44d83f9"}, - {file = "botocore-1.40.15.tar.gz", hash = "sha256:4960800e4c5a7b43db22550979c22f5a324cbaf75ef494bbb2cf400ef1e6aca7"}, + {file = "botocore-1.40.53-py3-none-any.whl", hash = "sha256:840322b0af4be7a6e2effddb4eb388053c25af0618f627f37d8b03cc1edbc928"}, + {file = "botocore-1.40.53.tar.gz", hash = "sha256:4ebb9e6648c4896d3f0cdda5ff30b5de9a83aeb591be89a16f98cc5ee3cd371c"}, ] [package.dependencies] @@ -69,14 +69,14 @@ six = ">=1.5" [[package]] name = "s3transfer" -version = "0.13.1" +version = "0.14.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724"}, - {file = "s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf"}, + {file = "s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456"}, + {file = "s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125"}, ] [package.dependencies] @@ -118,4 +118,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.1" python-versions = "^3.13" -content-hash = "f9848fde3d456411b1c4a72c41a379e70a611a66986434852567593d363ac687" +content-hash = "abb9f065d9e4514351c52473d2d3b5f3c208a8fca64e6295c5256867542e1793" diff --git a/source/lambda/layers/aws_boto3/pyproject.toml b/source/lambda/layers/aws_boto3/pyproject.toml index d2347dd0..6e002f4a 100644 --- a/source/lambda/layers/aws_boto3/pyproject.toml +++ b/source/lambda/layers/aws_boto3/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "boto3-layer" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Layer for AWS Boto3 python SDK" packages = [ @@ -14,8 +14,8 @@ classifiers = [ license = "Apache-2.0" [tool.poetry.dependencies] -botocore = "1.40.15" -boto3 = "1.40.15" +botocore = "1.40.53" +boto3 = "1.40.53" python = "^3.13" urllib3="2.5.0" diff --git a/source/lambda/layers/custom_boto3_init/poetry.lock b/source/lambda/layers/custom_boto3_init/poetry.lock index cadc1618..754c891a 100644 --- a/source/lambda/layers/custom_boto3_init/poetry.lock +++ b/source/lambda/layers/custom_boto3_init/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "aws-lambda-powertools" @@ -47,27 +47,27 @@ wrapt = "*" [[package]] name = "boto3" -version = "1.40.15" +version = "1.40.53" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "boto3-1.40.15-py3-none-any.whl", hash = "sha256:52b8aa78c9906c4e49dcec6817c041df33c9825073bf66e7df8fc00afbe47b4b"}, - {file = "boto3-1.40.15.tar.gz", hash = "sha256:271b379ce5ad35ca82f1009e917528a182eed0e2de197ccffb0c51acadec5c79"}, + {file = "boto3-1.40.53-py3-none-any.whl", hash = "sha256:65ded2738de259bd9030feb4772ec7b53d5b661befa88ce836117c3df8265309"}, + {file = "boto3-1.40.53.tar.gz", hash = "sha256:3f8cf56034cfde20dd0abca01349f64ab65734d90c3fbf7357e8a84cb64a62ee"}, ] [package.dependencies] -botocore = ">=1.40.15,<1.41.0" +botocore = ">=1.40.53,<1.41.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.13.0,<0.14.0" +s3transfer = ">=0.14.0,<0.15.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-layer" -version = "3.0.7" +version = "4.0.0" description = "Layer for AWS Boto3 python SDK" optional = false python-versions = "^3.13" @@ -76,8 +76,8 @@ files = [] develop = true [package.dependencies] -boto3 = "1.40.15" -botocore = "1.40.15" +boto3 = "1.40.53" +botocore = "1.40.53" urllib3 = "2.5.0" [package.source] @@ -86,14 +86,14 @@ url = "../aws_boto3" [[package]] name = "botocore" -version = "1.40.15" +version = "1.40.53" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">=3.9" groups = ["main", "test"] files = [ - {file = "botocore-1.40.15-py3-none-any.whl", hash = "sha256:b364e039d2b67e509cfb089cb39b295251e48a60cc68fd591defbe10b44d83f9"}, - {file = "botocore-1.40.15.tar.gz", hash = "sha256:4960800e4c5a7b43db22550979c22f5a324cbaf75ef494bbb2cf400ef1e6aca7"}, + {file = "botocore-1.40.53-py3-none-any.whl", hash = "sha256:840322b0af4be7a6e2effddb4eb388053c25af0618f627f37d8b03cc1edbc928"}, + {file = "botocore-1.40.53.tar.gz", hash = "sha256:4ebb9e6648c4896d3f0cdda5ff30b5de9a83aeb591be89a16f98cc5ee3cd371c"}, ] [package.dependencies] @@ -491,14 +491,14 @@ six = ">=1.5" [[package]] name = "s3transfer" -version = "0.13.1" +version = "0.14.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724"}, - {file = "s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf"}, + {file = "s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456"}, + {file = "s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125"}, ] [package.dependencies] diff --git a/source/lambda/layers/custom_boto3_init/pyproject.toml b/source/lambda/layers/custom_boto3_init/pyproject.toml index 953420d2..ef5c6c76 100644 --- a/source/lambda/layers/custom_boto3_init/pyproject.toml +++ b/source/lambda/layers/custom_boto3_init/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "custom_boto3_init" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Initialize boto config for AWS Python SDK with custom configuration" packages = [ diff --git a/source/lambda/layers/langchain/poetry.lock b/source/lambda/layers/langchain/poetry.lock index 788b4e55..170b07a9 100644 --- a/source/lambda/layers/langchain/poetry.lock +++ b/source/lambda/layers/langchain/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -417,24 +417,24 @@ tools = ["beautifulsoup4 (>=4.13.4)", "bedrock-agentcore (>=0.1.0) ; python_vers [[package]] name = "langchain-core" -version = "0.3.74" +version = "0.3.80" description = "Building applications with LLMs through composability" optional = false -python-versions = ">=3.9" +python-versions = "<4.0.0,>=3.9.0" groups = ["main"] files = [ - {file = "langchain_core-0.3.74-py3-none-any.whl", hash = "sha256:088338b5bc2f6a66892f9afc777992c24ee3188f41cbc603d09181e34a228ce7"}, - {file = "langchain_core-0.3.74.tar.gz", hash = "sha256:ff604441aeade942fbcc0a3860a592daba7671345230c2078ba2eb5f82b6ba76"}, + {file = "langchain_core-0.3.80-py3-none-any.whl", hash = "sha256:2141e3838d100d17dce2359f561ec0df52c526bae0de6d4f469f8026c5747456"}, + {file = "langchain_core-0.3.80.tar.gz", hash = "sha256:29636b82513ab49e834764d023c4d18554d3d719a185d37b019d0a8ae948c6bb"}, ] [package.dependencies] -jsonpatch = ">=1.33,<2.0" -langsmith = ">=0.3.45" -packaging = ">=23.2" -pydantic = ">=2.7.4" -PyYAML = ">=5.3" +jsonpatch = ">=1.33.0,<2.0.0" +langsmith = ">=0.3.45,<1.0.0" +packaging = ">=23.2.0,<26.0.0" +pydantic = ">=2.7.4,<3.0.0" +PyYAML = ">=5.3.0,<7.0.0" tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10.0.0" -typing-extensions = ">=4.7" +typing-extensions = ">=4.7.0,<5.0.0" [[package]] name = "langchain-text-splitters" @@ -1213,4 +1213,4 @@ cffi = ["cffi (>=1.17) ; python_version >= \"3.13\" and platform_python_implemen [metadata] lock-version = "2.1" python-versions = "^3.13" -content-hash = "3834fdd67bf5499361f6e2f009860080ed37fd365bd0317eafd76750cf63b639" +content-hash = "7c79ceff3cab77964482c91f3c554b09f03c0e5b49b4b1f48f942bd2845bca4c" diff --git a/source/lambda/layers/langchain/pyproject.toml b/source/lambda/layers/langchain/pyproject.toml index 8cc1f054..95fb2001 100644 --- a/source/lambda/layers/langchain/pyproject.toml +++ b/source/lambda/layers/langchain/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-layer" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Layer for LangChain libraries" packages = [ @@ -17,7 +17,7 @@ license = "Apache-2.0" python = "^3.13" numpy = "2.2.2" langchain = "0.3.27" -langchain-core = "0.3.74" +langchain-core = "0.3.80" langchain-aws = "0.2.31" pydantic = "2.11.0" requests="2.32.4" diff --git a/source/lambda/model-info/index.ts b/source/lambda/model-info/index.ts index 0d4a9dbc..28f8b9f0 100644 --- a/source/lambda/model-info/index.ts +++ b/source/lambda/model-info/index.ts @@ -4,15 +4,15 @@ import { injectLambdaContext } from '@aws-lambda-powertools/logger/middleware'; import { captureLambdaHandler } from '@aws-lambda-powertools/tracer/middleware'; import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import { AWSClientManager } from 'aws-sdk-lib'; import middy from '@middy/core'; import { APIGatewayEvent } from 'aws-lambda'; -import { customAwsConfig } from 'aws-node-user-agent-config'; import { logger, tracer } from './power-tools-init'; import { checkEnv } from './utils/check-env'; import { formatError, formatResponse } from './utils/http-response-formatters'; import { ModelInfoRetriever } from './utils/model-info-retriever'; -const ddbClient = new DynamoDBClient(customAwsConfig()); +const ddbClient = AWSClientManager.getServiceClient('dynamodb', tracer); export const lambdaHandler = async (event: APIGatewayEvent) => { checkEnv(); diff --git a/source/lambda/model-info/models/types.ts b/source/lambda/model-info/models/types.ts new file mode 100644 index 00000000..f737b816 --- /dev/null +++ b/source/lambda/model-info/models/types.ts @@ -0,0 +1,11 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +/** + * Interface for retry settings + */ +export interface RetrySettings { + maxRetries: number; + backOffRate: number; + initialDelayMs: number; +} \ No newline at end of file diff --git a/source/lambda/model-info/package-lock.json b/source/lambda/model-info/package-lock.json index e0db6c6d..031eb55f 100644 --- a/source/lambda/model-info/package-lock.json +++ b/source/lambda/model-info/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/model-info", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/model-info", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "devDependencies": { "@types/jest": "^29.5.12", @@ -19,7 +19,7 @@ "eslint": "^9.9.0", "jest": "^29.7.0", "lodash": "^4.17.21", - "prettier": "^3.3.3", + "prettier": "^3.6.2", "ts-jest": "^29.2.4", "ts-node": "^10.9.2", "typescript": "^5.5.4" @@ -813,10 +813,11 @@ } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -3846,9 +3847,9 @@ "dev": true }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -4426,9 +4427,9 @@ } }, "node_modules/prettier": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz", - "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, "bin": { "prettier": "bin/prettier.cjs" diff --git a/source/lambda/model-info/package.json b/source/lambda/model-info/package.json index a43145fc..c3d42f69 100644 --- a/source/lambda/model-info/package.json +++ b/source/lambda/model-info/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/model-info", - "version": "3.0.7", + "version": "4.0.0", "description": "This lambda supports APIs that provide the capability to deploy GenAI use cases", "main": "index.ts", "scripts": { @@ -12,7 +12,7 @@ "code-linter-js": "./node_modules/eslint/bin/eslint.js lambda --ext .js", "code-linter-ts": "./node_modules/eslint/bin/eslint.js bin lib --ext .ts", "code-linter": "npm run code-linter-ts && npm run code-linter-js", - "code-formatter": "./node_modules/prettier/bin-prettier.js --config .prettierrc.yml '**/*.ts' '**/*.js' --write" + "code-formatter": "prettier --config ../../../.prettierrc.yml --ignore-path ../../../.prettierignore --write '**/*.{js,ts,json,css,md}'" }, "author": { "name": "Amazon Web Services", @@ -30,7 +30,7 @@ "eslint": "^9.9.0", "jest": "^29.7.0", "lodash": "^4.17.21", - "prettier": "^3.3.3", + "prettier": "^3.6.2", "ts-jest": "^29.2.4", "ts-node": "^10.9.2", "typescript": "^5.5.4" diff --git a/source/lambda/model-info/tsconfig.json b/source/lambda/model-info/tsconfig.json index cdddd0f6..79b8ff9a 100644 --- a/source/lambda/model-info/tsconfig.json +++ b/source/lambda/model-info/tsconfig.json @@ -29,6 +29,9 @@ "moduleResolution": "Node", "rootDir": ".", "paths": { + "aws-sdk-lib": [ + "../layers/aws-sdk-lib/dist" + ], "aws-node-user-agent-config": [ "../layers/aws-node-user-agent-config/dist" ], diff --git a/source/lambda/model-info/utils/model-info-retriever.ts b/source/lambda/model-info/utils/model-info-retriever.ts index 18791360..d84f9790 100644 --- a/source/lambda/model-info/utils/model-info-retriever.ts +++ b/source/lambda/model-info/utils/model-info-retriever.ts @@ -11,7 +11,6 @@ import { ScanCommandInput } from '@aws-sdk/client-dynamodb'; import { unmarshall } from '@aws-sdk/util-dynamodb'; -import { customAwsConfig } from 'aws-node-user-agent-config'; import { logger, tracer } from '../power-tools-init'; import { MODEL_INFO_TABLE_NAME_ENV_VAR, ModelInfoTableKeys } from './constants'; @@ -22,7 +21,7 @@ export class ModelInfoRetriever { private client: DynamoDBClient; private tableName: string; - constructor(client: DynamoDBClient = new DynamoDBClient(customAwsConfig())) { + constructor(client: DynamoDBClient) { this.client = client; this.tableName = process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]!; } diff --git a/source/lambda/use-case-details/index.ts b/source/lambda/use-case-details/index.ts index 5ceedd32..1b11a0fe 100644 --- a/source/lambda/use-case-details/index.ts +++ b/source/lambda/use-case-details/index.ts @@ -3,7 +3,8 @@ import { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda'; import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; -import { validateAndParseRequest, getRetrySettings, castToResponse } from './utils/utils'; +import { AWSClientManager } from 'aws-sdk-lib'; +import { validateAndParseRequest, castToResponse } from './utils/utils'; import { formatError, formatResponse } from './utils/http-response-formatters'; import { logger, tracer, metrics } from './power-tools-init'; import { MetricUnit } from '@aws-lambda-powertools/metrics'; @@ -14,10 +15,7 @@ import { unmarshall } from '@aws-sdk/util-dynamodb'; import middy from '@middy/core'; // Initialize DynamoDB client with retry settings -const dynamoDB = new DynamoDBClient({ - maxAttempts: getRetrySettings().maxRetries, - retryMode: 'standard' -}); +const dynamoDB = AWSClientManager.getServiceClient('dynamodb', tracer); export const lambdaHandler = async (event: APIGatewayProxyEvent): Promise => { try { diff --git a/source/lambda/use-case-details/package-lock.json b/source/lambda/use-case-details/package-lock.json index 13ab280f..97c495e2 100644 --- a/source/lambda/use-case-details/package-lock.json +++ b/source/lambda/use-case-details/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/use-case-details", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/use-case-details", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "devDependencies": { "@types/jest": "^29.5.14", @@ -19,7 +19,7 @@ "eslint": "^9.16.0", "jest": "^29.7.0", "lodash": "^4.17.21", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" @@ -813,10 +813,11 @@ } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -3846,9 +3847,9 @@ "dev": true }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -4426,9 +4427,9 @@ } }, "node_modules/prettier": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz", - "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, "bin": { "prettier": "bin/prettier.cjs" diff --git a/source/lambda/use-case-details/package.json b/source/lambda/use-case-details/package.json index 751249a1..18659061 100644 --- a/source/lambda/use-case-details/package.json +++ b/source/lambda/use-case-details/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/use-case-details", - "version": "3.0.7", + "version": "4.0.0", "description": "This lambda supports APIs that provide details about a deployed use case", "main": "index.ts", "scripts": { @@ -14,7 +14,7 @@ "code-linter-js": "./node_modules/eslint/bin/eslint.js lambda --ext .js", "code-linter-ts": "./node_modules/eslint/bin/eslint.js bin lib --ext .ts", "code-linter": "npm run code-linter-ts && npm run code-linter-js", - "code-formatter": "./node_modules/prettier/bin-prettier.js --config .prettierrc.yml '**/*.ts' '**/*.js' --write" + "code-formatter": "prettier --config ../../../.prettierrc.yml --ignore-path ../../../.prettierignore --write '**/*.{js,ts,json,css,md}'" }, "author": { "name": "Amazon Web Services", @@ -32,7 +32,7 @@ "eslint": "^9.16.0", "jest": "^29.7.0", "lodash": "^4.17.21", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" diff --git a/source/lambda/use-case-details/test/index.test.ts b/source/lambda/use-case-details/test/index.test.ts index dff0d820..85459a75 100644 --- a/source/lambda/use-case-details/test/index.test.ts +++ b/source/lambda/use-case-details/test/index.test.ts @@ -1,17 +1,27 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; import { mockClient } from 'aws-sdk-client-mock'; import { APIGatewayProxyEvent } from 'aws-lambda'; import { lambdaHandler } from '..'; -import { unmarshall } from '@aws-sdk/util-dynamodb'; +import { unmarshall, marshall } from '@aws-sdk/util-dynamodb'; import { castToResponse } from '../utils/utils'; -// Mock the DynamoDB client const ddbMock = mockClient(DynamoDBClient); const errorText = 'Internal Error - Please contact support and quote the following trace id:'; + +const createMockDynamoItem = (key: string, config: any) => { + return marshall({ + key, + config + }); +}; describe('Lambda Handler', () => { - // Reset mocks before each test beforeEach(() => { ddbMock.reset(); process.env.LLM_CONFIG_TABLE = 'test-use-cases-table'; @@ -19,109 +29,49 @@ describe('Lambda Handler', () => { afterEach(() => { ddbMock.reset(); - // Clean up environment variables delete process.env.LLM_CONFIG_TABLE; }); it('should successfully retrieve configuration from DynamoDB', async () => { - const mockItem = { - key: { S: 'test-config' }, - config: { - M: { - AuthenticationParams: { - M: { - AuthenticationProvider: { - S: 'Cognito' - }, - CognitoParams: { - M: { - ExistingUserPoolId: { - S: 'not-real' - } - } - } - } - }, - ConversationMemoryParams: { - M: { - AiPrefix: { - S: 'AI' - }, - ChatHistoryLength: { - N: '20' - }, - ConversationMemoryType: { - S: 'DynamoDB' - }, - HumanPrefix: { - S: 'Human' - } - } - }, - IsInternalUser: { - S: 'true' - }, - KnowledgeBaseParams: { - M: {} - }, - LlmParams: { - M: { - BedrockLlmParams: { - M: { - ModelId: { - S: 'fake-model' - } - } - }, - ModelParams: { - M: {} - }, - ModelProvider: { - S: 'Bedrock' - }, - PromptParams: { - M: { - MaxInputTextLength: { - N: '7500' - }, - MaxPromptTemplateLength: { - N: '7500' - }, - PromptTemplate: { - S: '{history}\n\n{input}' - }, - RephraseQuestion: { - BOOL: true - }, - UserPromptEditingEnabled: { - BOOL: true - } - } - }, - RAGEnabled: { - BOOL: false - }, - Streaming: { - BOOL: false - }, - Temperature: { - N: '0.9' - }, - Verbose: { - BOOL: false - } - } - }, - UseCaseName: { - S: 'test2' - }, - UseCaseType: { - S: 'Text' - } + const mockConfig = { + AuthenticationParams: { + AuthenticationProvider: 'Cognito', + CognitoParams: { + ExistingUserPoolId: 'not-real' } - } + }, + ConversationMemoryParams: { + AiPrefix: 'AI', + ChatHistoryLength: 20, + ConversationMemoryType: 'DynamoDB', + HumanPrefix: 'Human' + }, + IsInternalUser: 'true', + KnowledgeBaseParams: {}, + LlmParams: { + BedrockLlmParams: { + ModelId: 'fake-model' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + PromptParams: { + MaxInputTextLength: 7500, + MaxPromptTemplateLength: 7500, + PromptTemplate: '{history}\n\n{input}', + RephraseQuestion: true, + UserPromptEditingEnabled: true + }, + RAGEnabled: false, + Streaming: false, + Temperature: 0.9, + Verbose: false + }, + UseCaseName: 'test2', + UseCaseType: 'Text' }; + const mockItem = createMockDynamoItem('test-config', mockConfig); + ddbMock.on(GetItemCommand).resolves({ Item: mockItem }); @@ -209,4 +159,181 @@ describe('Lambda Handler', () => { expect(response.statusCode).toBe(500); expect(response.body).toContain(errorText); }); + + it('should successfully retrieve configuration with MultimodalParams enabled', async () => { + const mockConfig = { + LlmParams: { + ModelProvider: 'Bedrock', + MultimodalParams: { + MultimodalEnabled: true + }, + RAGEnabled: false + }, + UseCaseName: 'Multimodal Test Case', + UseCaseType: 'Multimodal', + FeedbackParams: { + FeedbackEnabled: true + } + }; + + const mockItem = createMockDynamoItem('test-multimodal-config', mockConfig); + + ddbMock.on(GetItemCommand).resolves({ + Item: mockItem + }); + + const event = { + pathParameters: { + useCaseConfigKey: 'test-multimodal-config' + } + } as unknown as APIGatewayProxyEvent; + + const response = await lambdaHandler(event); + const unmarshalledItem = unmarshall(mockItem); + const expectedResponse = castToResponse(unmarshalledItem.config); + + expect(response.statusCode).toBe(200); + expect(response.body).toEqual(JSON.stringify(expectedResponse)); + + const responseBody = JSON.parse(response.body); + expect(responseBody.LlmParams.MultimodalParams).toEqual({ + MultimodalEnabled: true + }); + }); + + it('should successfully retrieve configuration with MultimodalParams disabled', async () => { + const mockConfig = { + LlmParams: { + ModelProvider: 'Bedrock', + MultimodalParams: { + MultimodalEnabled: false + }, + PromptParams: { + UserPromptEditingEnabled: true, + MaxInputTextLength: 5000, + PromptTemplate: 'Custom prompt template' + } + }, + UseCaseName: 'Text Only Case', + UseCaseType: 'Text' + }; + + const mockItem = createMockDynamoItem('test-multimodal-disabled-config', mockConfig); + + ddbMock.on(GetItemCommand).resolves({ + Item: mockItem + }); + + const event = { + pathParameters: { + useCaseConfigKey: 'test-multimodal-disabled-config' + } + } as unknown as APIGatewayProxyEvent; + + const response = await lambdaHandler(event); + const responseBody = JSON.parse(response.body); + + expect(response.statusCode).toBe(200); + expect(responseBody.LlmParams.MultimodalParams).toEqual({ + MultimodalEnabled: false + }); + expect(responseBody.LlmParams.PromptParams).toBeDefined(); + }); + + it('should handle configuration without MultimodalParams', async () => { + const mockConfig = { + LlmParams: { + ModelProvider: 'Bedrock', + RAGEnabled: true + // No MultimodalParams + }, + UseCaseName: 'RAG Only Case', + UseCaseType: 'Text' + }; + + const mockItem = createMockDynamoItem('test-no-multimodal-config', mockConfig); + + ddbMock.on(GetItemCommand).resolves({ + Item: mockItem + }); + + const event = { + pathParameters: { + useCaseConfigKey: 'test-no-multimodal-config' + } + } as unknown as APIGatewayProxyEvent; + + const response = await lambdaHandler(event); + const responseBody = JSON.parse(response.body); + + expect(response.statusCode).toBe(200); + expect(responseBody.LlmParams.MultimodalParams).toBeUndefined(); + expect(responseBody.LlmParams.RAGEnabled).toBe(true); + }); + + it('should handle configuration with both MultimodalParams and PromptParams', async () => { + const mockItem = { + key: { S: 'test-combined-config' }, + config: { + M: { + LlmParams: { + M: { + ModelProvider: { + S: 'Bedrock' + }, + MultimodalParams: { + M: { + MultimodalEnabled: { + BOOL: true + } + } + }, + PromptParams: { + M: { + UserPromptEditingEnabled: { + BOOL: false + }, + MaxInputTextLength: { + N: '10000' + } + } + }, + RAGEnabled: { + BOOL: true + } + } + }, + UseCaseName: { + S: 'Combined Features Case' + }, + UseCaseType: { + S: 'Multimodal' + } + } + } + }; + + ddbMock.on(GetItemCommand).resolves({ + Item: mockItem + }); + + const event = { + pathParameters: { + useCaseConfigKey: 'test-combined-config' + } + } as unknown as APIGatewayProxyEvent; + + const response = await lambdaHandler(event); + const responseBody = JSON.parse(response.body); + + expect(response.statusCode).toBe(200); + expect(responseBody.LlmParams.MultimodalParams).toEqual({ + MultimodalEnabled: true + }); + expect(responseBody.LlmParams.PromptParams).toEqual({ + UserPromptEditingEnabled: false, + MaxInputTextLength: 10000 + }); + expect(responseBody.LlmParams.RAGEnabled).toBe(true); + }); }); diff --git a/source/lambda/use-case-details/test/utils/utils.test.ts b/source/lambda/use-case-details/test/utils/utils.test.ts index 5e4b786b..b18077d9 100644 --- a/source/lambda/use-case-details/test/utils/utils.test.ts +++ b/source/lambda/use-case-details/test/utils/utils.test.ts @@ -92,6 +92,145 @@ describe('Utils', () => { const result = castToResponse(testParams); expect(result).toEqual(expected); }); + + it('should include MultimodalParams when present and enabled', () => { + const paramsWithMultimodal = { + ...testParams, + LlmParams: { + ...testParams.LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }; + + const result = castToResponse(paramsWithMultimodal); + + expect(result.LlmParams?.MultimodalParams).toEqual({ + MultimodalEnabled: true + }); + }); + + it('should include MultimodalParams when present and disabled', () => { + const paramsWithMultimodal = { + ...testParams, + LlmParams: { + ...testParams.LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }; + + const result = castToResponse(paramsWithMultimodal); + + expect(result.LlmParams?.MultimodalParams).toEqual({ + MultimodalEnabled: false + }); + }); + + it('should not include MultimodalParams when not present', () => { + const result = castToResponse(testParams); + expect(result.LlmParams?.MultimodalParams).toBeUndefined(); + }); + + it('should handle MultimodalParams with other LlmParams', () => { + const paramsWithMultimodal = { + ...testParams, + LlmParams: { + ...testParams.LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }; + + const result = castToResponse(paramsWithMultimodal); + + expect(result.LlmParams?.MultimodalParams).toEqual({ + MultimodalEnabled: true + }); + expect(result.LlmParams?.RAGEnabled).toBe(false); + expect(result.LlmParams?.PromptParams).toBeDefined(); + }); + + it('should handle MultimodalParams only (no other LlmParams)', () => { + const paramsWithOnlyMultimodal = { + UseCaseName: 'Multimodal Only', + UseCaseType: 'Multimodal', + LlmParams: { + ModelProvider: 'Bedrock', + MultimodalParams: { + MultimodalEnabled: true + } + } + }; + + const result = castToResponse(paramsWithOnlyMultimodal); + + expect(result.LlmParams?.MultimodalParams).toEqual({ + MultimodalEnabled: true + }); + expect(result.LlmParams?.RAGEnabled).toBeUndefined(); + expect(result.LlmParams?.PromptParams).toBeUndefined(); + expect(result.ModelProviderName).toBe('Bedrock'); + }); + + it('should handle empty LlmParams with MultimodalParams', () => { + const paramsWithEmptyLlm = { + UseCaseName: 'Empty LLM', + UseCaseType: 'Text', + LlmParams: { + MultimodalParams: { + MultimodalEnabled: false + } + } + }; + + const result = castToResponse(paramsWithEmptyLlm); + + expect(result.LlmParams?.MultimodalParams).toEqual({ + MultimodalEnabled: false + }); + expect(result.ModelProviderName).toBe('BedrockAgent'); // Default value + }); + + it('should handle undefined LlmParams', () => { + const paramsWithoutLlm = { + UseCaseName: 'No LLM', + UseCaseType: 'Text' + }; + + const result = castToResponse(paramsWithoutLlm); + + expect(result.LlmParams).toBeUndefined(); + expect(result.ModelProviderName).toBe('BedrockAgent'); // Default value + }); + + it('should preserve all MultimodalParams properties', () => { + // Test with potential future extensions to MultimodalParams + const paramsWithExtendedMultimodal = { + UseCaseName: 'Extended Multimodal', + UseCaseType: 'Multimodal', + LlmParams: { + ModelProvider: 'Bedrock', + MultimodalParams: { + MultimodalEnabled: true, + // These would be additional properties if added in the future + MaxFileSize: 10485760, + SupportedFormats: ['image/jpeg', 'image/png'] + } + } + }; + + const result = castToResponse(paramsWithExtendedMultimodal); + + expect(result.LlmParams?.MultimodalParams).toEqual({ + MultimodalEnabled: true, + MaxFileSize: 10485760, + SupportedFormats: ['image/jpeg', 'image/png'] + }); + }); }); describe('getRetrySettings', () => { diff --git a/source/lambda/use-case-details/tsconfig.json b/source/lambda/use-case-details/tsconfig.json index 8c01dc16..b417fd28 100644 --- a/source/lambda/use-case-details/tsconfig.json +++ b/source/lambda/use-case-details/tsconfig.json @@ -29,6 +29,9 @@ "moduleResolution": "Node", "rootDir": ".", "paths": { + "aws-sdk-lib": [ + "../layers/aws-sdk-lib/dist" + ], "aws-node-user-agent-config": [ "../layers/aws-node-user-agent-config/dist" ], diff --git a/source/lambda/use-case-details/utils/constants.ts b/source/lambda/use-case-details/utils/constants.ts index ce68bb17..bc179915 100644 --- a/source/lambda/use-case-details/utils/constants.ts +++ b/source/lambda/use-case-details/utils/constants.ts @@ -28,6 +28,7 @@ export interface DetailsResponse { export interface LlmParams { PromptParams?: PromptParams; RAGEnabled?: boolean; + MultimodalParams?: MultimodalParams; } export interface PromptParams { @@ -37,6 +38,10 @@ export interface PromptParams { MaxPromptTemplateLength?: number; } +export interface MultimodalParams { + MultimodalEnabled: boolean; +} + export interface FeedbackParams { FeedbackEnabled: boolean; } \ No newline at end of file diff --git a/source/lambda/use-case-details/utils/utils.ts b/source/lambda/use-case-details/utils/utils.ts index 86c42fa3..d3600480 100644 --- a/source/lambda/use-case-details/utils/utils.ts +++ b/source/lambda/use-case-details/utils/utils.ts @@ -36,8 +36,8 @@ export function castToResponse(params: any): DetailsResponse { FeedbackParams: params.FeedbackParams }; + let cleanedLlmParams: any = {}; if (useCaseDetails.LlmParams) { - let cleanedLlmParams: any = {}; if (useCaseDetails.LlmParams.PromptParams) { const { PromptParams } = useCaseDetails.LlmParams; @@ -55,8 +55,12 @@ export function castToResponse(params: any): DetailsResponse { cleanedLlmParams.RAGEnabled = useCaseDetails.LlmParams.RAGEnabled; } - useCaseDetails.LlmParams = Object.keys(cleanedLlmParams).length > 0 ? cleanedLlmParams : undefined; + if ('MultimodalParams' in useCaseDetails.LlmParams) { + cleanedLlmParams.MultimodalParams = useCaseDetails.LlmParams.MultimodalParams; + } } + + useCaseDetails.LlmParams = Object.keys(cleanedLlmParams).length > 0 ? cleanedLlmParams : undefined; return useCaseDetails; } diff --git a/source/lambda/use-case-management/agents-handler.ts b/source/lambda/use-case-management/agents-handler.ts new file mode 100644 index 00000000..cacaf093 --- /dev/null +++ b/source/lambda/use-case-management/agents-handler.ts @@ -0,0 +1,121 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { injectLambdaContext } from '@aws-lambda-powertools/logger/middleware'; +import { logMetrics } from '@aws-lambda-powertools/metrics/middleware'; +import { captureLambdaHandler } from '@aws-lambda-powertools/tracer/middleware'; +import middy from '@middy/core'; +import { APIGatewayEvent } from 'aws-lambda'; +import { + CreateUseCaseCommand, + DeleteUseCaseCommand, + PermanentlyDeleteUseCaseCommand, + UpdateUseCaseCommand, + GetUseCaseCommand +} from './model/commands/use-case-command'; +import { ListUseCasesAdapter } from './model/list-use-cases'; +import { UseCase } from './model/use-case'; +import { logger, metrics, tracer } from './power-tools-init'; +import { checkEnv, handleLambdaError, getRootResourceId, parseEventBody } from './utils/utils'; +import { formatResponse } from './utils/http-response-formatters'; +import { + AgentBuilderUseCaseDeploymentAdapter, + AgentBuilderUseCaseInfoAdapter +} from './model/adapters/agent-builder-use-case-adapter'; +import { AGENT_CORE_DEPLOYMENT_REQUIRED_ENV_VARS, UseCaseTypeFromApiEvent, Status } from './utils/constants'; +import { GetUseCaseAdapter } from './model/get-use-case'; +import { CaseCommand } from './model/commands/case-command'; +import { ListAgentBuilderCommand } from './model/commands/agent-builder-command'; + +const commands: Map = new Map(); +commands.set('create', new CreateUseCaseCommand()); +commands.set('update', new UpdateUseCaseCommand()); +commands.set('delete', new DeleteUseCaseCommand()); +commands.set('permanentlyDelete', new PermanentlyDeleteUseCaseCommand()); +commands.set('list', new ListAgentBuilderCommand()); +commands.set('get', new GetUseCaseCommand()); + +const routeMap = new Map([ + ['GET:/deployments/agents', 'list'], + ['POST:/deployments/agents', 'create'], + ['GET:/deployments/agents/{useCaseId}', 'get'], + ['PATCH:/deployments/agents/{useCaseId}', 'update'], + ['DELETE:/deployments/agents/{useCaseId}', 'delete'] +]); + +const getStackAction = (event: APIGatewayEvent): string => { + const routeKey = `${event.httpMethod}:${event.resource}`; + const baseAction = routeMap.get(routeKey); + + if (!baseAction) { + logger.error(`Invalid HTTP method: ${event.httpMethod}, at resource: ${event.resource}`); + throw new Error(`Invalid HTTP method: ${event.httpMethod}, at resource: ${event.resource}`); + } + + // Special case for permanent delete + if (baseAction === 'delete' && event.queryStringParameters?.permanent === 'true') { + return 'permanentlyDelete'; + } + + return baseAction; +}; + +export const agentsLambdaHandler = async (event: APIGatewayEvent) => { + checkEnv(AGENT_CORE_DEPLOYMENT_REQUIRED_ENV_VARS); + + const stackAction = getStackAction(event); + const command = commands.get(stackAction); + + if (!command) { + logger.error(`Invalid action: ${stackAction}`); + throw new Error(`Invalid action: ${stackAction}`); + } + try { + const response = await command.execute(await adaptEvent(event, stackAction)); + + // as create stack and update stack failures don't throw error, but returns a Failure response + // to render a 500 request in the UI the following error is + if (response === Status.FAILED) { + throw new Error('Command execution failed'); + } + return formatResponse(response); + } catch (error: unknown) { + const agentAction = event.httpMethod && event.resource ? `${event.httpMethod}:${event.resource}` : 'unknown'; + return handleLambdaError(error, agentAction, 'Agent'); + } +}; + +export const adaptEvent = async ( + event: APIGatewayEvent, + stackAction: string +): Promise => { + if (stackAction === 'list') { + return new ListUseCasesAdapter(event); + } else if (stackAction === 'delete' || stackAction === 'permanentlyDelete') { + return new AgentBuilderUseCaseInfoAdapter(event); + } else if (stackAction === 'get') { + return new GetUseCaseAdapter(event); + } + + // Parse the event body + const eventBody = parseEventBody(event); + const useCaseType = eventBody.UseCaseType; + + // Only get root resource ID when ExistingRestApiId is provided + let rootResourceId; + if (eventBody.ExistingRestApiId) { + rootResourceId = await getRootResourceId(eventBody.ExistingRestApiId); + } + + // Create the appropriate adapter based on UseCaseType + if (useCaseType !== UseCaseTypeFromApiEvent.AGENT_BUILDER) { + throw new Error(`Unsupported UseCaseType: ${useCaseType}`); + } + return new AgentBuilderUseCaseDeploymentAdapter(event, rootResourceId); +}; + +export const agentsHandler = middy(agentsLambdaHandler).use([ + captureLambdaHandler(tracer), + injectLambdaContext(logger), + logMetrics(metrics) +]); diff --git a/source/lambda/use-case-management/cfn/stack-management.ts b/source/lambda/use-case-management/cfn/stack-management.ts index 6d84c5d2..1509b12d 100644 --- a/source/lambda/use-case-management/cfn/stack-management.ts +++ b/source/lambda/use-case-management/cfn/stack-management.ts @@ -15,7 +15,7 @@ import { UpdateStackCommandOutput } from '@aws-sdk/client-cloudformation'; import { parse, validate } from '@aws-sdk/util-arn-parser'; -import { customAwsConfig } from 'aws-node-user-agent-config'; +import { AWSClientManager } from 'aws-sdk-lib'; import { StackInfo, UseCaseRecord } from '../model/list-use-cases'; import { UseCase } from '../model/use-case'; import { logger, metrics, tracer } from '../power-tools-init'; @@ -53,8 +53,7 @@ export class StackManagement { private cfnClient: CloudFormationClient; constructor() { - this.cfnClient = new CloudFormationClient(customAwsConfig()); - tracer.captureAWSv3Client(this.cfnClient); + this.cfnClient = AWSClientManager.getServiceClient('cloudformation', tracer); } /** * Method that creates a use case stack using cloudformation @@ -66,6 +65,7 @@ export class StackManagement { public async createStack(useCase: UseCase): Promise { const input = await new CreateStackCommandInputBuilder(useCase).build(); //NOSONAR - removing await, input is empty const command = new CreateStackCommand(input); + logger.debug(`Stack parameters: ${JSON.stringify(input.Parameters)}`); let response: CreateStackCommandOutput; try { @@ -95,6 +95,7 @@ export class StackManagement { const input = await builder.build(); //NOSONAR - removing await, input is empty const command = new UpdateStackCommand(input); + logger.debug(`Update stack parameters: ${JSON.stringify(input.Parameters, null, 2)}`); let response: UpdateStackCommandOutput; try { @@ -243,7 +244,7 @@ export class StackManagement { * @returns */ private createStackInfoFromDdbRecord = (useCaseRecord: UseCaseRecord): StackInfo => { - console.debug(`useCaseRecord: ${JSON.stringify(useCaseRecord)}`); + logger.debug(`useCaseRecord: ${JSON.stringify(useCaseRecord)}`); if (!validate(useCaseRecord.StackId)) { throw new Error(`Invalid stackId ARN provided in DDB record: ${useCaseRecord.StackId}`); } diff --git a/source/lambda/use-case-management/cfn/stack-operation-builder.ts b/source/lambda/use-case-management/cfn/stack-operation-builder.ts index 4993c0a4..decc38b2 100644 --- a/source/lambda/use-case-management/cfn/stack-operation-builder.ts +++ b/source/lambda/use-case-management/cfn/stack-operation-builder.ts @@ -14,7 +14,6 @@ import { ARTIFACT_BUCKET_ENV_VAR, ARTIFACT_KEY_PREFIX_ENV_VAR, CFN_DEPLOY_ROLE_ARN_ENV_VAR, - RetainedCfnParameterKeys, TEMPLATE_FILE_EXTN_ENV_VAR } from '../utils/constants'; @@ -95,7 +94,7 @@ export class UpdateStackCommandInputBuilder extends CommandInputBuilder { const updateCommandInput = { StackName: this.useCase.stackId, TemplateURL: getTemplateUrl(this.useCase), - Parameters: updateParameters(this.useCase.cfnParameters!), + Parameters: updateParameters(this.useCase.cfnParameters!, this.useCase.getRetainedParameterKeys()), Capabilities: ['CAPABILITY_IAM', 'CAPABILITY_AUTO_EXPAND', 'CAPABILITY_NAMED_IAM'], Tags: [ { @@ -147,9 +146,9 @@ const parameters = (cfnParameters: Map): Parameter[] => { /** * Utility method to build the Parameter array from the Map on updates, marking parameters to be retained */ -const updateParameters = (cfnParameters: Map): Parameter[] => { +const updateParameters = (cfnParameters: Map, retainedParameterKeys: string[]): Parameter[] => { let parameterArray: Parameter[] = parameters(cfnParameters); - for (let parameter of RetainedCfnParameterKeys) { + for (let parameter of retainedParameterKeys) { if (!cfnParameters.has(parameter)) { parameterArray.push({ ParameterKey: parameter, diff --git a/source/lambda/use-case-management/ddb/storage-management.ts b/source/lambda/use-case-management/ddb/storage-management.ts index 85b17d8d..5fd7e001 100644 --- a/source/lambda/use-case-management/ddb/storage-management.ts +++ b/source/lambda/use-case-management/ddb/storage-management.ts @@ -10,7 +10,7 @@ import { UpdateItemCommand } from '@aws-sdk/client-dynamodb'; import { unmarshall } from '@aws-sdk/util-dynamodb'; -import { customAwsConfig } from 'aws-node-user-agent-config'; +import { AWSClientManager } from 'aws-sdk-lib'; import { ListUseCasesAdapter, UseCaseRecord } from '../model/list-use-cases'; import { UseCase } from '../model/use-case'; import { logger, tracer } from '../power-tools-init'; @@ -37,7 +37,7 @@ export class StorageManagement { private client: DynamoDBClient; constructor() { - this.client = new DynamoDBClient(customAwsConfig()); + this.client = AWSClientManager.getServiceClient('dynamodb', tracer); } /** diff --git a/source/lambda/use-case-management/ddb/storage-operation-builder.ts b/source/lambda/use-case-management/ddb/storage-operation-builder.ts index de494985..b7879248 100644 --- a/source/lambda/use-case-management/ddb/storage-operation-builder.ts +++ b/source/lambda/use-case-management/ddb/storage-operation-builder.ts @@ -9,6 +9,7 @@ import { UpdateItemCommandInput } from '@aws-sdk/client-dynamodb'; import { UseCase } from '../model/use-case'; +import { AgentBuilderUseCaseConfiguration, UseCaseConfiguration } from '../model/types'; import { logger, tracer } from '../power-tools-init'; import { CfnParameterKeys, @@ -52,6 +53,7 @@ export class PutItemCommandInputBuilder extends CommandInputBuilder { TableName: process.env[USE_CASES_TABLE_NAME_ENV_VAR], Item: { UseCaseId: { S: this.useCase.useCaseId }, + UseCaseType: { S: this.useCase.useCaseType }, StackId: { S: this.useCase.stackId }, Name: { S: this.useCase.name }, ...(this.useCase.description && { @@ -189,23 +191,37 @@ export class GetModelInfoCommandInputBuilder extends CommandInputBuilder { @tracer.captureMethod({ captureResponse: false, subSegmentName: '###getModelInfoRecord' }) public build(): GetItemCommandInput { logger.debug('Building GetItemCommandInput'); - let sortKey = `${this.useCase.configuration.LlmParams!.ModelProvider}#`; - switch (this.useCase.configuration.LlmParams!.ModelProvider) { - case CHAT_PROVIDERS.BEDROCK: - sortKey += this.useCase.configuration.LlmParams!.BedrockLlmParams!.ModelId ?? INFERENCE_PROFILE; - break; - case CHAT_PROVIDERS.SAGEMAKER: - sortKey += 'default'; - break; - default: - logger.error(`Unknown model provider: ${this.useCase.configuration.LlmParams!.ModelProvider}`); - break; + + // Handle different configuration types + let modelProvider: string; + let modelId: string | undefined; + let ragEnabled: boolean = false; + + if (this.useCase.useCaseType === UseCaseTypes.AGENT_BUILDER) { + const config = this.useCase.configuration as AgentBuilderUseCaseConfiguration; + modelProvider = config.LlmParams?.ModelProvider || CHAT_PROVIDERS.AGENT_CORE; + modelId = config.LlmParams?.BedrockLlmParams?.ModelId; + ragEnabled = config.LlmParams?.RAGEnabled || false; + } else if (this.useCase.useCaseType === UseCaseTypes.AGENT) { + modelProvider = CHAT_PROVIDERS.BEDROCK_AGENT; + modelId = 'default'; + } else { + // Text/Chat use cases + const config = this.useCase.configuration as UseCaseConfiguration; + modelProvider = config.LlmParams!.ModelProvider!; + modelId = config.LlmParams!.BedrockLlmParams?.ModelId; + ragEnabled = config.LlmParams!.RAGEnabled || false; } + + const sortKey = `${modelProvider}#${ + modelProvider === CHAT_PROVIDERS.BEDROCK ? (modelId ?? INFERENCE_PROFILE) : 'default' + }`; + return { TableName: process.env[MODEL_INFO_TABLE_NAME_ENV_VAR], Key: { UseCase: { - S: this.useCase.configuration.LlmParams!.RAGEnabled ? UseCaseTypes.RAGChat : UseCaseTypes.CHAT + S: ragEnabled ? UseCaseTypes.RAGChat : UseCaseTypes.CHAT }, SortKey: { S: sortKey diff --git a/source/lambda/use-case-management/ddb/use-case-config-management.ts b/source/lambda/use-case-management/ddb/use-case-config-management.ts index 179db787..d6643510 100644 --- a/source/lambda/use-case-management/ddb/use-case-config-management.ts +++ b/source/lambda/use-case-management/ddb/use-case-config-management.ts @@ -9,14 +9,15 @@ import { UpdateItemCommand } from '@aws-sdk/client-dynamodb'; import { unmarshall } from '@aws-sdk/util-dynamodb'; -import { customAwsConfig } from 'aws-node-user-agent-config'; +import { AWSClientManager } from 'aws-sdk-lib'; import { UseCaseRecord } from '../model/list-use-cases'; -import { UseCaseConfiguration } from '../model/types'; +import { BaseUseCaseConfiguration } from '../model/types'; import { UseCase } from '../model/use-case'; import { logger, tracer } from '../power-tools-init'; import { DeleteConfigItemBuilder, GetConfigItemBuilder, + MarkConfigItemForDeletionCommandBuilder, MarkItemForDeletionCommandBuilder, PutConfigItemBuilder } from './use-case-config-operation-builder'; @@ -26,7 +27,7 @@ export class UseCaseConfigManagement { private client: DynamoDBClient; constructor() { - this.client = new DynamoDBClient(customAwsConfig()); + this.client = AWSClientManager.getServiceClient('dynamodb', tracer); } /** @@ -64,7 +65,7 @@ export class UseCaseConfigManagement { throw new Error('No use case config found for the specified key.'); } const unmarshalledConfig = unmarshall(response.Item).config; - return unmarshalledConfig as UseCaseConfiguration; + return unmarshalledConfig as BaseUseCaseConfiguration; } catch (error) { const errMessage = `Failed to get config: ${error}`; logger.error(errMessage); @@ -97,12 +98,13 @@ export class UseCaseConfigManagement { @tracer.captureMethod({ captureResponse: true, subSegmentName: '###updateUseCaseConfig' }) public async updateUseCaseConfig(useCase: UseCase, oldDynamoDbRecordKey: string): Promise { // Add the new config to DynamoDb - await this.createUseCaseConfig(useCase); + const response = await this.createUseCaseConfig(useCase); - // Remove the old DynamoDB key + // Set TTL on the old config instead of deleting it let existingConfigUseCase = useCase.clone(); existingConfigUseCase.setUseCaseConfigRecordKey(oldDynamoDbRecordKey); - const response = await this.deleteUseCaseConfig(existingConfigUseCase); + await this.markUseCaseConfigForDeletion(existingConfigUseCase); + return response; } @@ -112,7 +114,7 @@ export class UseCaseConfigManagement { * @returns */ @tracer.captureMethod({ captureResponse: true, subSegmentName: '###getUseCaseConfig' }) - public async getUseCaseConfigFromRecord(useCaseRecordInput: UseCaseRecord): Promise { + public async getUseCaseConfigFromRecord(useCaseRecordInput: UseCaseRecord): Promise { try { const input = await new GetItemCommandInputBuilder(useCaseRecordInput).build(); const response = await this.client.send(new GetItemCommand(input)); @@ -124,6 +126,23 @@ export class UseCaseConfigManagement { } } + /** + * Method for setting the TTL of a use case config in the use case config table + * + * @param useCase + */ + @tracer.captureMethod({ captureResponse: false, subSegmentName: '###markUseCaseConfigForDeletion' }) + public async markUseCaseConfigForDeletion(useCase: UseCase): Promise { + const input = await new MarkConfigItemForDeletionCommandBuilder(useCase).build(); //NOSONAR - without await, input is empty + try { + await this.client.send(new UpdateItemCommand(input)); + } catch (error) { + const errMessage = `Failed to update Use Case Config Record: ${error}`; + logger.error(errMessage); + throw error; + } + } + /** * Method for setting the TTL of a use case in the use cases table * diff --git a/source/lambda/use-case-management/ddb/use-case-config-operation-builder.ts b/source/lambda/use-case-management/ddb/use-case-config-operation-builder.ts index 0066966b..8b1c9fd1 100644 --- a/source/lambda/use-case-management/ddb/use-case-config-operation-builder.ts +++ b/source/lambda/use-case-management/ddb/use-case-config-operation-builder.ts @@ -14,6 +14,7 @@ import { logger, tracer } from '../power-tools-init'; import { DYNAMODB_TTL_ATTRIBUTE_NAME, TTL_SECONDS, + CONFIG_TTL_SECONDS, USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME, USE_CASE_CONFIG_TABLE_NAME_ENV_VAR } from '../utils/constants'; @@ -42,7 +43,7 @@ export abstract class CommandInputBuilder { export class GetConfigItemBuilder extends CommandInputBuilder { @tracer.captureMethod({ captureResponse: false, subSegmentName: '###getUseCaseConfigRecord' }) public build(): GetItemCommandInput { - console.debug('Building GetConfigItemBuilder'); + logger.debug('Building GetConfigItemBuilder'); return { TableName: process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR], Key: { @@ -73,7 +74,7 @@ export class PutConfigItemBuilder extends CommandInputBuilder { export class DeleteConfigItemBuilder extends CommandInputBuilder { @tracer.captureMethod({ captureResponse: false, subSegmentName: '###deleteUseCaseConfigRecord' }) public build(): DeleteItemCommandInput { - console.debug('Building DeleteConfigItemBuilder'); + logger.debug('Building DeleteConfigItemBuilder'); return { TableName: process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR], Key: { @@ -83,6 +84,35 @@ export class DeleteConfigItemBuilder extends CommandInputBuilder { } } +/** + * Builder to build input to mark a use case config for deletion by setting the TTL + */ +export class MarkConfigItemForDeletionCommandBuilder extends CommandInputBuilder { + /** + * Method to create input to update an existing config record in dynamodb setting the TTL + * @returns + */ + @tracer.captureMethod({ captureResponse: false, subSegmentName: '###updateUseCaseConfigRecordForDelete' }) + public build(): UpdateItemCommandInput { + logger.debug('Building UpdateItemCommandInput for config deletion'); + const currentTime = new Date(); + const expiryTime = Math.floor(currentTime.getTime() / 1000) + CONFIG_TTL_SECONDS; + return { + TableName: process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR], + Key: { + [USE_CASE_CONFIG_RECORD_KEY_ATTRIBUTE_NAME]: { S: this.useCase.getUseCaseConfigRecordKey() } + }, + UpdateExpression: 'SET #TTL = :expiry_time', + ExpressionAttributeNames: { + ['#TTL']: DYNAMODB_TTL_ATTRIBUTE_NAME + }, + ExpressionAttributeValues: { + [':expiry_time']: { N: expiryTime.toString() } + } + } as UpdateItemCommandInput; + } +} + /** * Builder to build input to mark a use case for deletion by setting the TTL */ diff --git a/source/lambda/use-case-management/jest.config.js b/source/lambda/use-case-management/jest.config.js index 621d58a6..318ba48d 100644 --- a/source/lambda/use-case-management/jest.config.js +++ b/source/lambda/use-case-management/jest.config.js @@ -14,5 +14,13 @@ module.exports = { collectCoverageFrom: ['**/*.ts', '!**/test/*.ts', '!dist/'], coverageReporters: ['text', ['lcov', { projectRoot: '../../../' }]], preset: 'ts-jest', - testEnvironment: 'node' + testEnvironment: 'node', + // Limit concurrency to prevent worker exhaustion + maxWorkers: 2, + // Set reasonable timeouts + testTimeout: 30000, + // Force exit to prevent hanging processes + forceExit: true, + // Detect open handles to identify resource leaks + detectOpenHandles: true }; diff --git a/source/lambda/use-case-management/mcp-handler.ts b/source/lambda/use-case-management/mcp-handler.ts new file mode 100644 index 00000000..7eef1c2b --- /dev/null +++ b/source/lambda/use-case-management/mcp-handler.ts @@ -0,0 +1,85 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { injectLambdaContext } from '@aws-lambda-powertools/logger/middleware'; +import { logMetrics } from '@aws-lambda-powertools/metrics/middleware'; +import { captureLambdaHandler } from '@aws-lambda-powertools/tracer/middleware'; +import middy from '@middy/core'; +import { APIGatewayEvent } from 'aws-lambda'; +import { logger, metrics, tracer } from './power-tools-init'; +import { checkEnv, handleLambdaError, getStackAction } from './utils/utils'; +import { formatResponse } from './utils/http-response-formatters'; +import { UploadSchemasCommand, ListMCPServersCommand } from './model/commands/mcp-command'; +import { McpAdapterFactory, McpOperation } from './model/adapters/mcp-adapter'; +import { Status, REQUIRED_MCP_ENV_VARS, McpOperationTypes } from './utils/constants'; +import { CaseCommand } from './model/commands/case-command'; + +import { CreateUseCaseCommand, UpdateUseCaseCommand, DeleteUseCaseCommand, PermanentlyDeleteUseCaseCommand, GetUseCaseCommand } from './model/commands/use-case-command'; +import { UseCase } from './model/use-case'; +// Command registry +const mcpCommands: Map = new Map(); +mcpCommands.set(McpOperationTypes.UPLOAD_SCHEMA, new UploadSchemasCommand()); +mcpCommands.set(McpOperationTypes.CREATE, new CreateUseCaseCommand()); +mcpCommands.set(McpOperationTypes.LIST, new ListMCPServersCommand()); +mcpCommands.set(McpOperationTypes.UPDATE, new UpdateUseCaseCommand()); +mcpCommands.set(McpOperationTypes.DELETE, new DeleteUseCaseCommand()); +mcpCommands.set(McpOperationTypes.PERMANENTLY_DELETE, new PermanentlyDeleteUseCaseCommand()); +mcpCommands.set(McpOperationTypes.GET, new GetUseCaseCommand()); + +const mcpRouteMap = new Map([ + ['POST:/deployments/mcp/upload-schemas', McpOperationTypes.UPLOAD_SCHEMA], + ['POST:/deployments/mcp', McpOperationTypes.CREATE], + ['PATCH:/deployments/mcp/{useCaseId}', McpOperationTypes.UPDATE], + ['DELETE:/deployments/mcp/{useCaseId}', McpOperationTypes.DELETE], + ['GET:/deployments/mcp/{useCaseId}', McpOperationTypes.GET], + ['GET:/deployments/mcp', McpOperationTypes.LIST] +]); + +/** + * Adapts the API Gateway event to the appropriate MCP operation + * @param event - API Gateway event + * @param mcpAction - The MCP action to perform + * @returns The adapted MCP operation + */ +export const adaptMcpEvent = (event: APIGatewayEvent, mcpAction: string): McpOperation | UseCase => { + return McpAdapterFactory.createAdapter(event, mcpAction); +}; + +/** + * Lambda handler for MCP operations + * @param event - API Gateway event + * @returns Formatted response + */ +export const mcpLambdaHandler = async (event: APIGatewayEvent) => { + try { + checkEnv(REQUIRED_MCP_ENV_VARS); + + const mcpAction = getStackAction(event, mcpRouteMap); + const command = mcpCommands.get(mcpAction); + + if (!command) { + logger.error(`Invalid MCP action: ${mcpAction}`); + throw new Error(`Invalid MCP action: ${mcpAction}`); + } + const mcpOperation = adaptMcpEvent(event, mcpAction); + const response = await command.execute(mcpOperation); + + if (response === Status.FAILED) { + throw new Error('MCP command execution failed'); + } + + return formatResponse(response); + } catch (error: unknown) { + const mcpAction = event.httpMethod && event.resource ? `${event.httpMethod}:${event.resource}` : 'unknown'; + return handleLambdaError(error, mcpAction, 'MCP'); + } +}; + +/** + * Middy-wrapped handler with powertools middleware + */ +export const mcpHandler = middy(mcpLambdaHandler).use([ + captureLambdaHandler(tracer), + injectLambdaContext(logger), + logMetrics(metrics) +]); diff --git a/source/lambda/use-case-management/model/adapters/agent-builder-use-case-adapter.ts b/source/lambda/use-case-management/model/adapters/agent-builder-use-case-adapter.ts new file mode 100644 index 00000000..a19ee97e --- /dev/null +++ b/source/lambda/use-case-management/model/adapters/agent-builder-use-case-adapter.ts @@ -0,0 +1,280 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import * as crypto from 'crypto'; +import { MissingValueError } from '../../exception/missing-value-error'; +import { logger } from '../../power-tools-init'; +import { UseCase } from '../use-case'; +import { + AUTHENTICATION_PROVIDERS, + COGNITO_POLICY_TABLE_ENV_VAR, + CfnParameterKeys, + FILES_METADATA_TABLE_NAME_ENV_VAR, + IS_INTERNAL_USER_ENV_VAR, + MULTIMODAL_DATA_BUCKET_ENV_VAR, + SHARED_ECR_CACHE_PREFIX_ENV_VAR, + STACK_DEPLOYMENT_SOURCE_USE_CASE, + USER_POOL_ID_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + UseCaseTypes +} from '../../utils/constants'; +import { AgentBuilderUseCaseConfiguration } from '../types'; +import { generateUUID, parseEventBody } from '../../utils/utils'; + +/** + * Adapter implementation for Agent Builder use cases to extract information from Lambda event objects + * and convert them to @UseCase type. + * + * Used for operations which require detailed information about the use case to perform the action, + * specifically deployments and updates of Agent Builder use cases. + * + * IMPORTANT: Amazon Bedrock AgentCore (preview service) does not support VPC deployments. + * All Agent Builder components run in non-VPC mode regardless of any VPC configuration provided. + */ +export class AgentBuilderUseCaseDeploymentAdapter extends UseCase { + constructor(event: APIGatewayEvent, apiRootResourceId?: string) { + const jsonBody = parseEventBody(event); + if (apiRootResourceId) { + jsonBody.ExistingApiRootResourceId = apiRootResourceId; + } + // in update and delete cases, we will be provided a useCaseId. In create, we generate one + const useCaseId: string = event.pathParameters?.useCaseId ?? crypto.randomUUID(); + const cfnParameters = AgentBuilderUseCaseDeploymentAdapter.createCfnParameters(jsonBody, useCaseId); + const config = AgentBuilderUseCaseDeploymentAdapter.createConfiguration(jsonBody); + const userId = event.requestContext.authorizer!.UserId; + + if (!jsonBody.LlmParams?.ModelProvider) { + const errMsg = `Model Provider name not found in event body. ${JSON.stringify(jsonBody)}`; + logger.error(errMsg); + throw new MissingValueError(errMsg); + } + + super( + useCaseId, + jsonBody?.UseCaseName, + jsonBody?.UseCaseDescription, + cfnParameters, + config, + userId, + // AgentBuilder usecase generates templates with name AgentBuilder.template.json + // hence provider is not needed + undefined, + UseCaseTypes.AGENT_BUILDER + ); + } + + /** + * Override template name generation for Agent Builder use cases. + * Agent Builder use cases use a fixed template name regardless of provider. + * + * @param providerName The provider name (ignored for Agent Builder) + * @param useCaseType The use case type (ignored for Agent Builder) + * @returns Fixed template name 'AgentBuilderStack' + */ + protected generateTemplateName(providerName: string | undefined, useCaseType: string): string { + return 'AgentBuilderStack'; + } + + /** + * Override parameter retention for MCP use cases. + * MCP Server use cases don't need VPC or other parameters to be retained during updates. + * + * @returns Empty array - no parameters should be retained for MCP updates + */ + public getRetainedParameterKeys(): string[] { + return []; + } + + private static getUseInferenceProfileValue(eventBody: any): string { + const bedrockInferenceType = eventBody?.LlmParams?.BedrockLlmParams?.BedrockInferenceType; + return bedrockInferenceType === 'INFERENCE_PROFILE' ? 'Yes' : 'No'; + } + + private static createCfnParameters(eventBody: any, useCaseId: string): Map { + const cfnParameters = new Map(); + const shortUUID = this.generateShortUUID(useCaseId); + const recordKeySuffixUUID = this.generateShortUUID(generateUUID()); + + // Agent Builder specific parameters + AgentBuilderUseCaseDeploymentAdapter.setBooleanParameterIfExists( + cfnParameters, + CfnParameterKeys.EnableLongTermMemory, + eventBody.AgentParams?.MemoryConfig?.LongTermEnabled + ); + + AgentBuilderUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.SharedEcrCachePrefix, + process.env[SHARED_ECR_CACHE_PREFIX_ENV_VAR] + ); + + // Standard use case parameters + AgentBuilderUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.DefaultUserEmail, + eventBody.DefaultUserEmail + ); + + AgentBuilderUseCaseDeploymentAdapter.setBooleanParameterIfExists( + cfnParameters, + CfnParameterKeys.DeployUI, + eventBody.DeployUI + ); + + // Note: Agent Builder does not support VPC deployments + // VPC parameters are intentionally omitted + + AgentBuilderUseCaseDeploymentAdapter.setBooleanParameterIfExists( + cfnParameters, + CfnParameterKeys.FeedbackEnabled, + eventBody.FeedbackParams?.FeedbackEnabled + ); + + if (!eventBody.AuthenticationParams?.CognitoParams?.ExistingUserPoolId) { + AgentBuilderUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ExistingRestApiId, + eventBody.ExistingRestApiId + ); + + AgentBuilderUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ExistingApiRootResourceId, + eventBody.ExistingApiRootResourceId + ); + } + + // fixed/mandatory parameters for the deployment + // each new deployment or update requires a new DDB record key in order to properly have cloudformation update all resources on a deploy + cfnParameters.set( + CfnParameterKeys.UseCaseConfigRecordKey, + UseCase.generateUseCaseConfigRecordKey(shortUUID, recordKeySuffixUUID) + ); + cfnParameters.set(CfnParameterKeys.UseCaseConfigTableName, process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]!); + + // prettier-ignore + if (eventBody.AuthenticationParams) { + switch ( //NOSONAR - typescript:S1301, switch statement used for ease of future extensions + eventBody.AuthenticationParams.AuthenticationProvider + ) { + case AUTHENTICATION_PROVIDERS.COGNITO: + const existingUserPoolId = eventBody.AuthenticationParams.CognitoParams.ExistingUserPoolId; + const existingUserPoolClientId = + eventBody.AuthenticationParams.CognitoParams.ExistingUserPoolClientId; + + if (!existingUserPoolId) { + throw new Error( + 'Required field existingUserPoolId not provided for the "Cognito" AuthenticationProvider.' + ); + } + + cfnParameters.set(CfnParameterKeys.ExistingCognitoUserPoolId, existingUserPoolId); + cfnParameters.set(CfnParameterKeys.ComponentCognitoUserPoolId, existingUserPoolId) + if (existingUserPoolClientId) { + cfnParameters.set(CfnParameterKeys.ExistingCognitoUserPoolClient, existingUserPoolClientId); + } + + break; + default: + logger.error( + `Error: unsupported AuthenticationProvider. AuthenticationParams provided: ${eventBody.AuthenticationParams}` + ); + throw new Error( + `Error: unsupported AuthenticationProvider: ${eventBody.AuthenticationParams.AuthenticationProvider}.` + ); + } + } else { + cfnParameters.set(CfnParameterKeys.ExistingCognitoUserPoolId, process.env[USER_POOL_ID_ENV_VAR]!); + cfnParameters.set(CfnParameterKeys.ComponentCognitoUserPoolId, process.env[USER_POOL_ID_ENV_VAR]!) + } + + cfnParameters.set( + CfnParameterKeys.ExistingCognitoGroupPolicyTableName, + process.env[COGNITO_POLICY_TABLE_ENV_VAR]! + ); + cfnParameters.set(CfnParameterKeys.UseCaseUUID, `${useCaseId}`); + cfnParameters.set(CfnParameterKeys.StackDeploymentSource, STACK_DEPLOYMENT_SOURCE_USE_CASE); + + AgentBuilderUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.UseInferenceProfile, + this.getUseInferenceProfileValue(eventBody) + ); + + // Set multimodal data parameters from environment variables if multimodal is enabled + AgentBuilderUseCaseDeploymentAdapter.setBooleanParameterIfExists( + cfnParameters, + CfnParameterKeys.MultimodalEnabled, + eventBody.LlmParams?.MultimodalParams?.MultimodalEnabled + ); + + if (eventBody.LlmParams?.MultimodalParams?.MultimodalEnabled === true) { + AgentBuilderUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ExistingMultimodalDataMetadataTable, + process.env[FILES_METADATA_TABLE_NAME_ENV_VAR] + ); + + AgentBuilderUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ExistingMultimodalDataBucket, + process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR] + ); + } else if (eventBody.LlmParams?.MultimodalParams?.MultimodalEnabled === false) { + cfnParameters.set(CfnParameterKeys.ExistingMultimodalDataMetadataTable, ''); + cfnParameters.set(CfnParameterKeys.ExistingMultimodalDataBucket, ''); + } + + return cfnParameters; + } + + private static createConfiguration(eventBody: any): AgentBuilderUseCaseConfiguration { + const config: AgentBuilderUseCaseConfiguration = { + UseCaseType: eventBody.UseCaseType, + UseCaseName: eventBody.UseCaseName, + LlmParams: { + ModelProvider: eventBody.LlmParams?.ModelProvider, + BedrockLlmParams: eventBody.LlmParams?.BedrockLlmParams, + SageMakerLlmParams: eventBody.LlmParams?.SageMakerLlmParams, + PromptParams: eventBody.LlmParams?.PromptParams, + ModelParams: eventBody.LlmParams?.ModelParams, + Temperature: eventBody.LlmParams?.Temperature, + RAGEnabled: eventBody.LlmParams?.RAGEnabled, + Streaming: eventBody.LlmParams?.Streaming, + Verbose: eventBody.LlmParams?.Verbose, + MultimodalParams: eventBody.LlmParams?.MultimodalParams + }, + AgentBuilderParams: { + SystemPrompt: eventBody.AgentParams?.SystemPrompt, + MCPServers: eventBody.AgentParams?.MCPServers, + Tools: eventBody.AgentParams?.Tools, + MemoryConfig: eventBody.AgentParams?.MemoryConfig + }, + AuthenticationParams: eventBody.AuthenticationParams, + FeedbackParams: { + FeedbackEnabled: eventBody.FeedbackParams?.FeedbackEnabled, + ...(eventBody.FeedbackParams?.FeedbackEnabled && { CustomMappings: {} }) + }, + IsInternalUser: process.env[IS_INTERNAL_USER_ENV_VAR]! // env var value is set as 'true' or 'false' on deployment of management stack + }; + + return config; + } +} + +/** + * Adapter implementation for Agent Builder use cases to extract information from Lambda event objects + * and convert them to @UseCase type. + * + * Used for operations which require only the use case ID and user, such as deletion, + * permanent deletion, and getting info on a single use case + */ +export class AgentBuilderUseCaseInfoAdapter extends UseCase { + constructor(event: APIGatewayEvent) { + const useCaseId: string = event.pathParameters!.useCaseId!; + const userId = event.requestContext.authorizer!.UserId; + + super(useCaseId, '', undefined, undefined, {}, userId, '', UseCaseTypes.AGENT_BUILDER); + } +} diff --git a/source/lambda/use-case-management/model/agent-use-case-adapter.ts b/source/lambda/use-case-management/model/adapters/agent-use-case-adapter.ts similarity index 91% rename from source/lambda/use-case-management/model/agent-use-case-adapter.ts rename to source/lambda/use-case-management/model/adapters/agent-use-case-adapter.ts index 08969eb4..7be1492d 100644 --- a/source/lambda/use-case-management/model/agent-use-case-adapter.ts +++ b/source/lambda/use-case-management/model/adapters/agent-use-case-adapter.ts @@ -3,7 +3,7 @@ import { APIGatewayEvent } from 'aws-lambda'; import * as crypto from 'crypto'; -import { logger } from '../power-tools-init'; +import { logger } from '../../power-tools-init'; import { AgentProviders, AUTHENTICATION_PROVIDERS, @@ -14,13 +14,14 @@ import { USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, UseCaseTypes, USER_POOL_ID_ENV_VAR -} from '../utils/constants'; -import { AgentUseCaseConfiguration } from './types'; -import { UseCase } from './use-case'; +} from '../../utils/constants'; +import { AgentUseCaseConfiguration } from '../types'; +import { UseCase } from '../use-case'; +import { generateUUID, parseEventBody } from '../../utils/utils'; export class AgentUseCaseDeploymentAdapter extends UseCase { constructor(event: APIGatewayEvent, apiRootResourceId?: string) { - const jsonBody = JSON.parse(event.body!); + const jsonBody = parseEventBody(event); if (apiRootResourceId) { jsonBody.ExistingApiRootResourceId = apiRootResourceId; } @@ -44,8 +45,8 @@ export class AgentUseCaseDeploymentAdapter extends UseCase { private static createCfnParameters(eventBody: any, useCaseId: string): Map { const cfnParameters = new Map(); - const shortUUID = this.generateShortUUID(useCaseId); - const recordKeySuffixUUID = this.generateShortUUID(crypto.randomUUID()); + const shortUUID = UseCase.generateShortUUID(useCaseId); + const recordKeySuffixUUID = UseCase.generateShortUUID(generateUUID()); AgentUseCaseDeploymentAdapter.setParameterIfExists( cfnParameters, @@ -101,7 +102,13 @@ export class AgentUseCaseDeploymentAdapter extends UseCase { CfnParameterKeys.ExistingSecurityGroupIds, eventBody.VpcParams?.ExistingSecurityGroupIds ); - + + AgentUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ProvisionedConcurrencyValue, + eventBody.ProvisionedConcurrencyValue + ); + AgentUseCaseDeploymentAdapter.setBooleanParameterIfExists( cfnParameters, CfnParameterKeys.FeedbackEnabled, @@ -186,6 +193,7 @@ export class AgentUseCaseDeploymentAdapter extends UseCase { }, AuthenticationParams: eventBody.AuthenticationParams, IsInternalUser: process.env[IS_INTERNAL_USER_ENV_VAR]!, + ProvisionedConcurrencyValue: eventBody.ProvisionedConcurrencyValue, FeedbackParams: { FeedbackEnabled: eventBody.FeedbackParams?.FeedbackEnabled, ...(eventBody.FeedbackParams?.FeedbackEnabled && { CustomMappings: {} }) diff --git a/source/lambda/use-case-management/model/chat-use-case-adapter.ts b/source/lambda/use-case-management/model/adapters/chat-use-case-adapter.ts similarity index 94% rename from source/lambda/use-case-management/model/chat-use-case-adapter.ts rename to source/lambda/use-case-management/model/adapters/chat-use-case-adapter.ts index bdbbba1c..4c5011d8 100644 --- a/source/lambda/use-case-management/model/chat-use-case-adapter.ts +++ b/source/lambda/use-case-management/model/adapters/chat-use-case-adapter.ts @@ -3,9 +3,9 @@ import { APIGatewayEvent } from 'aws-lambda'; import * as crypto from 'crypto'; -import { MissingValueError } from '../exception/missing-value-error'; -import { logger } from '../power-tools-init'; -import { UseCase } from './use-case'; +import { MissingValueError } from '../../exception/missing-value-error'; +import { logger } from '../../power-tools-init'; +import { UseCase } from '../use-case'; import { AUTHENTICATION_PROVIDERS, COGNITO_POLICY_TABLE_ENV_VAR, @@ -16,8 +16,9 @@ import { USER_POOL_ID_ENV_VAR, USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, UseCaseTypes -} from '../utils/constants'; -import { UseCaseConfiguration } from './types'; +} from '../../utils/constants'; +import { UseCaseConfiguration } from '../types'; +import { generateUUID, parseEventBody } from '../../utils/utils'; /** * Adapter implementation for @UseCase to extract information from Lambda event objects @@ -28,7 +29,7 @@ import { UseCaseConfiguration } from './types'; */ export class ChatUseCaseDeploymentAdapter extends UseCase { constructor(event: APIGatewayEvent, apiRootResourceId?: string) { - const jsonBody = JSON.parse(event.body!); + const jsonBody = parseEventBody(event); if (apiRootResourceId) { jsonBody.ExistingApiRootResourceId = apiRootResourceId; } @@ -59,7 +60,7 @@ export class ChatUseCaseDeploymentAdapter extends UseCase { private static createCfnParameters(eventBody: any, useCaseId: string): Map { const cfnParameters = new Map(); const shortUUID = this.generateShortUUID(useCaseId); - const recordKeySuffixUUID = this.generateShortUUID(crypto.randomUUID()); + const recordKeySuffixUUID = this.generateShortUUID(generateUUID()); // Knowledge base related Params ChatUseCaseDeploymentAdapter.setParameterIfExists( @@ -160,6 +161,12 @@ export class ChatUseCaseDeploymentAdapter extends UseCase { eventBody.FeedbackParams?.FeedbackEnabled ); + ChatUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ProvisionedConcurrencyValue, + eventBody.ProvisionedConcurrencyValue + ); + if (!eventBody.AuthenticationParams?.CognitoParams?.ExistingUserPoolId) { ChatUseCaseDeploymentAdapter.setParameterIfExists( cfnParameters, @@ -255,6 +262,7 @@ export class ChatUseCaseDeploymentAdapter extends UseCase { Verbose: eventBody.LlmParams.Verbose }, AuthenticationParams: eventBody.AuthenticationParams, + ProvisionedConcurrencyValue: eventBody.ProvisionedConcurrencyValue, FeedbackParams: { FeedbackEnabled: eventBody.FeedbackParams?.FeedbackEnabled, ...(eventBody.FeedbackParams?.FeedbackEnabled && { CustomMappings: {} }) diff --git a/source/lambda/use-case-management/model/adapters/mcp-adapter.ts b/source/lambda/use-case-management/model/adapters/mcp-adapter.ts new file mode 100644 index 00000000..0bbdd192 --- /dev/null +++ b/source/lambda/use-case-management/model/adapters/mcp-adapter.ts @@ -0,0 +1,201 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import RequestValidationError from '../../utils/error'; +import { logger } from '../../power-tools-init'; +import { generateUUID, parseEventBody, extractUserId } from '../../utils/utils'; +import { + McpOperationTypes, + UseCaseTypes, + CfnParameterKeys, + USER_POOL_ID_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR +} from '../../utils/constants'; +import { UseCase } from '../use-case'; +import { MCPUseCaseConfiguration } from '../types'; +import { GetUseCaseAdapter } from '../get-use-case'; + +/** + * Base interface for MCP operations + */ +export interface McpOperation { + // Interface intentionally minimal + // As other adapters are implemented, this interface can be used like UseCase to pull out common fields +} + +/** + * Interface for file upload information + */ +export interface FileUploadInfo { + schemaType: string; + fileName: string; + fileExtension: string; + contentType: string; +} + +/** + * Adapter for upload-schemas MCP operation that validates all inputs and supports multiple files + */ +export class UploadMCPTargetSchemaAdapter implements McpOperation { + public readonly rawFiles: any[]; + public readonly userId: string; + public files: FileUploadInfo[] = []; + + constructor(event: APIGatewayEvent) { + const eventBody = parseEventBody(event); + + if (!eventBody.files || !Array.isArray(eventBody.files) || eventBody.files.length === 0) { + logger.error("'files' is a required field and must be a non-empty array"); + throw new RequestValidationError("'files' is a required field and must be a non-empty array"); + } + + this.userId = extractUserId(event); + this.rawFiles = eventBody.files; + } +} + +/** + * Adapter for MCP use case operations + */ +export class MCPUseCaseAdapter extends UseCase { + public readonly deploymentData: any; + public readonly userId: string; + + constructor(event: APIGatewayEvent) { + const jsonBody = parseEventBody(event); + const useCaseId: string = event.pathParameters?.useCaseId ?? generateUUID(); + const cfnParameters = MCPUseCaseAdapter.createCfnParameters(jsonBody, useCaseId); + const config = MCPUseCaseAdapter.createConfiguration(jsonBody); + const userId = event.requestContext.authorizer!.UserId; + + super( + useCaseId, + jsonBody?.UseCaseName, + jsonBody?.UseCaseDescription, + cfnParameters, + config, + userId, + undefined, + UseCaseTypes.MCP_SERVER + ); + } + + /** + * Override template name generation for Agent Builder use cases. + * MCP Server use cases use a fixed template name regardless of provider. + * + * @param providerName The provider name (ignored for MCP Server Stack) + * @param useCaseType The use case type (ignored for MCP Server Stack) + * @returns Fixed template name 'MCPServerStack' + */ + protected generateTemplateName(providerName: string | undefined, useCaseType: string): string { + return 'MCPServerStack'; + } + + /** + * Override parameter retention for MCP use cases. + * MCP Server use cases don't need VPC or other parameters to be retained during updates. + * + * @returns Empty array - no parameters should be retained for MCP updates + */ + public getRetainedParameterKeys(): string[] { + return []; + } + + private static createCfnParameters(eventBody: any, useCaseId: string): Map { + const cfnParameters = new Map(); + const shortUUID = this.generateShortUUID(useCaseId); + const recordKeySuffixUUID = this.generateShortUUID(generateUUID()); + const agentClientId = eventBody.AgentClientId; + cfnParameters.set(CfnParameterKeys.UseCaseUUID, `${useCaseId}`); + + cfnParameters.set(CfnParameterKeys.UseCaseConfigTableName, process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]!); + cfnParameters.set( + CfnParameterKeys.UseCaseConfigRecordKey, + UseCase.generateUseCaseConfigRecordKey(shortUUID, recordKeySuffixUUID) + ); + + cfnParameters.set(CfnParameterKeys.ExistingRestApiId, eventBody.ExistingRestApiId); + + if (process.env[GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]) { + const s3BucketName = process.env[GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]; + cfnParameters.set(CfnParameterKeys.S3BucketName, s3BucketName); + } + + cfnParameters.set(CfnParameterKeys.ExistingCognitoUserPoolId, process.env[USER_POOL_ID_ENV_VAR]!); + + if (eventBody.MCPParams?.RuntimeParams?.EcrUri) + this.setParameterIfExists( + cfnParameters, + CfnParameterKeys.EcrUri, + eventBody.MCPParams?.RuntimeParams?.EcrUri + ); + + if (agentClientId) cfnParameters.set(CfnParameterKeys.ExistingCognitoUserPoolClient, agentClientId); + + return cfnParameters; + } + private static createConfiguration(eventBody: any): MCPUseCaseConfiguration { + return { + UseCaseType: eventBody.UseCaseType, + UseCaseName: eventBody.UseCaseName, + UseCaseDescription: eventBody.UseCaseDescription, + MCPParams: eventBody.MCPParams + }; + } +} + +/** + * Adapter for listing MCP servers + */ +export class ListMCPAdapter implements McpOperation { + public readonly event: APIGatewayEvent; + + constructor(event: APIGatewayEvent) { + this.event = event; + } +} + +/** + * Factory function to create appropriate MCP adapter based on operation type + */ +export class McpAdapterFactory { + public static createAdapter(event: APIGatewayEvent, operation: string): McpOperation | UseCase { + switch (operation) { + case McpOperationTypes.UPLOAD_SCHEMA: + return new UploadMCPTargetSchemaAdapter(event); + case McpOperationTypes.CREATE: + case McpOperationTypes.UPDATE: + return new MCPUseCaseAdapter(event); + case McpOperationTypes.DELETE: + case McpOperationTypes.PERMANENTLY_DELETE: + return new MCPInfoAdapter(event); + case McpOperationTypes.GET: + return new GetUseCaseAdapter(event); + case McpOperationTypes.LIST: + return new ListMCPAdapter(event); + default: + const errorMsg = `Unsupported MCP operation: ${operation}`; + logger.error(`McpAdapterFactory creation failed: ${errorMsg}`); + throw new Error(errorMsg); + } + } +} + +/** + * Adapter implementation for @UseCase to extract information from Lambda event objects + * and convert them to @UseCase type. + * + * Used for operations which require only the use case ID and user, such as deletion, + * permanent deletion, and getting info on a single use case + */ +export class MCPInfoAdapter extends UseCase { + constructor(event: APIGatewayEvent) { + const useCaseId: string = event.pathParameters!.useCaseId!; + const userId = event.requestContext.authorizer!.UserId; + + super(useCaseId, '', undefined, undefined, {}, userId, '', UseCaseTypes.MCP_SERVER); + } +} diff --git a/source/lambda/use-case-management/model/adapters/workflow-use-case-adapter.ts b/source/lambda/use-case-management/model/adapters/workflow-use-case-adapter.ts new file mode 100644 index 00000000..a71f4658 --- /dev/null +++ b/source/lambda/use-case-management/model/adapters/workflow-use-case-adapter.ts @@ -0,0 +1,284 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import * as crypto from 'crypto'; +import { MissingValueError } from '../../exception/missing-value-error'; +import { logger } from '../../power-tools-init'; +import { UseCase } from '../use-case'; +import { + AUTHENTICATION_PROVIDERS, + COGNITO_POLICY_TABLE_ENV_VAR, + CfnParameterKeys, + IS_INTERNAL_USER_ENV_VAR, + STACK_DEPLOYMENT_SOURCE_USE_CASE, + USER_POOL_ID_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + FILES_METADATA_TABLE_NAME_ENV_VAR, + MULTIMODAL_DATA_BUCKET_ENV_VAR, + UseCaseTypes, + SHARED_ECR_CACHE_PREFIX_ENV_VAR +} from '../../utils/constants'; +import { WorkflowUseCaseConfiguration } from '../types'; +import { generateUUID, parseEventBody } from '../../utils/utils'; + +/** + * Adapter implementation for Workflow use cases to extract information from Lambda event objects + * and convert them to @UseCase type. + * + * Used for operations which require detailed information about the use case to perform the action, + * specifically deployments and updates of Workflow use cases. + */ +export class WorkflowUseCaseDeploymentAdapter extends UseCase { + constructor(event: APIGatewayEvent, apiRootResourceId?: string) { + const jsonBody = parseEventBody(event); + if (apiRootResourceId) { + jsonBody.ExistingApiRootResourceId = apiRootResourceId; + } + // in update and delete cases, we will be provided a useCaseId. In create, we generate one + const useCaseId: string = event.pathParameters?.useCaseId ?? crypto.randomUUID(); + const cfnParameters = WorkflowUseCaseDeploymentAdapter.createCfnParameters(jsonBody, useCaseId); + const config = WorkflowUseCaseDeploymentAdapter.createConfiguration(jsonBody); + const userId = event.requestContext.authorizer!.UserId; + + if (!jsonBody.LlmParams?.ModelProvider) { + const errMsg = 'Model Provider name not found in event body.'; + logger.error(errMsg); + throw new MissingValueError(errMsg); + } + + super( + useCaseId, + jsonBody?.UseCaseName, + jsonBody?.UseCaseDescription, + cfnParameters, + config, + userId, + // Workflow usecase generates templates with name Workflow.template.json + // hence provider is not needed + undefined, + UseCaseTypes.WORKFLOW + ); + } + + /** + * Override template name generation for Workflow use cases. + * Workflow use cases use a fixed template name regardless of provider. + * + * @param providerName The provider name (ignored for Workflow) + * @param useCaseType The use case type (ignored for Workflow) + * @returns Fixed template name 'WorkflowStack' + */ + protected generateTemplateName(providerName: string | undefined, useCaseType: string): string { + return 'WorkflowStack'; + } + + /** + * Override parameter retention for Workflow use cases. + * Workflow use cases don't need VPC or other parameters to be retained during updates. + * + * @returns Empty array - no parameters should be retained for Workflow updates + */ + public getRetainedParameterKeys(): string[] { + return []; + } + + private static getUseInferenceProfileValue(eventBody: any): string { + // Always check top-level inference profile first + const topLevelInferenceProfileId = eventBody?.LlmParams?.BedrockLlmParams?.InferenceProfileId; + if (topLevelInferenceProfileId) { + return 'Yes'; + } + + // Additional check: if this is a Workflow with agents-as-tools orchestration, also check agents + if (eventBody?.UseCaseType === 'Workflow' && + eventBody?.WorkflowParams?.OrchestrationPattern === 'agents-as-tools') { + const agents = eventBody?.WorkflowParams?.AgentsAsToolsParams?.Agents || []; + for (const agent of agents) { + if (agent?.LlmParams?.BedrockLlmParams?.InferenceProfileId) { + return 'Yes'; + } + } + } + + return 'No'; + } + + private static createCfnParameters(eventBody: any, useCaseId: string): Map { + const cfnParameters = new Map(); + const shortUUID = this.generateShortUUID(useCaseId); + const recordKeySuffixUUID = this.generateShortUUID(generateUUID()); + + WorkflowUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.SharedEcrCachePrefix, + process.env[SHARED_ECR_CACHE_PREFIX_ENV_VAR] + ); + + // Standard use case parameters + WorkflowUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.DefaultUserEmail, + eventBody.DefaultUserEmail + ); + + WorkflowUseCaseDeploymentAdapter.setBooleanParameterIfExists( + cfnParameters, + CfnParameterKeys.DeployUI, + eventBody.DeployUI + ); + + WorkflowUseCaseDeploymentAdapter.setBooleanParameterIfExists( + cfnParameters, + CfnParameterKeys.FeedbackEnabled, + eventBody.FeedbackParams?.FeedbackEnabled + ); + + if (!eventBody.AuthenticationParams?.CognitoParams?.ExistingUserPoolId) { + WorkflowUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ExistingRestApiId, + eventBody.ExistingRestApiId + ); + + WorkflowUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ExistingApiRootResourceId, + eventBody.ExistingApiRootResourceId + ); + } + + // fixed/mandatory parameters for the deployment + // each new deployment or update requires a new DDB record key in order to properly have cloudformation update all resources on a deploy + cfnParameters.set( + CfnParameterKeys.UseCaseConfigRecordKey, + UseCase.generateUseCaseConfigRecordKey(shortUUID, recordKeySuffixUUID) + ); + cfnParameters.set(CfnParameterKeys.UseCaseConfigTableName, process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]!); + + // prettier-ignore + if (eventBody.AuthenticationParams) { + switch ( //NOSONAR - typescript:S1301, switch statement used for ease of future extensions + eventBody.AuthenticationParams.AuthenticationProvider + ) { + case AUTHENTICATION_PROVIDERS.COGNITO: + const existingUserPoolId = eventBody.AuthenticationParams.CognitoParams.ExistingUserPoolId; + const existingUserPoolClientId = + eventBody.AuthenticationParams.CognitoParams.ExistingUserPoolClientId; + + if (!existingUserPoolId) { + throw new Error( + 'Required field existingUserPoolId not provided for the "Cognito" AuthenticationProvider.' + ); + } + + cfnParameters.set(CfnParameterKeys.ExistingCognitoUserPoolId, existingUserPoolId); + cfnParameters.set(CfnParameterKeys.ComponentCognitoUserPoolId, existingUserPoolId) + if (existingUserPoolClientId) { + cfnParameters.set(CfnParameterKeys.ExistingCognitoUserPoolClient, existingUserPoolClientId); + } + + break; + default: + logger.error( + `Error: unsupported AuthenticationProvider. AuthenticationParams provided: ${eventBody.AuthenticationParams}` + ); + throw new Error( + `Error: unsupported AuthenticationProvider: ${eventBody.AuthenticationParams.AuthenticationProvider}.` + ); + } + } else { + cfnParameters.set(CfnParameterKeys.ExistingCognitoUserPoolId, process.env[USER_POOL_ID_ENV_VAR]!); + cfnParameters.set(CfnParameterKeys.ComponentCognitoUserPoolId, process.env[USER_POOL_ID_ENV_VAR]!) + } + + cfnParameters.set( + CfnParameterKeys.ExistingCognitoGroupPolicyTableName, + process.env[COGNITO_POLICY_TABLE_ENV_VAR]! + ); + cfnParameters.set(CfnParameterKeys.UseCaseUUID, `${useCaseId}`); + cfnParameters.set(CfnParameterKeys.StackDeploymentSource, STACK_DEPLOYMENT_SOURCE_USE_CASE); + + WorkflowUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.UseInferenceProfile, + this.getUseInferenceProfileValue(eventBody) + ); + + // Set multimodal data parameters from environment variables if multimodal is enabled + WorkflowUseCaseDeploymentAdapter.setBooleanParameterIfExists( + cfnParameters, + CfnParameterKeys.MultimodalEnabled, + eventBody.LlmParams?.MultimodalParams?.MultimodalEnabled + ); + + if (eventBody.LlmParams?.MultimodalParams?.MultimodalEnabled === true) { + WorkflowUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ExistingMultimodalDataMetadataTable, + process.env[FILES_METADATA_TABLE_NAME_ENV_VAR] + ); + + WorkflowUseCaseDeploymentAdapter.setParameterIfExists( + cfnParameters, + CfnParameterKeys.ExistingMultimodalDataBucket, + process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR] + ); + } else if (eventBody.LlmParams?.MultimodalParams?.MultimodalEnabled === false) { + cfnParameters.set(CfnParameterKeys.ExistingMultimodalDataMetadataTable, ''); + cfnParameters.set(CfnParameterKeys.ExistingMultimodalDataBucket, ''); + } + + return cfnParameters; + } + + private static createConfiguration(eventBody: any): WorkflowUseCaseConfiguration { + const config: WorkflowUseCaseConfiguration = { + UseCaseType: eventBody.UseCaseType, + UseCaseName: eventBody.UseCaseName, + UseCaseDescription: eventBody.UseCaseDescription, + LlmParams: { + ModelProvider: eventBody.LlmParams?.ModelProvider, + BedrockLlmParams: eventBody.LlmParams?.BedrockLlmParams, + SageMakerLlmParams: eventBody.LlmParams?.SageMakerLlmParams, + PromptParams: eventBody.LlmParams?.PromptParams, + ModelParams: eventBody.LlmParams?.ModelParams, + Temperature: eventBody.LlmParams?.Temperature, + RAGEnabled: eventBody.LlmParams?.RAGEnabled, + Streaming: eventBody.LlmParams?.Streaming, + Verbose: eventBody.LlmParams?.Verbose, + MultimodalParams: eventBody.LlmParams?.MultimodalParams + }, + WorkflowParams: { + OrchestrationPattern: eventBody.WorkflowParams?.OrchestrationPattern, + SystemPrompt: eventBody.WorkflowParams?.SystemPrompt, + AgentsAsToolsParams: eventBody.WorkflowParams?.AgentsAsToolsParams, + MemoryConfig: eventBody.WorkflowParams?.MemoryConfig + }, + AuthenticationParams: eventBody.AuthenticationParams, + FeedbackParams: { + FeedbackEnabled: eventBody.FeedbackParams?.FeedbackEnabled, + ...(eventBody.FeedbackParams?.FeedbackEnabled && { CustomMappings: {} }) + }, + IsInternalUser: process.env[IS_INTERNAL_USER_ENV_VAR]! // env var value is set as 'true' or 'false' on deployment of management stack + }; + + return config; + } +} + +/** + * Adapter implementation for Workflow use cases to extract information from Lambda event objects + * and convert them to @UseCase type. + * + * Used for operations which require only the use case ID and user, such as deletion, + * permanent deletion, and getting info on a single use case + */ +export class WorkflowUseCaseInfoAdapter extends UseCase { + constructor(event: APIGatewayEvent) { + const useCaseId: string = event.pathParameters!.useCaseId!; + const userId = event.requestContext.authorizer!.UserId; + + super(useCaseId, '', undefined, undefined, {}, userId, '', UseCaseTypes.WORKFLOW); + } +} diff --git a/source/lambda/use-case-management/model/commands/agent-builder-command.ts b/source/lambda/use-case-management/model/commands/agent-builder-command.ts new file mode 100644 index 00000000..ea5e1c94 --- /dev/null +++ b/source/lambda/use-case-management/model/commands/agent-builder-command.ts @@ -0,0 +1,24 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ListUseCasesAdapter, UseCaseRecord } from '../list-use-cases'; +import { UseCaseTypes } from '../../utils/constants'; +import { tracer } from '../../power-tools-init'; +import { ListUseCasesCommand } from './use-case-command'; + +/** + * Command to list Agent Builder use cases + */ +export class ListAgentBuilderCommand extends ListUseCasesCommand { + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###listAgentBuilderCommand' }) + public async execute(operation: ListUseCasesAdapter): Promise { + return await super.execute(operation); + } + + /** + * Filters use cases to only include Agent Builder type + */ + protected filterUseCasesByType(useCaseRecords: UseCaseRecord[]): UseCaseRecord[] { + return useCaseRecords.filter((record) => record.UseCaseType === UseCaseTypes.AGENT_BUILDER); + } +} diff --git a/source/lambda/use-case-management/model/commands/case-command.ts b/source/lambda/use-case-management/model/commands/case-command.ts new file mode 100644 index 00000000..c678270f --- /dev/null +++ b/source/lambda/use-case-management/model/commands/case-command.ts @@ -0,0 +1,15 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { UseCase } from '../use-case'; +import { ListUseCasesAdapter } from '../list-use-cases'; +import { GetUseCaseAdapter } from '../get-use-case'; +import { McpOperation } from '../adapters/mcp-adapter'; + +/** + * Common command interface for all command types (UseCase and MCP) + * Supports both UseCase operations and MCP operations + */ +export interface CaseCommand { + execute(operation: UseCase | ListUseCasesAdapter | GetUseCaseAdapter | McpOperation): Promise; +} diff --git a/source/lambda/use-case-management/model/commands/mcp-command.ts b/source/lambda/use-case-management/model/commands/mcp-command.ts new file mode 100644 index 00000000..63f1a77e --- /dev/null +++ b/source/lambda/use-case-management/model/commands/mcp-command.ts @@ -0,0 +1,435 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { logger, tracer } from '../../power-tools-init'; +import { UploadMCPTargetSchemaAdapter, McpOperation } from '../adapters/mcp-adapter'; +import { CaseCommand } from './case-command'; +import { UseCase } from '../use-case'; +import { ListUseCasesAdapter, UseCaseRecord } from '../list-use-cases'; +import { GetUseCaseAdapter } from '../get-use-case'; +import { McpOperationsValidator } from '../validators/mcp-validator'; +import { S3Management } from '../../s3/s3-management'; +import { McpOperationTypes, UseCaseTypes, STRANDS_TOOLS_SSM_PARAM_ENV_VAR } from '../../utils/constants'; +import { StackManagement } from '../../cfn/stack-management'; +import { StorageManagement } from '../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../ddb/use-case-config-management'; +import { SSMClient, GetParameterCommand } from '@aws-sdk/client-ssm'; +import { AWSClientManager } from 'aws-sdk-lib'; + +/** + * Interface for Strands SDK tools + */ +export interface StrandsTool { + name: string; + description: string; + value: string; + category?: string; + isDefault: boolean; +} + +/** + * Response interface for ListMCPServersCommand + */ +export interface ListMCPServersResponse { + mcpServers: Array<{ + useCaseId: string; + useCaseName: string; + description: string; + status: 'ACTIVE' | 'INACTIVE'; + url: string; + type: 'gateway' | 'runtime'; + }>; + strandsTools: StrandsTool[]; +} + +/** + * Abstract base class for MCP management commands + */ +export abstract class McpMgmtCommand implements CaseCommand { + protected s3Mgmt: S3Management; + protected validator: McpOperationsValidator; + + constructor() { + this.s3Mgmt = new S3Management(); + } + + /** + * Initializes the MCP validator based on the operation type. + * This method should be called before using validateMcpOperation. + * + * @param operationType - The type of MCP operation (e.g., McpOperationTypes.UPLOAD_SCHEMA) + */ + protected initializeValidator(operationType: string): void { + this.validator = McpOperationsValidator.createValidator(operationType); + } + + /** + * Validates an MCP operation using the initialized validator. + * + * @param mcpOperation - The MCP operation to be validated + * @returns A promise that resolves to the validated MCP operation + * @throws Error if the validator has not been initialized + */ + protected async validateMcpOperation(mcpOperation: McpOperation): Promise { + if (!this.validator) { + const errorMsg = 'MCP Validator not initialized. Call initializeValidator first.'; + logger.error(`McpMgmtCommand validation failed: ${errorMsg}`); + throw new Error(errorMsg); + } + return this.validator.validateMcpOperation(mcpOperation); + } + + /** + * Execute method that supports MCP operations + * @param operation - McpOperation + */ + abstract execute(operation: McpOperation): Promise; +} + +/** + * Command to handle schema upload operations for MCP servers + */ +export class UploadSchemasCommand extends McpMgmtCommand { + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###uploadSchemasCommand' }) + public async execute(operation: UseCase | ListUseCasesAdapter | GetUseCaseAdapter | McpOperation): Promise { + // Handle MCP operations specifically + if (!(operation instanceof UploadMCPTargetSchemaAdapter)) { + const errorMsg = 'UploadSchemasCommand only supports UploadMCPTargetSchemaAdapter operations'; + logger.error( + `UploadSchemasCommand operation type validation failed: ${errorMsg}, received: ${ + operation?.constructor?.name || 'unknown' + }` + ); + throw new Error(errorMsg); + } + + // Initialize and use validator (similar to UseCaseMgmtCommand pattern) + this.initializeValidator(McpOperationTypes.UPLOAD_SCHEMA); + await this.validateMcpOperation(operation); + + logger.info(`Creating presigned POSTs for validated schema uploads - fileCount: ${operation.files.length}`); + + try { + const response = await this.s3Mgmt.createSchemaUploadPresignedPosts(operation.userId, operation.files); + + logger.info(`Successfully created ${response.uploads.length} presigned POSTs for schema uploads`); + + return response; + } catch (error) { + logger.error(`Failed to create presigned POSTs for schema uploads, error: ${(error as Error).message}`); + throw error; + } + } +} + +/** + * Command to list all MCP servers + */ +export class ListMCPServersCommand implements CaseCommand { + stackMgmt: StackManagement; + storageMgmt: StorageManagement; + useCaseConfigMgmt: UseCaseConfigManagement; + ssmClient: SSMClient; + + constructor() { + this.stackMgmt = new StackManagement(); + this.storageMgmt = new StorageManagement(); + this.useCaseConfigMgmt = new UseCaseConfigManagement(); + this.ssmClient = AWSClientManager.getServiceClient('ssm', tracer); + } + + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###listMCPServersCommand' }) + public async execute( + operation: UseCase | ListUseCasesAdapter | GetUseCaseAdapter | McpOperation + ): Promise { + logger.debug('Enter ListMCPServersCommand'); + + const useCaseRecords = await this.fetchUseCaseRecords(); + const mcpServers = await this.processMcpServerRecords(useCaseRecords); + + logger.info(`Found ${mcpServers.length} MCP servers`); + if (mcpServers.length === 0) { + logger.debug('No MCP servers found, returning empty array'); + } + + const strandsTools = await this.getStrandsTools(); + + return { + mcpServers, + strandsTools + }; + } + + private async fetchUseCaseRecords(): Promise { + const listAdapter = new ListUseCasesAdapter({ + queryStringParameters: { pageNumber: '1' } + } as any); + + try { + const response = await this.storageMgmt.getAllCaseRecords(listAdapter); + return response.useCaseRecords; + } catch (error) { + logger.error(`Error while listing use case records in DDB, Error: ${error}`); + throw error; + } + } + + private async processMcpServerRecords(useCaseRecords: UseCaseRecord[]): Promise> { + const mcpServers = []; + + for (const record of useCaseRecords) { + const server = await this.processSingleRecord(record); + if (server) { + mcpServers.push(server); + } + } + + return mcpServers; + } + + private async processSingleRecord(record: UseCaseRecord) { + if (!record.UseCaseConfigRecordKey) { + logger.error(`UseCaseConfigRecordKey missing for record: ${record.UseCaseId}`); + return null; + } + + try { + const config = await this.useCaseConfigMgmt.getUseCaseConfigFromRecord(record); + + if (config.UseCaseType !== UseCaseTypes.MCP_SERVER) { + return null; + } + + return await this.buildMcpServerEntry(record, config); + } catch (error) { + logger.warn(`Error retrieving config for record ${record.UseCaseId}, excluding from results: ${error}`); + return null; + } + } + + private async buildMcpServerEntry(record: UseCaseRecord, config: any) { + const stackStatus = await this.getStackStatus(record); + + try { + const { type, url } = this.extractMcpServerDetails(config, record.UseCaseId); + const description = this.extractDescription(config, record.UseCaseId); + + return { + useCaseId: record.UseCaseId, + useCaseName: config.UseCaseName || '', + description, + status: this.determineStatus(stackStatus), + url, + type + }; + } catch (error) { + logger.error(`Error extracting MCP server details for ${record.UseCaseId}: ${error}`); + return null; + } + } + + /** + * Extracts description from use case configuration + * + * @param config - Use case configuration object + * @param useCaseId - Use case ID for logging + * @returns Description string (empty string if not available) + */ + private extractDescription(config: any, useCaseId: string): string { + if (config.UseCaseDescription && typeof config.UseCaseDescription === 'string') { + const description = config.UseCaseDescription.trim(); + if (description) { + logger.debug(`Using UseCaseDescription for MCP server ${useCaseId}`); + return description; + } + } + + if (config.UseCaseDescription && typeof config.UseCaseDescription !== 'string') { + logger.error( + `Invalid UseCaseDescription type for MCP server ${useCaseId}: expected string, got ${typeof config.UseCaseDescription}` + ); + } + + logger.warn(`No valid description available for MCP server ${useCaseId}`); + return ''; + } + + private async getStackStatus(record: UseCaseRecord): Promise { + try { + const stackDetails = await this.stackMgmt.getStackDetailsFromUseCaseRecord(record); + return stackDetails.status || 'UNKNOWN'; + } catch (error) { + logger.warn(`Could not retrieve stack status for ${record.UseCaseId}: ${error}`); + return 'UNKNOWN'; + } + } + + /** + * Fetches Strands tools configuration from SSM Parameter Store + * + * @returns Promise resolving to array of StrandsTool objects, or empty array on error + */ + private async getStrandsTools(): Promise { + const paramName = process.env[STRANDS_TOOLS_SSM_PARAM_ENV_VAR]; + + if (!paramName) { + logger.warn(`${STRANDS_TOOLS_SSM_PARAM_ENV_VAR} environment variable not set, returning empty tools array`); + return []; + } + + try { + logger.debug(`Fetching Strands tools from SSM parameter: ${paramName}`); + + const command = new GetParameterCommand({ Name: paramName }); + const response = await this.ssmClient.send(command); + + if (!response.Parameter?.Value) { + logger.warn(`SSM parameter ${paramName} has no value, returning empty tools array`); + return []; + } + + // Parse JSON and validate structure + const tools = JSON.parse(response.Parameter.Value) as StrandsTool[]; + + if (!Array.isArray(tools)) { + logger.error(`SSM parameter ${paramName} value is not an array, returning empty tools array`); + return []; + } + + logger.info(`Successfully loaded ${tools.length} Strands tools from SSM parameter ${paramName}`); + return tools; + } catch (error: any) { + if (error.name === 'ParameterNotFound') { + logger.warn(`SSM parameter ${paramName} not found, returning empty tools array`); + } else if (error.name === 'AccessDeniedException') { + logger.error(`Insufficient IAM permissions to read SSM parameter ${paramName}: ${error.message}`); + } else if (error instanceof SyntaxError) { + // Truncate value for logging to avoid excessive log size + const truncatedValue = error.message.substring(0, 200); + logger.error(`Invalid JSON in SSM parameter ${paramName}. Parse error: ${truncatedValue}...`); + } else { + logger.error(`Unexpected error reading SSM parameter ${paramName}: ${error.message || error}`); + } + return []; + } + } + + /** + * Extracts MCP server type and URL from configuration + * + * @param config - Use case configuration object + * @param useCaseId - Use case ID for logging + * @returns Object containing type ('gateway' | 'runtime') and url (string) + * @throws Error if configuration is invalid (both or neither params present) + */ + private extractMcpServerDetails(config: any, useCaseId: string): { type: 'gateway' | 'runtime'; url: string } { + const mcpParams = this.validateMcpParams(config, useCaseId); + this.validateMcpParamsStructure(mcpParams, useCaseId); + + if (mcpParams.GatewayParams) { + return this.extractGatewayDetails(mcpParams.GatewayParams, useCaseId); + } + + return this.extractRuntimeDetails(mcpParams.RuntimeParams, useCaseId); + } + + private validateMcpParams(config: any, useCaseId: string): any { + if (!config.MCPParams) { + throw new Error(`MCPParams not found in configuration for use case ${useCaseId}`); + } + return config.MCPParams; + } + + private validateMcpParamsStructure(mcpParams: any, useCaseId: string): void { + const hasGateway = mcpParams.GatewayParams; + const hasRuntime = mcpParams.RuntimeParams; + + if (hasGateway && hasRuntime) { + logger.error( + `Invalid MCP configuration: both GatewayParams and RuntimeParams present for use case ${useCaseId}` + ); + throw new Error('Invalid MCP configuration: both GatewayParams and RuntimeParams present'); + } + + if (!hasGateway && !hasRuntime) { + logger.error( + `Invalid MCP configuration: neither GatewayParams nor RuntimeParams present for use case ${useCaseId}` + ); + throw new Error('Invalid MCP configuration: neither GatewayParams nor RuntimeParams present'); + } + } + + private extractGatewayDetails(gatewayParams: any, useCaseId: string): { type: 'gateway'; url: string } { + const gatewayUrl = gatewayParams.GatewayUrl || ''; + + if (!gatewayUrl) { + logger.warn(`GatewayUrl missing for use case ${useCaseId}`); + } + + return { + type: 'gateway', + url: gatewayUrl + }; + } + + private extractRuntimeDetails(runtimeParams: any, useCaseId: string): { type: 'runtime'; url: string } { + const runtimeUrl = this.getRuntimeUrl(runtimeParams, useCaseId); + + if (!runtimeUrl) { + logger.warn(`RuntimeUrl missing for use case ${useCaseId}`); + } + + return { + type: 'runtime', + url: runtimeUrl + }; + } + + private getRuntimeUrl(runtimeParams: any, useCaseId: string): string { + if (runtimeParams.RuntimeUrl) { + return runtimeParams.RuntimeUrl; + } + + if (runtimeParams.RuntimeArn) { + return this.constructRuntimeUrlFromArn(runtimeParams.RuntimeArn, useCaseId); + } + + return ''; + } + + private constructRuntimeUrlFromArn(runtimeArn: string, useCaseId: string): string { + try { + const encodedArn = encodeURIComponent(runtimeArn); + const region = process.env.AWS_REGION; + const url = `https://bedrock-agentcore.${region}.amazonaws.com/runtimes/${encodedArn}/invocations?qualifier=DEFAULT`; + logger.debug(`Constructed RuntimeUrl from RuntimeArn for use case ${useCaseId}`); + return url; + } catch (error) { + logger.warn(`Failed to construct RuntimeUrl from RuntimeArn for use case ${useCaseId}: ${error}`); + return ''; + } + } + + /** + * Determines the MCP server status based on CloudFormation stack status + * + * @param stackStatus - CloudFormation stack status + * @returns 'ACTIVE' or 'INACTIVE' + */ + private determineStatus(stackStatus: string): 'ACTIVE' | 'INACTIVE' { + // Active if stack creation is complete + if (stackStatus === 'CREATE_COMPLETE' || stackStatus === 'UPDATE_COMPLETE') { + return 'ACTIVE'; + } + + // All other states (CREATE_IN_PROGRESS, ROLLBACK, etc.) are inactive + return 'INACTIVE'; + } +} diff --git a/source/lambda/use-case-management/command.ts b/source/lambda/use-case-management/model/commands/use-case-command.ts similarity index 91% rename from source/lambda/use-case-management/command.ts rename to source/lambda/use-case-management/model/commands/use-case-command.ts index dd59f023..49b3f272 100644 --- a/source/lambda/use-case-management/command.ts +++ b/source/lambda/use-case-management/model/commands/use-case-command.ts @@ -3,21 +3,18 @@ import { StackNotFoundException } from '@aws-sdk/client-cloudformation'; import { ResourceNotFoundException } from '@aws-sdk/client-dynamodb'; -import { StackManagement, UseCaseStackDetails } from './cfn/stack-management'; -import { StorageManagement } from './ddb/storage-management'; -import { UseCaseConfigManagement } from './ddb/use-case-config-management'; -import { ListUseCasesAdapter, UseCaseRecord } from './model/list-use-cases'; -import { UseCase } from './model/use-case'; -import { UseCaseValidator } from './model/use-case-validator'; -import { logger, tracer } from './power-tools-init'; -import { DEFAULT_USE_CASES_PER_PAGE } from './utils/constants'; -import { GetUseCaseAdapter, validateAdminToken, castToAdminType, castToBusinessUserType } from './model/get-use-case'; -import { UseCaseConfiguration, GetUseCaseDetailsAdminResponse, GetUseCaseDetailsUserResponse } from './model/types'; - -export enum Status { - SUCCESS = 'SUCCESS', - FAILED = 'FAILED' -} +import { StackManagement, UseCaseStackDetails } from '../../cfn/stack-management'; +import { StorageManagement } from '../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../ddb/use-case-config-management'; +import { ListUseCasesAdapter, UseCaseRecord } from '../list-use-cases'; +import { UseCase } from '../use-case'; + +import { logger, tracer } from '../../power-tools-init'; +import { DEFAULT_USE_CASES_PER_PAGE, Status } from '../../utils/constants'; +import { GetUseCaseAdapter, validateAdminToken, castToAdminType, castToBusinessUserType } from '../get-use-case'; +import { UseCaseConfiguration, GetUseCaseDetailsAdminResponse, GetUseCaseDetailsUserResponse } from '../types'; +import { CaseCommand } from './case-command'; +import { UseCaseValidator, ValidatorFactory } from '../validators'; export type DeploymentDetails = { useCaseRecord: UseCaseRecord; @@ -25,10 +22,6 @@ export type DeploymentDetails = { useCaseConfigDetails: UseCaseConfiguration; }; -export interface CaseCommand { - execute(useCase: UseCase | ListUseCasesAdapter | GetUseCaseAdapter): Promise; -} - /** * Command interface to define operations on use cases that the deployment stack manages */ @@ -51,7 +44,7 @@ export abstract class UseCaseMgmtCommand implements CaseCommand { * @param useCaseType - The type of use case (e.g., 'Text', 'Agent') */ protected initializeValidator(useCaseType: string): void { - this.validator = UseCaseValidator.createValidator(useCaseType, this.storageMgmt, this.useCaseConfigMgmt); + this.validator = ValidatorFactory.createValidator(useCaseType, this.storageMgmt, this.useCaseConfigMgmt); } /** @@ -249,7 +242,7 @@ export class PermanentlyDeleteUseCaseCommand implements CaseCommand { } } - await this.deleteConfig(useCase); + await this.markConfigForDeletion(useCase); await this.deleteDdbRecord(useCase); return Status.SUCCESS; @@ -264,14 +257,14 @@ export class PermanentlyDeleteUseCaseCommand implements CaseCommand { } } - private async deleteConfig(useCase: UseCase) { + private async markConfigForDeletion(useCase: UseCase) { try { - await this.useCaseConfigMgmt.deleteUseCaseConfig(useCase); + await this.useCaseConfigMgmt.markUseCaseConfigForDeletion(useCase); } catch (error) { if (error instanceof ResourceNotFoundException) { - logger.warn('Table does not exist, hence skipping deletion.'); + logger.warn('Config record does not exist, hence skipping TTL setting.'); } else { - logger.error(`Error while deleting use case configuration, Error: ${error}`); + logger.error(`Error while marking use case configuration for deletion, Error: ${error}`); throw error; } } @@ -305,6 +298,9 @@ export class ListUseCasesCommand implements CaseCommand { try { const response = await this.storageMgmt.getAllCaseRecords(listUseCasesEvent); useCaseRecords = response.useCaseRecords; + + useCaseRecords = this.filterUseCasesByType(useCaseRecords); + if (listUseCasesEvent.searchFilter) { useCaseRecords = this.filterUseCases(useCaseRecords, listUseCasesEvent.searchFilter); } @@ -404,6 +400,17 @@ export class ListUseCasesCommand implements CaseCommand { return useCaseRecords; } + /** + * Filters use case records by type, removing any records that don't have a valid use case type. + * + * @param useCaseRecords - Array of use case records to filter + * @returns Filtered array of use case records with valid types + */ + protected filterUseCasesByType(useCaseRecords: UseCaseRecord[]): UseCaseRecord[] { + //no filtering performed within this top-level function + return useCaseRecords; + } + /** * Formatting the data from ddb, use case config config, and a stack's deployment details to a list of use cases * to send to the front end. @@ -411,7 +418,7 @@ export class ListUseCasesCommand implements CaseCommand { * @param useCaseDeploymentsMap * @returns */ - private formatUseCasesToList = ( + protected formatUseCasesToList = ( useCaseDeploymentsMap: Map, numUseCases: number, nextPage: number | undefined @@ -424,6 +431,7 @@ export class ListUseCasesCommand implements CaseCommand { Name: value.useCaseRecord.Name, UseCaseId: value.useCaseRecord.UseCaseId, CreatedDate: value.useCaseRecord.CreatedDate, + Description: value.useCaseRecord.Description, useCaseUUID: value.useCaseDeploymentDetails.useCaseUUID, status: value.useCaseDeploymentDetails.status, cloudFrontWebUrl: value.useCaseDeploymentDetails.cloudFrontWebUrl ?? undefined, diff --git a/source/lambda/use-case-management/model/commands/workflow-command.ts b/source/lambda/use-case-management/model/commands/workflow-command.ts new file mode 100644 index 00000000..5b300b41 --- /dev/null +++ b/source/lambda/use-case-management/model/commands/workflow-command.ts @@ -0,0 +1,24 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ListUseCasesAdapter, UseCaseRecord } from '../list-use-cases'; +import { UseCaseTypes } from '../../utils/constants'; +import { tracer } from '../../power-tools-init'; +import { ListUseCasesCommand } from './use-case-command'; + +/** + * Command to list Workflow use cases + */ +export class ListWorkflowCommand extends ListUseCasesCommand { + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###listWorkflowCommand' }) + public async execute(operation: ListUseCasesAdapter): Promise { + return await super.execute(operation); + } + + /** + * Filters use cases to only include Workflow type + */ + protected filterUseCasesByType(useCaseRecords: UseCaseRecord[]): UseCaseRecord[] { + return useCaseRecords.filter((record) => record.UseCaseType === UseCaseTypes.WORKFLOW); + } +} \ No newline at end of file diff --git a/source/lambda/use-case-management/model/get-use-case.ts b/source/lambda/use-case-management/model/get-use-case.ts index 0ffa6119..30cf491a 100644 --- a/source/lambda/use-case-management/model/get-use-case.ts +++ b/source/lambda/use-case-management/model/get-use-case.ts @@ -6,7 +6,10 @@ import { AgentUseCaseConfiguration, UseCaseConfiguration, GetUseCaseDetailsAdminResponse, - GetUseCaseDetailsUserResponse + GetUseCaseDetailsUserResponse, + MCPUseCaseConfiguration, + AgentBuilderParams, + WorkflowParams } from './types'; import { UseCaseRecord } from './list-use-cases'; import { UseCaseStackDetails } from '../cfn/stack-management'; @@ -16,7 +19,12 @@ import { TokenVerifier } from '../utils/cognito_jwt_verifier'; export type CombinedUseCaseParams = UseCaseRecord & Partial & Partial & - Partial; + Partial & + Partial & { + AgentBuilderParams?: AgentBuilderParams; + } & { + WorkflowParams?: WorkflowParams; + }; export async function validateAdminToken(token: string): Promise { try { @@ -53,7 +61,7 @@ export function castToAdminType(params: CombinedUseCaseParams): GetUseCaseDetail createNewVpc: params.createNewVpc as string, vpcEnabled: params.vpcEnabled as string, vpcId: params.vpcId, - UseCaseType: params.UseCaseType as string, + UseCaseType: params.UseCaseType, UseCaseName: params.UseCaseName ?? params.Name, cloudwatchDashboardUrl: params.cloudwatchDashboardUrl, cloudFrontWebUrl: params.cloudFrontWebUrl, @@ -65,9 +73,13 @@ export function castToAdminType(params: CombinedUseCaseParams): GetUseCaseDetail LlmParams: params.LlmParams, KnowledgeBaseParams: params.KnowledgeBaseParams, AgentParams: params.AgentParams, + MCPParams: params.MCPParams, + AgentBuilderParams: params.AgentBuilderParams, + WorkflowParams: params.WorkflowParams, AuthenticationParams: params.AuthenticationParams, defaultUserEmail: params.defaultUserEmail, - FeedbackParams: params.FeedbackParams + FeedbackParams: params.FeedbackParams, + ProvisionedConcurrencyValue: params.ProvisionedConcurrencyValue }; return useCaseInfo; @@ -76,7 +88,7 @@ export function castToAdminType(params: CombinedUseCaseParams): GetUseCaseDetail export function castToBusinessUserType(params: CombinedUseCaseParams): GetUseCaseDetailsUserResponse { let useCaseInfo: GetUseCaseDetailsUserResponse = { UseCaseName: params.UseCaseName ?? params.Name, - UseCaseType: params.UseCaseType as string, + UseCaseType: params.UseCaseType, LlmParams: params.LlmParams, ModelProviderName: params.LlmParams?.ModelProvider ?? 'BedrockAgent' }; @@ -127,6 +139,6 @@ export class GetUseCaseAdapter { throw new RequestValidationError('Authorization header was not found in the request'); } - this.authToken = event.headers.Authorization; + this.authToken = event.headers.Authorization.replace(/^Bearer\s+/i, ''); } } diff --git a/source/lambda/use-case-management/model/list-use-cases.ts b/source/lambda/use-case-management/model/list-use-cases.ts index 644d8875..abdc1474 100644 --- a/source/lambda/use-case-management/model/list-use-cases.ts +++ b/source/lambda/use-case-management/model/list-use-cases.ts @@ -21,6 +21,7 @@ export interface StackInfo { */ export interface UseCaseRecord { UseCaseId: string; + UseCaseType: string; StackId: string; Name: string; UseCaseConfigRecordKey: string; diff --git a/source/lambda/use-case-management/model/types.ts b/source/lambda/use-case-management/model/types.ts index 7034ba06..88e34eef 100644 --- a/source/lambda/use-case-management/model/types.ts +++ b/source/lambda/use-case-management/model/types.ts @@ -44,10 +44,16 @@ export interface LlmParams { RAGEnabled?: boolean; Streaming?: boolean; Verbose?: boolean; + MultimodalParams?: MultimodalParams; } -interface FeedbackParams { +export interface FeedbackParams { FeedbackEnabled: boolean; + CustomMappings?: {}; +} + +export interface MultimodalParams { + MultimodalEnabled: boolean; } export interface KnowledgeBaseParams { @@ -77,15 +83,22 @@ export interface AuthenticationParams { CognitoParams?: CognitoParams; } -export interface UseCaseConfiguration { - UseCaseType?: string; +// Base configuration interface that all use case configurations extend +export interface BaseUseCaseConfiguration { UseCaseName?: string; - ConversationMemoryParams?: ConversationMemoryParams; - KnowledgeBaseParams?: KnowledgeBaseParams; - LlmParams?: LlmParams; + UseCaseType?: string; + UseCaseDescription?: string; AuthenticationParams?: AuthenticationParams; IsInternalUser?: string; FeedbackParams?: FeedbackParams; + ProvisionedConcurrencyValue?: number; +} + +// Text/Chat use case configuration +export interface UseCaseConfiguration extends BaseUseCaseConfiguration { + ConversationMemoryParams?: ConversationMemoryParams; + KnowledgeBaseParams?: KnowledgeBaseParams; + LlmParams?: LlmParams; } export interface BedrockAgentParams { @@ -98,13 +111,120 @@ export interface AgentParams { BedrockAgentParams: BedrockAgentParams; } -export interface AgentUseCaseConfiguration { - UseCaseType?: string; - UseCaseName?: string; +export interface AgentUseCaseConfiguration extends BaseUseCaseConfiguration { AgentParams?: AgentParams; - AuthenticationParams?: AuthenticationParams; - IsInternalUser?: string; - FeedbackParams?: FeedbackParams; +} + +export interface CustomParameter { + key: string; + value: string; +} + +export interface OAuthAdditionalConfig { + scopes?: string[]; + customParameters?: CustomParameter[]; +} + +export interface ApiKeyAdditionalConfig { + location?: 'HEADER' | 'QUERY_PARAMETER'; + parameterName?: string; + prefix?: string; +} + +export interface AdditionalConfigParams { + OAuthAdditionalConfig?: OAuthAdditionalConfig; + ApiKeyAdditionalConfig?: ApiKeyAdditionalConfig; +} + +export interface OutboundAuthParams { + OutboundAuthProviderArn: string; + OutboundAuthProviderType: 'API_KEY' | 'OAUTH'; + AdditionalConfigParams?: AdditionalConfigParams; +} + +export interface GatewayParams { + GatewayName?: string; + GatewayId?: string; + GatewayUrl?: string; + GatewayArn?: string; + TargetParams: TargetParams[]; +} + +export interface TargetParams { + TargetId?: string; + TargetName: string; + TargetDescription?: string; + TargetType: 'openApiSchema' | 'smithyModel' | 'lambda'; + LambdaArn?: string; + SchemaUri: string; + OutboundAuthParams?: OutboundAuthParams; +} + +export interface RuntimeParams { + EcrUri: string; + RuntimeId?: string; + AgentArn?: string; + RuntimeUrl?: string; + EnvironmentVariables?: { [key: string]: string }; +} + +export interface MCPParams { + GatewayParams?: GatewayParams; + RuntimeParams?: RuntimeParams; +} + +// MCP use case configuration +export interface MCPUseCaseConfiguration extends BaseUseCaseConfiguration { + MCPParams?: MCPParams; +} + +export interface MemoryParams { + LongTermEnabled?: boolean; +} + +export interface AgentBuilderParams { + SystemPrompt?: string; + MCPServers?: Array<{ + Type: string; + UseCaseName: string; + UseCaseId: string; + Url: string; + }>; + Tools?: Array<{ + ToolId: string; + }>; + MemoryConfig?: MemoryParams; +} + +// Agent Builder use case configuration +export interface AgentBuilderUseCaseConfiguration extends BaseUseCaseConfiguration { + AgentBuilderParams?: AgentBuilderParams; + LlmParams?: LlmParams; +} + +export interface WorkflowParams { + SystemPrompt?: string; + OrchestrationPattern?: string; + AgentsAsToolsParams?: AgentsAsToolsParams; + MemoryConfig?: MemoryParams; +} + +// Extended interface for workflow agents that includes UseCaseId +export interface WorkflowAgentConfiguration extends AgentBuilderUseCaseConfiguration { + UseCaseId: string; +} + +export interface AgentsAsToolsParams { + Agents?: Pick< + WorkflowAgentConfiguration, + 'UseCaseId' | 'UseCaseType' | 'UseCaseName' | 'UseCaseDescription' | 'AgentBuilderParams' | 'LlmParams' + >[]; +} + +// Workflow use case configuration +export interface WorkflowUseCaseConfiguration extends BaseUseCaseConfiguration { + WorkflowParams?: WorkflowParams; + LlmParams?: LlmParams; } export interface GetUseCaseDetailsAdminResponse { @@ -130,10 +250,14 @@ export interface GetUseCaseDetailsAdminResponse { AuthenticationParams?: AuthenticationParams; LlmParams?: LlmParams; AgentParams?: AgentParams; + MCPParams?: MCPParams; + AgentBuilderParams?: AgentBuilderParams; + WorkflowParams?: WorkflowParams; privateSubnetIds?: string[]; securityGroupIds?: string[]; - defaultUserEmail?: string; + defaultUserEmail?: string; FeedbackParams?: FeedbackParams; + ProvisionedConcurrencyValue?: number; } export interface GetUseCaseDetailsUserResponse { diff --git a/source/lambda/use-case-management/model/use-case-validator.ts b/source/lambda/use-case-management/model/use-case-validator.ts deleted file mode 100644 index 32ccb5b9..00000000 --- a/source/lambda/use-case-management/model/use-case-validator.ts +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import { CognitoIdentityProviderClient, DescribeUserPoolCommand } from '@aws-sdk/client-cognito-identity-provider'; -import { customAwsConfig } from 'aws-node-user-agent-config'; -import _ from 'lodash'; -import { StorageManagement } from '../ddb/storage-management'; -import { UseCaseConfigManagement } from '../ddb/use-case-config-management'; -import { logger, tracer } from '../power-tools-init'; -import { - AUTHENTICATION_PROVIDERS, - CfnParameterKeys, - ChatRequiredPlaceholders, - CHAT_PROVIDERS, - DisambiguationRequiredPlaceholders, - KnowledgeBaseTypes, - RAGChatRequiredPlaceholders, - UseCaseTypes -} from '../utils/constants'; -import RequestValidationError from '../utils/error'; -import { UseCase } from './use-case'; - -/** - * Abstract base class for use case validators. - * This class provides a common interface for validating different types of use cases. - */ -export abstract class UseCaseValidator { - protected storageMgmt: StorageManagement; - protected useCaseConfigMgmt: UseCaseConfigManagement; - - constructor(storageMgmt: StorageManagement, useCaseConfigMgmt: UseCaseConfigManagement) { - this.storageMgmt = storageMgmt; - this.useCaseConfigMgmt = useCaseConfigMgmt; - } - - /** - * Validates a new use case. - * - * @param useCase - The use case to be validated - * @returns A promise that resolves to the validated use case - */ - public abstract validateNewUseCase(useCase: UseCase): Promise; - - /** - * Validates an updated use case. - * - * @param useCase - The use case to be validated - * @param oldDynamoDbRecordKey - The key of the old DynamoDB record - * @returns A promise that resolves to the validated use case - */ - public abstract validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise; - - /** - * Factory method to create the appropriate validator based on the use case type. - * - * @param useCaseType - The type of use case (e.g., 'Text', 'Agent') - * @param storageMgmt - The storage management instance - * @param useCaseConfigMgmt - The use case configuration management instance - * @returns An instance of the appropriate UseCaseValidator subclass - * @throws Error if an invalid use case type is provided - */ - static createValidator( - useCaseType: string, - storageMgmt: StorageManagement, - useCaseConfigMgmt: UseCaseConfigManagement - ): UseCaseValidator { - switch (useCaseType) { - case UseCaseTypes.CHAT: - return new TextUseCaseValidator(storageMgmt, useCaseConfigMgmt); - case UseCaseTypes.RAGChat: - return new TextUseCaseValidator(storageMgmt, useCaseConfigMgmt); - case UseCaseTypes.AGENT: - return new AgentUseCaseValidator(storageMgmt, useCaseConfigMgmt); - default: - throw new Error(`Invalid use case type: ${useCaseType}`); - } - } -} - -export class TextUseCaseValidator extends UseCaseValidator { - /** - * Validates a use case meant for a new text deployment fills in values as required. Will: - * - Check the model info database to ensure provider/modelid combination is valid - * - Populate a default prompt if none is provided - * - * @param config a config to validate - * @returns validated config with values filled in where needed - * @throws if the config is invalid or cannot be validated for some reason - */ - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateNewTextUseCase' }) - public async validateNewUseCase(useCase: UseCase): Promise { - const modelInfo = await this.storageMgmt.getModelInfo(useCase); // will throw if provider/model id combo does not exist - if (!useCase.configuration.LlmParams!.PromptParams) { - useCase.configuration.LlmParams!.PromptParams = {}; - } - if (!useCase.configuration.LlmParams!.PromptParams.PromptTemplate) { - useCase.configuration.LlmParams!.PromptParams.PromptTemplate = modelInfo.Prompt; - } - if (!useCase.configuration.LlmParams!.PromptParams.DisambiguationPromptTemplate) { - useCase.configuration.LlmParams!.PromptParams.DisambiguationPromptTemplate = modelInfo.DisambiguationPrompt; - } - if (useCase.configuration.AuthenticationParams) { - // prettier-ignore - switch (useCase.configuration.AuthenticationParams.AuthenticationProvider) { //NOSONAR - typescript:S1301, switch statement used for ease of future extensions - case AUTHENTICATION_PROVIDERS.COGNITO: - // overriding the previously set CognitoDomainPrefix parameter - // by fetching it dynamically based on the set user pool - - const existingUserPoolId = useCase.cfnParameters?.get(CfnParameterKeys.ExistingCognitoUserPoolId); - if (!existingUserPoolId) { - throw new Error('Undefined user pool provided for the cognito authentication provider.'); - } - - const cognitoDomainPrefix = await getCognitoDomainPrefixByUserPool(existingUserPoolId); - - if (!useCase.cfnParameters) { - throw new Error('CFNParameters are not available yet for setting Cognito Domain Prefix.'); - } - - useCase.cfnParameters.set(CfnParameterKeys.CognitoDomainPrefix, cognitoDomainPrefix); - - break; - } - } - - await TextUseCaseValidator.checkModelInputPayloadSchema(useCase); // NOSONAR - typescript:S4123 - await is required in tests despite seeming unnecessary - await TextUseCaseValidator.checkPromptsAreCompatible(useCase); // NOSONAR - typescript:S4123 - await is required in tests despite seeming unnecessary - await TextUseCaseValidator.checkPromptIsEscaped(useCase); // NOSONAR - typescript:S4123 - await is required in tests despite seeming unnecessary - await TextUseCaseValidator.checkKnowledgeBaseTypeMatchesParams(useCase); // NOSONAR - typescript:S4123 - await is required in tests despite seeming unnecessary - - return useCase; - } - - /** - * Validates a use case meant for an update fills in values as required. Will: - * - Check the model info database to ensure provider/modelid combination is valid - * - * @param config a config to validate - * @param oldSSMParamName the name of the SSM parameter previously used by this use case. Used to retrieve the existing config and merge with the new one. - * @returns validated config with values filled in where needed - * @throws if the config is invalid or cannot be validated for some reason - */ - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateUpdateTextUseCase' }) - public async validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise { - // retrieve the existing config from DynamoDB using a dummy use case object - let dummyOldUseCase = useCase.clone(); - dummyOldUseCase.setUseCaseConfigRecordKey(oldDynamoDbRecordKey); - const existingConfigObj = await this.useCaseConfigMgmt.getUseCaseConfigFromTable(dummyOldUseCase); - // this await is required for this to work on lambda, despite it seeming unnecessary here - useCase.configuration = await TextUseCaseValidator.mergeConfigs(existingConfigObj, useCase.configuration); - - await this.storageMgmt.getModelInfo(useCase); // will throw if provider/model id combo does not exist - await TextUseCaseValidator.checkModelInputPayloadSchema(useCase); // NOSONAR - typescript:S4123 - await is required in tests despite seeming unnecessary - await TextUseCaseValidator.checkPromptsAreCompatible(useCase); // NOSONAR - typescript:S4123 - await is required in tests despite seeming unnecessary - await TextUseCaseValidator.checkPromptIsEscaped(useCase); // NOSONAR - typescript:S4123 - await is required in tests despite seeming unnecessary - await TextUseCaseValidator.checkKnowledgeBaseTypeMatchesParams(useCase); // NOSONAR - typescript:S4123 - await is required in tests despite seeming unnecessary - - return useCase; - } - - /** - * Merge existing config with new config, replacing common parameters with the new values. - * For the LlmParams.ModelParams, the values from the new config are used to overwrite the - * existing config's ModelParams. - * @param existingConfigObj Existing config data object - * @param newConfigObj Config data to be updated - * @returns - */ - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkMergeConfigs' }) - public static mergeConfigs(existingConfigObj: any, newConfigObj: any): any { - const modelParams = _.get(newConfigObj, 'LlmParams.ModelParams', undefined); - const sageMakerModelInputPayloadSchema = _.get( - newConfigObj, - 'LlmParams.SageMakerLlmParams.ModelInputPayloadSchema', - undefined - ); - let mergedConfig = _.merge(existingConfigObj, newConfigObj); - - if (modelParams) { - mergedConfig.LlmParams.ModelParams = modelParams; - } - if (sageMakerModelInputPayloadSchema) { - mergedConfig.LlmParams.SageMakerLlmParams.ModelInputPayloadSchema = sageMakerModelInputPayloadSchema; - } - mergedConfig = this.resolveKnowledgeBaseParamsOnUpdate(newConfigObj, mergedConfig); - - mergedConfig = this.resolveBedrockModelSourceOnUpdate(newConfigObj, mergedConfig); - - return mergedConfig; - } - - /** - * Function to be applied to an updated use case configuration which will restrict the BedrockLlmParams to only contain one of the InferenceProfileId or ModelId. Required since merging of new and existing configs on updates will retain both values. - * - * @param newConfig The new config object coming from an update request - * @param mergedConfig A merged config from existing and new configs - * @returns A resolved config which has only 1 of the InferenceProfileId or ModelId. - */ - public static resolveBedrockModelSourceOnUpdate(updateConfig: any, mergedConfig: any): any { - let resolvedConfig = mergedConfig; - - // only perform this action if our merged config is invalid (has both a ModelId and an InferenceProfileId) - if ( - mergedConfig.LlmParams?.BedrockLlmParams?.ModelId && - mergedConfig.LlmParams?.BedrockLlmParams?.InferenceProfileId - ) { - // switching rom an inference profile to a model - if (updateConfig.LlmParams?.BedrockLlmParams?.ModelId) { - resolvedConfig.LlmParams.BedrockLlmParams.ModelId = updateConfig.LlmParams.BedrockLlmParams.ModelId; - delete resolvedConfig.LlmParams.BedrockLlmParams.InferenceProfileId; - } - // switching from a model to an inference profile - else if (updateConfig.LlmParams?.BedrockLlmParams?.InferenceProfileId) { - resolvedConfig.LlmParams.BedrockLlmParams.InferenceProfileId = - updateConfig.LlmParams.BedrockLlmParams.InferenceProfileId; - delete resolvedConfig.LlmParams.BedrockLlmParams.ModelId; - // if previously using a provisioned model, ModelArn would be present and should be removed - if (resolvedConfig.LlmParams?.BedrockLlmParams?.ModelArn) { - delete resolvedConfig.LlmParams?.BedrockLlmParams?.ModelArn; - } - } - } - - return resolvedConfig; - } - - /** - * Function to be applied to an updated use case configuration which will ensure that a removed NoDocsFoundResponse value results in its removal from the LLM config. - * - * @param newConfig The new config object coming from an update request - * @param mergedConfig A merged config from existing and new configs - * @returns A resolved config which always takes the value of NoDocsFoundResponse from the updated config. - */ - public static resolveKnowledgeBaseParamsOnUpdate(updateConfig: any, mergedConfig: any): any { - let resolvedConfig = mergedConfig; - - if(resolvedConfig?.KnowledgeBaseParams?.NoDocsFoundResponse && !updateConfig?.KnowledgeBaseParams?.NoDocsFoundResponse) { - delete resolvedConfig.KnowledgeBaseParams.NoDocsFoundResponse; - } - - return resolvedConfig; - } - - /** - * Checks that the provided prompt is valid given the configuration. - * Namely, correct placeholders are present for the given RAG configuration. - * - * @param useCase use case to check - * @throws if validation fails - */ - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkModelInputPayloadSchema' }) - private static checkModelInputPayloadSchema(useCase: UseCase): void { - const modelInputPayloadSchema = useCase.configuration.LlmParams!.SageMakerLlmParams?.ModelInputPayloadSchema; - if (modelInputPayloadSchema !== undefined) { - // finds all the placeholders ("<>") in the payload schema - const regex = /<<\w+>>/g; - const reservedPlaceholders = ['<>', '<>']; - const modelInputPayloadSchemaStr = JSON.stringify(modelInputPayloadSchema); - const matches = modelInputPayloadSchemaStr.match(regex); - const modelParams = useCase.configuration.LlmParams!.ModelParams; - - if (matches) { - // reserved placeholders aren't provided in the model parameters - _.remove(matches, (match: string) => { - return reservedPlaceholders.includes(match); - }); - - matches.forEach((match) => { - const key = match.replace('<<', '').replace('>>', ''); - if (modelParams === undefined) { - throw new RequestValidationError( - 'No model parameters were provided in the useCase despite requiring parameters in the input payload schema.' - ); - } else if (modelParams[key as keyof Object] !== undefined) { - return; - } else { - throw new RequestValidationError( - `InvalidModelParameter: ${key} is not a valid model parameter present in the Model Parameters` - ); - } - }); - } - } - } - - /** - * Checks that the provided prompt is valid given the configuration. Namely, correct placeholders are present for given RAG configuration. - * - * @param useCase use case to check - * @throws if validation fails - */ - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkPromptsAreCompatible' }) - private static checkPromptsAreCompatible(useCase: UseCase): void { - //validate main prompt template - const promptTemplate = useCase.configuration.LlmParams!.PromptParams!.PromptTemplate!; - const chat_provider = useCase.configuration.LlmParams!.ModelProvider; - const requiredPlaceholders = useCase.configuration.LlmParams!.RAGEnabled - ? RAGChatRequiredPlaceholders[chat_provider as CHAT_PROVIDERS] - : ChatRequiredPlaceholders[chat_provider as CHAT_PROVIDERS]; - - requiredPlaceholders.forEach((placeholder) => { - //placeholder must exist - if (!promptTemplate.includes(placeholder)) { - throw new RequestValidationError( - `Provided prompt template does not have the required placeholder '${placeholder}'.` - ); - } - - //placeholder must exist only once - if (promptTemplate.indexOf(placeholder) !== promptTemplate.lastIndexOf(placeholder)) { - throw new RequestValidationError( - `Placeholder '${placeholder}' should appear only once in the prompt template.` - ); - } - }); - - //validate disambiguation prompt template - if ( - useCase.configuration.LlmParams!.RAGEnabled && - useCase.configuration.LlmParams!.PromptParams?.DisambiguationEnabled - ) { - const disambiguationPromptTemplate = - useCase.configuration.LlmParams!.PromptParams.DisambiguationPromptTemplate!; - - DisambiguationRequiredPlaceholders.forEach((placeholder) => { - //placeholder must exist - if (!disambiguationPromptTemplate.includes(placeholder)) { - throw new RequestValidationError( - `Provided disambiguation prompt template does not have the required placeholder '${placeholder}'.` - ); - } - - //placeholder must exist only once - if ( - disambiguationPromptTemplate.indexOf(placeholder) !== - disambiguationPromptTemplate.lastIndexOf(placeholder) - ) { - throw new RequestValidationError( - `Placeholder '${placeholder}' should appear only once in the disambiguation prompt template.` - ); - } - }); - } - } - - /** - * In order for a prompt to contain curly braces (e.g. providing code or JSON data in the prompt), LangChain requires they are escaped by being doubled ({{ }} rather than {}), so as to not interfere with the placeholders (e.g. history, etc.) - * - * @param useCase use case to check - * @throws if validation fails - */ - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkPromptIsEscaped' }) - private static checkPromptIsEscaped(useCase: UseCase): void { - // removes all the placeholders, which are valid uses of unescaped curly braces - let promptTemplate = useCase.configuration.LlmParams!.PromptParams!.PromptTemplate!; - const chat_provider = useCase.configuration.LlmParams!.ModelProvider!; - const requiredPlaceholders = useCase.configuration.LlmParams!.RAGEnabled - ? RAGChatRequiredPlaceholders[chat_provider as CHAT_PROVIDERS] - : ChatRequiredPlaceholders[chat_provider as CHAT_PROVIDERS]; - - requiredPlaceholders.forEach((placeholder) => { - promptTemplate = promptTemplate.replace(placeholder, ''); - }); - - // ensure both types of braces are escaped (doubled), per langchain standards - const escapableCharacters = ['{', '}']; - escapableCharacters.forEach((char) => { - let index = 0; - while (index < promptTemplate.length) { - const charIndex = promptTemplate.indexOf(char, index); - - if (charIndex === -1) { - // No more curly braces found - break; - } - - // is it escaped by doubling? - if (promptTemplate.charAt(charIndex + 1) !== char) { - throw new RequestValidationError(`Prompt template contains an unescaped curly brace '${char}'`); - } else { - index = charIndex + 2; - } - } - }); - } - - /** - * Checks that the selected KnowledgeBaseType is compatible with the KnowledgeBaseParams provided - * - * @param useCase use case to check - * @throws if validation fails - */ - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkPromptIsCompatible' }) - private static checkKnowledgeBaseTypeMatchesParams(useCase: UseCase): void { - if (useCase.configuration.LlmParams?.RAGEnabled) { - const knowledgeBaseType = useCase.configuration.KnowledgeBaseParams!.KnowledgeBaseType!; - - let typeSpecificRequiredParamsObject: string = ''; - - switch (knowledgeBaseType) { - case KnowledgeBaseTypes.KENDRA: - if (useCase.configuration.KnowledgeBaseParams!.KendraKnowledgeBaseParams !== undefined) { - return; - } else { - typeSpecificRequiredParamsObject = 'KendraKnowledgeBaseParams'; - } - break; - case KnowledgeBaseTypes.BEDROCK: - if (useCase.configuration.KnowledgeBaseParams!.BedrockKnowledgeBaseParams !== undefined) { - return; - } else { - typeSpecificRequiredParamsObject = 'BedrockKnowledgeBaseParams'; - } - break; - default: - throw new RequestValidationError( - `Provided knowledge base type ${knowledgeBaseType} is not supported. You should not get this error.` - ); - } - - throw new RequestValidationError( - `Provided knowledge base type ${knowledgeBaseType} requires ${typeSpecificRequiredParamsObject} to be present in KnowledgeBaseParams.` - ); - } - } -} - -export class AgentUseCaseValidator extends UseCaseValidator { - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateNewAgentUseCase' }) - public async validateNewUseCase(useCase: UseCase): Promise { - if (useCase.configuration.AuthenticationParams) { - // prettier-ignore - switch (useCase.configuration.AuthenticationParams.AuthenticationProvider) { //NOSONAR - typescript:S1301, switch statement used for ease of future extensions - case AUTHENTICATION_PROVIDERS.COGNITO: - // overriding the previously set CognitoDomainPrefix parameter - // by fetching it dynamically based on the set user pool - - const existingUserPoolId = useCase.cfnParameters?.get(CfnParameterKeys.ExistingCognitoUserPoolId); - if (!existingUserPoolId) { - throw new Error('Undefined user pool provided for the cognito authentication provider.'); - } - - const cognitoDomainPrefix = await getCognitoDomainPrefixByUserPool(existingUserPoolId); - - if (!useCase.cfnParameters) { - throw new Error('CfnParameters are not available yet for setting Cognito Domain Prefix.'); - } - - useCase.cfnParameters.set(CfnParameterKeys.CognitoDomainPrefix, cognitoDomainPrefix); - break; - } - } - return useCase; - } - - @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateUpdateAgentUseCase' }) - public async validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise { - // retrieve the existing config from DynamoDB using a dummy use case object - let dummyOldUseCase = useCase.clone(); - dummyOldUseCase.setUseCaseConfigRecordKey(oldDynamoDbRecordKey); - const existingConfigObj = await this.useCaseConfigMgmt.getUseCaseConfigFromTable(dummyOldUseCase); - _.merge(existingConfigObj, useCase.configuration); - - return useCase; - } -} - -export async function getCognitoDomainPrefixByUserPool(userPoolId: string) { - const client = new CognitoIdentityProviderClient(customAwsConfig()); - - try { - const command = new DescribeUserPoolCommand({ UserPoolId: userPoolId }); - const response = await client.send(command); - - if (response?.UserPool?.Domain) { - return response.UserPool.Domain; - } else { - throw new Error('No domain found for this user pool.'); - } - } catch (error) { - logger.error(`Error fetching user pool details. Error: ${error}`); - throw error; - } -} diff --git a/source/lambda/use-case-management/model/use-case.ts b/source/lambda/use-case-management/model/use-case.ts index ed244b1d..8d65d859 100644 --- a/source/lambda/use-case-management/model/use-case.ts +++ b/source/lambda/use-case-management/model/use-case.ts @@ -1,8 +1,14 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { CfnParameterKeys } from '../utils/constants'; -import { UseCaseConfiguration } from './types'; +import { CfnParameterKeys, RetainedCfnParameterKeys } from '../utils/constants'; +import { + MCPUseCaseConfiguration, + UseCaseConfiguration, + AgentUseCaseConfiguration, + AgentBuilderUseCaseConfiguration, + WorkflowUseCaseConfiguration +} from './types'; /** * Data Model to store capture use case specific information @@ -41,12 +47,17 @@ export class UseCase { /** * Name of the provider for the use case */ - public readonly providerName: string; + public readonly providerName: string | undefined; /** * Additional configuration for the use case, stored as a JSON object in SSM */ - public configuration: UseCaseConfiguration; + public configuration: + | UseCaseConfiguration + | AgentUseCaseConfiguration + | MCPUseCaseConfiguration + | AgentBuilderUseCaseConfiguration + | WorkflowUseCaseConfiguration; /** * The template which should be used to deploy the use case @@ -74,9 +85,9 @@ export class UseCase { name: string, description: string | undefined, cfnParameters: Map | undefined, - configuration: UseCaseConfiguration, + configuration: UseCaseConfiguration | AgentUseCaseConfiguration | MCPUseCaseConfiguration | AgentBuilderUseCaseConfiguration | WorkflowUseCaseConfiguration, userId: string, - providerName: string, + providerName: string | undefined, useCaseType: string, ) { this.useCaseId = useCaseId; @@ -87,8 +98,20 @@ export class UseCase { this.userId = userId; this.providerName = providerName this.shortUUID = this.useCaseId.substring(0, 8); - this.templateName = `${providerName}${useCaseType}`; this.useCaseType = useCaseType; + this.templateName = this.generateTemplateName(providerName, useCaseType); + } + + /** + * Generates the template name for the use case. Can be overridden by subclasses + * to provide custom template naming logic. + * + * @param providerName The provider name (e.g., 'Bedrock', 'SageMaker') + * @param useCaseType The use case type (e.g., 'Chat', 'Agent') + * @returns The template name to use for CloudFormation deployment + */ + protected generateTemplateName(providerName: string | undefined, useCaseType: string): string { + return providerName === undefined ? useCaseType : `${providerName}${useCaseType}`; } private createCfnParametersMapIfNotExists(): void { @@ -122,6 +145,16 @@ export class UseCase { return id.substring(0, 8); } + /** + * Returns the list of CloudFormation parameters that should be retained during stack updates. + * Can be overridden by subclasses to provide use case-specific retention behavior. + * + * @returns Array of parameter keys that should use previous values during updates + */ + public getRetainedParameterKeys(): string[] { + return RetainedCfnParameterKeys; + } + /** * Performs a deep copy of this object, preserving methods and property values * @@ -135,11 +168,8 @@ export class UseCase { new Map(this.cfnParameters), { ...this.configuration }, this.userId, - this.templateName - .split(/(?=[A-Z])/) - .slice(0, -1) - .join(''), // provider name - this.templateName.split(/(?=[A-Z])/).pop()! // use case type, the last capitalized portion + this.providerName, + this.useCaseType ); return newUseCase; diff --git a/source/lambda/use-case-management/model/validators/agent-builder-validator.ts b/source/lambda/use-case-management/model/validators/agent-builder-validator.ts new file mode 100644 index 00000000..b62f6c70 --- /dev/null +++ b/source/lambda/use-case-management/model/validators/agent-builder-validator.ts @@ -0,0 +1,156 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH, + AUTHENTICATION_PROVIDERS, + CfnParameterKeys, + CHAT_PROVIDERS +} from '../../utils/constants'; +import RequestValidationError from '../../utils/error'; +import { tracer } from '../../power-tools-init'; +import { UseCase } from '../use-case'; +import { AgentBuilderUseCaseConfiguration } from '../types'; +import { UseCaseValidator } from './base-validator'; +import { getCognitoDomainPrefixByUserPool } from './validation-utils'; +import { ConfigMergeUtils } from './config-merge-utils'; + +/** + * Validator for Agent Builder use cases (AgentCore). + * Handles validation of agent builder specific parameters including system prompts, + * tools, and memory configuration. + */ +export class AgentBuilderUseCaseValidator extends UseCaseValidator { + /** + * Validates a new Agent Builder use case. + * Ensures all required parameters are present and valid for AgentCore deployment. + * + * @param useCase - The Agent Builder use case to validate + * @returns A promise that resolves to the validated use case + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateNewAgentBuilderUseCase' }) + public async validateNewUseCase(useCase: UseCase): Promise { + const config = this.getTypedConfiguration(useCase); + + // Validate that required AgentBuilderParams are present + if (!config.AgentBuilderParams?.SystemPrompt) { + throw new RequestValidationError('SystemPrompt is required for Agent Builder use cases.'); + } + + // Validate LLM provider is AgentCore + if (config.LlmParams?.ModelProvider !== CHAT_PROVIDERS.BEDROCK) { + throw new RequestValidationError('Agent Builder use cases must use BEDROCK as the ModelProvider.'); + } + + // Validate system prompt length + const systemPrompt = config.AgentBuilderParams.SystemPrompt; + if (systemPrompt.length > AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH) { + throw new RequestValidationError( + `SystemPrompt exceeds maximum length of ${AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH} characters.` + ); + } + + // Validate Tools format if provided + if (config.AgentBuilderParams.Tools) { + this.validateTools(config.AgentBuilderParams.Tools); + } + + // Handle authentication configuration + await this.handleAuthenticationConfig(useCase, config); + + return useCase; + } + + /** + * Validates an updated Agent Builder use case. + * Merges existing configuration with new configuration and validates the result. + * + * @param useCase - The Agent Builder use case to validate + * @param oldDynamoDbRecordKey - The key of the old DynamoDB record + * @returns A promise that resolves to the validated use case + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateUpdateAgentBuilderUseCase' }) + public async validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise { + // retrieve the existing config from DynamoDB using a dummy use case object + let dummyOldUseCase = useCase.clone(); + dummyOldUseCase.setUseCaseConfigRecordKey(oldDynamoDbRecordKey); + const existingConfigObj = await this.useCaseConfigMgmt.getUseCaseConfigFromTable(dummyOldUseCase); + + // Merge existing config with new config using Agent Builder specific merge logic + useCase.configuration = await ConfigMergeUtils.mergeAgentBuilderConfigs( + existingConfigObj, + useCase.configuration + ); + const config = this.getTypedConfiguration(useCase); + + // Validate the merged configuration + if (!config.AgentBuilderParams?.SystemPrompt) { + throw new RequestValidationError('SystemPrompt is required for Agent Builder use cases.'); + } + + // Validate system prompt length + const systemPrompt = config.AgentBuilderParams.SystemPrompt; + if (systemPrompt.length > AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH) { + throw new RequestValidationError( + `SystemPrompt exceeds maximum length of ${AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH} characters.` + ); + } + + // Validate Tools format if provided + if (config.AgentBuilderParams.Tools) { + this.validateTools(config.AgentBuilderParams.Tools); + } + + return useCase; + } + + /** + * Validates Tools array format and content. + * + * @param tools - Array of tool configurations + * @throws RequestValidationError if validation fails + */ + private validateTools(tools: Array<{ ToolId: string }>): void { + tools.forEach((tool, index) => { + if (!tool.ToolId || typeof tool.ToolId !== 'string' || tool.ToolId.trim().length === 0) { + throw new RequestValidationError(`Tools[${index}].ToolId is required and must be a non-empty string.`); + } + }); + } + + /** + * Handles authentication configuration for Agent Builder use cases. + * + * @param useCase - The use case being validated + * @param config - The typed configuration + */ + private async handleAuthenticationConfig( + useCase: UseCase, + config: AgentBuilderUseCaseConfiguration + ): Promise { + if (config.AuthenticationParams) { + // prettier-ignore + switch (config.AuthenticationParams.AuthenticationProvider) { //NOSONAR - typescript:S1301, switch statement used for ease of future extensions + case AUTHENTICATION_PROVIDERS.COGNITO: + // overriding the previously set CognitoDomainPrefix parameter + // by fetching it dynamically based on the set user pool + + const existingUserPoolId = useCase.cfnParameters?.get(CfnParameterKeys.ExistingCognitoUserPoolId); + if (!existingUserPoolId) { + throw new Error('Undefined user pool provided for the cognito authentication provider.'); + } + + const cognitoDomainPrefix = await getCognitoDomainPrefixByUserPool(existingUserPoolId); + + if (!useCase.cfnParameters) { + throw new Error('CfnParameters are not available yet for setting Cognito Domain Prefix.'); + } + + useCase.cfnParameters.set(CfnParameterKeys.CognitoDomainPrefix, cognitoDomainPrefix); + break; + } + } + } +} diff --git a/source/lambda/use-case-management/model/validators/agent-validator.ts b/source/lambda/use-case-management/model/validators/agent-validator.ts new file mode 100644 index 00000000..0adf8e55 --- /dev/null +++ b/source/lambda/use-case-management/model/validators/agent-validator.ts @@ -0,0 +1,71 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import _ from 'lodash'; +import { AUTHENTICATION_PROVIDERS, CfnParameterKeys } from '../../utils/constants'; +import { tracer } from '../../power-tools-init'; +import { UseCase } from '../use-case'; +import { AgentUseCaseConfiguration } from '../types'; +import { UseCaseValidator } from './base-validator'; +import { getCognitoDomainPrefixByUserPool } from './validation-utils'; + +/** + * Validator for Agent use cases (Bedrock Agents). + * Handles validation of agent-specific parameters and authentication settings. + */ +export class AgentUseCaseValidator extends UseCaseValidator { + /** + * Validates a new agent use case. + * Primarily handles authentication configuration for agent deployments. + * + * @param useCase - The agent use case to validate + * @returns A promise that resolves to the validated use case + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateNewAgentUseCase' }) + public async validateNewUseCase(useCase: UseCase): Promise { + const config = this.getTypedConfiguration(useCase); + + if (config.AuthenticationParams) { + // prettier-ignore + switch (config.AuthenticationParams.AuthenticationProvider) { //NOSONAR - typescript:S1301, switch statement used for ease of future extensions + case AUTHENTICATION_PROVIDERS.COGNITO: + // overriding the previously set CognitoDomainPrefix parameter + // by fetching it dynamically based on the set user pool + + const existingUserPoolId = useCase.cfnParameters?.get(CfnParameterKeys.ExistingCognitoUserPoolId); + if (!existingUserPoolId) { + throw new Error('Undefined user pool provided for the cognito authentication provider.'); + } + + const cognitoDomainPrefix = await getCognitoDomainPrefixByUserPool(existingUserPoolId); + + if (!useCase.cfnParameters) { + throw new Error('CfnParameters are not available yet for setting Cognito Domain Prefix.'); + } + + useCase.cfnParameters.set(CfnParameterKeys.CognitoDomainPrefix, cognitoDomainPrefix); + break; + } + } + return useCase; + } + + /** + * Validates an updated agent use case. + * Merges existing configuration with new configuration for agent updates. + * + * @param useCase - The agent use case to validate + * @param oldDynamoDbRecordKey - The key of the old DynamoDB record + * @returns A promise that resolves to the validated use case + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateUpdateAgentUseCase' }) + public async validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise { + // retrieve the existing config from DynamoDB using a dummy use case object + let dummyOldUseCase = useCase.clone(); + dummyOldUseCase.setUseCaseConfigRecordKey(oldDynamoDbRecordKey); + const existingConfigObj = await this.useCaseConfigMgmt.getUseCaseConfigFromTable(dummyOldUseCase); + useCase.configuration = _.merge(existingConfigObj, useCase.configuration); + + return useCase; + } +} diff --git a/source/lambda/use-case-management/model/validators/base-validator.ts b/source/lambda/use-case-management/model/validators/base-validator.ts new file mode 100644 index 00000000..b142232d --- /dev/null +++ b/source/lambda/use-case-management/model/validators/base-validator.ts @@ -0,0 +1,47 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { StorageManagement } from '../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../ddb/use-case-config-management'; +import { UseCase } from '../use-case'; + +/** + * Abstract base class for use case validators. + * This class provides a common interface for validating different types of use cases. + * Uses generics to ensure type safety for specific configuration types. + */ +export abstract class UseCaseValidator { + protected storageMgmt: StorageManagement; + protected useCaseConfigMgmt: UseCaseConfigManagement; + + constructor(storageMgmt: StorageManagement, useCaseConfigMgmt: UseCaseConfigManagement) { + this.storageMgmt = storageMgmt; + this.useCaseConfigMgmt = useCaseConfigMgmt; + } + + /** + * Validates a new use case. + * + * @param useCase - The use case to be validated + * @returns A promise that resolves to the validated use case + */ + public abstract validateNewUseCase(useCase: UseCase): Promise; + + /** + * Validates an updated use case. + * + * @param useCase - The use case to be validated + * @param oldDynamoDbRecordKey - The key of the old DynamoDB record + * @returns A promise that resolves to the validated use case + */ + public abstract validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise; + + /** + * Type-safe getter for configuration with proper casting + * @param useCase - The use case to get configuration from + * @returns The configuration cast to the appropriate type + */ + protected getTypedConfiguration(useCase: UseCase): T { + return useCase.configuration as T; + } +} diff --git a/source/lambda/use-case-management/model/validators/config-merge-utils.ts b/source/lambda/use-case-management/model/validators/config-merge-utils.ts new file mode 100644 index 00000000..9c5c0a2d --- /dev/null +++ b/source/lambda/use-case-management/model/validators/config-merge-utils.ts @@ -0,0 +1,150 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import _ from 'lodash'; +import { tracer } from '../../power-tools-init'; + +/** + * Utility class for merging and resolving configuration objects during updates. + * This handles the complex logic of merging existing and new configurations + * while resolving conflicts and maintaining data integrity. + */ +export class ConfigMergeUtils { + /** + * Merge existing config with new config, replacing common parameters with the new values. + * ModelParams and ModelInputPayloadSchema are completely replaced rather than merged. + * Async to ensure consistent behavior with tracer decorator across environments. + * + * @param existingConfigObj Existing config data object + * @param newConfigObj Config data to be updated + * @returns Merged configuration object + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###mergeConfigs' }) + static async mergeConfigs(existingConfigObj: any, newConfigObj: any): Promise { + const modelParams = _.get(newConfigObj, 'LlmParams.ModelParams', undefined); + const sageMakerModelInputPayloadSchema = _.get( + newConfigObj, + 'LlmParams.SageMakerLlmParams.ModelInputPayloadSchema', + undefined + ); + let mergedConfig = _.merge(existingConfigObj, newConfigObj); + + if (modelParams) { + mergedConfig.LlmParams.ModelParams = modelParams; + } + if (sageMakerModelInputPayloadSchema) { + mergedConfig.LlmParams.SageMakerLlmParams.ModelInputPayloadSchema = sageMakerModelInputPayloadSchema; + } + mergedConfig = this.resolveKnowledgeBaseParamsOnUpdate(newConfigObj, mergedConfig); + mergedConfig = this.resolveBedrockModelSourceOnUpdate(newConfigObj, mergedConfig); + + return mergedConfig; + } + + /** + * Resolve Bedrock model source to ensure only one of InferenceProfileId or ModelId is present. + * Prevents invalid configurations where both values exist after merging. + * + * @param updateConfig The new config object coming from an update request + * @param mergedConfig A merged config from existing and new configs + * @returns Resolved config with only one model source identifier + */ + static resolveBedrockModelSourceOnUpdate(updateConfig: any, mergedConfig: any): any { + let resolvedConfig = mergedConfig; + + if ( + mergedConfig.LlmParams?.BedrockLlmParams?.ModelId && + mergedConfig.LlmParams?.BedrockLlmParams?.InferenceProfileId + ) { + if (updateConfig.LlmParams?.BedrockLlmParams?.ModelId) { + resolvedConfig.LlmParams.BedrockLlmParams.ModelId = updateConfig.LlmParams.BedrockLlmParams.ModelId; + delete resolvedConfig.LlmParams.BedrockLlmParams.InferenceProfileId; + } else if (updateConfig.LlmParams?.BedrockLlmParams?.InferenceProfileId) { + resolvedConfig.LlmParams.BedrockLlmParams.InferenceProfileId = + updateConfig.LlmParams.BedrockLlmParams.InferenceProfileId; + delete resolvedConfig.LlmParams.BedrockLlmParams.ModelId; + if (resolvedConfig.LlmParams?.BedrockLlmParams?.ModelArn) { + delete resolvedConfig.LlmParams?.BedrockLlmParams?.ModelArn; + } + } + } + + return resolvedConfig; + } + + /** + * Resolve knowledge base parameters to ensure NoDocsFoundResponse is properly cleared when removed. + * Prevents stale NoDocsFoundResponse values from persisting after updates. + * + * @param updateConfig The new config object coming from an update request + * @param mergedConfig A merged config from existing and new configs + * @returns Resolved config with correct NoDocsFoundResponse state + */ + static resolveKnowledgeBaseParamsOnUpdate(updateConfig: any, mergedConfig: any): any { + let resolvedConfig = mergedConfig; + + if ( + resolvedConfig?.KnowledgeBaseParams?.NoDocsFoundResponse && + !updateConfig?.KnowledgeBaseParams?.NoDocsFoundResponse + ) { + delete resolvedConfig.KnowledgeBaseParams.NoDocsFoundResponse; + } + + return resolvedConfig; + } + + /** + * Merge configs for Agent Builder use cases with array replacement semantics. + * Tools and MCPServers arrays are replaced entirely rather than merged. + * Async to ensure consistent behavior with tracer decorator across environments. + * + * @param existingConfigObj Existing config data object + * @param newConfigObj Config data to be updated + * @returns Merged configuration object + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###mergeAgentBuilderConfigs' }) + static async mergeAgentBuilderConfigs(existingConfigObj: any, newConfigObj: any): Promise { + let mergedConfig = await this.mergeConfigs(existingConfigObj, newConfigObj); + + if (newConfigObj.AgentBuilderParams !== undefined && mergedConfig.AgentBuilderParams) { + mergedConfig.AgentBuilderParams.Tools = + newConfigObj.AgentBuilderParams.Tools !== undefined ? newConfigObj.AgentBuilderParams.Tools : []; + + mergedConfig.AgentBuilderParams.MCPServers = + newConfigObj.AgentBuilderParams.MCPServers !== undefined + ? newConfigObj.AgentBuilderParams.MCPServers + : []; + } + + return mergedConfig; + } + + /** + * Merge configs for Workflow use cases with array replacement semantics. + * Agents array in AgentsAsToolsParams is replaced entirely rather than merged. + * Async to ensure consistent behavior with tracer decorator across environments. + * + * @param existingConfigObj Existing config data object + * @param newConfigObj Config data to be updated + * @returns Merged configuration object + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###mergeWorkflowConfigs' }) + static async mergeWorkflowConfigs(existingConfigObj: any, newConfigObj: any): Promise { + let mergedConfig = await this.mergeConfigs(existingConfigObj, newConfigObj); + + if ( + newConfigObj.WorkflowParams !== undefined && + newConfigObj.WorkflowParams.AgentsAsToolsParams !== undefined + ) { + if (!mergedConfig.WorkflowParams.AgentsAsToolsParams) { + mergedConfig.WorkflowParams.AgentsAsToolsParams = {}; + } + mergedConfig.WorkflowParams.AgentsAsToolsParams.Agents = + newConfigObj.WorkflowParams.AgentsAsToolsParams.Agents !== undefined + ? newConfigObj.WorkflowParams.AgentsAsToolsParams.Agents + : []; + } + + return mergedConfig; + } +} diff --git a/source/lambda/use-case-management/model/validators/index.ts b/source/lambda/use-case-management/model/validators/index.ts new file mode 100644 index 00000000..da9952e3 --- /dev/null +++ b/source/lambda/use-case-management/model/validators/index.ts @@ -0,0 +1,28 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +/** + * Centralized exports for all validator classes and utilities. + * This provides a clean interface for importing validator functionality + * throughout the application. + */ + +// Base validator and factory +export { UseCaseValidator } from './base-validator'; +export { ValidatorFactory } from './validator-factory'; + +// Specific validator implementations +export { TextUseCaseValidator } from './text-validator'; +export { AgentUseCaseValidator } from './agent-validator'; +export { AgentBuilderUseCaseValidator } from './agent-builder-validator'; +export { MCPUsecaseValidator } from './mcp-validator'; +export { WorkflowUseCaseValidator } from './workflow-validator'; + +// Utility classes and functions +export { ValidationUtils } from './validation-utils'; +export { ConfigMergeUtils } from './config-merge-utils'; +export { getCognitoDomainPrefixByUserPool } from './validation-utils'; + +// Backward compatibility - re-export the factory method as the original static method +import { ValidatorFactory } from './validator-factory'; +export const createValidator = ValidatorFactory.createValidator; diff --git a/source/lambda/use-case-management/model/validators/mcp-validator.ts b/source/lambda/use-case-management/model/validators/mcp-validator.ts new file mode 100644 index 00000000..47242dbb --- /dev/null +++ b/source/lambda/use-case-management/model/validators/mcp-validator.ts @@ -0,0 +1,665 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import _ from 'lodash'; +import { logger, tracer } from '../../power-tools-init'; +import { + GATEWAY_TARGET_TYPES, + SCHEMA_TYPE_FILE_EXTENSIONS, + McpOperationTypes, + MCP_CONTENT_TYPES, + SUPPORTED_MCP_FILE_EXTENSIONS, + OUTBOUND_AUTH_PROVIDER_TYPES +} from '../../utils/constants'; +import RequestValidationError from '../../utils/error'; +import { McpOperation, FileUploadInfo } from '../adapters/mcp-adapter'; +import { UseCaseValidator } from './base-validator'; +import { UseCase } from '../use-case'; +import { isValidArnWithRegexKey } from '../../utils/utils'; +import { MCPUseCaseConfiguration, TargetParams } from '../types'; + +/** + * Abstract base class for MCP operation validators. + * This class provides a common interface for validating different types of MCP operations. + * + */ +export abstract class McpOperationsValidator { + /** + * Validates an MCP operation. + * + * @param mcpOperation - The MCP operation to be validated + * @returns A promise that resolves to the validated MCP operation + */ + public abstract validateMcpOperation(mcpOperation: McpOperation): Promise; + + /** + * Factory method to create the appropriate validator based on the operation type. + * + * @param operationType - The type of MCP operation (e.g., McpOperationTypes.UPLOAD_SCHEMA) + * @returns An instance of the appropriate McpOperationsValidator subclass + * @throws Error if an invalid operation type is provided + */ + static createValidator(operationType: string): McpOperationsValidator { + if (operationType === McpOperationTypes.UPLOAD_SCHEMA) return new SchemaUploadValidator(); + + const errorMsg = `Invalid MCP operation type: ${operationType}`; + logger.error(`McpOperationsValidator factory creation failed: ${errorMsg}`); + throw new Error(errorMsg); + } +} + +/** + * Validator for schema upload MCP operations + */ +export class SchemaUploadValidator extends McpOperationsValidator { + /** + * Validates a schema upload MCP operation + * - Validates schema type against allowed types + * - Validates file extension matches schema type requirements + * + * @param mcpOperation - The MCP operation to validate + * @returns Promise resolving to validated operation + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateSchemaUpload' }) + public async validateMcpOperation(mcpOperation: McpOperation): Promise { + const operation = mcpOperation as any; // Type assertion for accessing properties + const processedFiles: FileUploadInfo[] = []; + const errors: string[] = []; + + // Process and validate each raw file + for (let i = 0; i < operation.rawFiles.length; i++) { + const rawFile = operation.rawFiles[i]; + + try { + const processedFile = await this.validateRequiredFields(rawFile, i); + + // Validate the processed file + await SchemaUploadValidator.validateSchemaType(processedFile, i); + await SchemaUploadValidator.validateFileExtensionCompatibility(processedFile, i); + await SchemaUploadValidator.setContentType(processedFile, i); + + processedFiles.push(processedFile); + } catch (error) { + if (error instanceof RequestValidationError) { + errors.push(error.message); + } else { + errors.push(`files[${i}]: ${error instanceof Error ? error.message : String(error)}`); + } + } + } + + // If there are any errors, combine them and throw once + if (errors.length > 0) { + const combinedErrorMsg = errors.join('; '); + logger.error(`SchemaUploadValidator validation failed with multiple errors: ${combinedErrorMsg}`); + throw new RequestValidationError(combinedErrorMsg); + } + + operation.files = processedFiles; + logger.info(`Schema upload validation passed, fileCount: ${operation.files.length}`); + + return mcpOperation; + } + + /** + * Validates required fields and creates FileUploadInfo object from raw file data + * @param rawFile - Raw file data from request + * @param fileIndex - Index for error reporting + * @returns FileUploadInfo object with validated required fields + */ + private async validateRequiredFields(rawFile: any, fileIndex: number): Promise { + if (!rawFile.schemaType || typeof rawFile.schemaType !== 'string' || rawFile.schemaType.trim() === '') { + const errorMsg = `files[${fileIndex}].schemaType is required and must be a non-empty string`; + logger.error(`SchemaUploadValidator validation failed: ${errorMsg}`); + throw new RequestValidationError(errorMsg); + } + + if (!rawFile.fileName || typeof rawFile.fileName !== 'string' || rawFile.fileName.trim() === '') { + const errorMsg = `files[${fileIndex}].fileName is required and must be a non-empty string`; + logger.error(`SchemaUploadValidator validation failed: ${errorMsg}`); + throw new RequestValidationError(errorMsg); + } + + const fileExtension = rawFile.fileName.toLowerCase().match(/\.[^.]+$/)?.[0] || ''; + + if (!fileExtension) { + const errorMsg = `files[${fileIndex}].fileName '${rawFile.fileName}' must have a valid file extension`; + logger.error(`SchemaUploadValidator validation failed: ${errorMsg}`); + throw new RequestValidationError(errorMsg); + } + + return { + schemaType: rawFile.schemaType, + fileName: rawFile.fileName, + fileExtension, + contentType: '' // Will be set by setContentType validation method + }; + } + + /** + * Validates that the schema type is supported + * @param file - The file object to validate + * @param fileIndex - The index of the file for error reporting + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateSchemaType' }) + private static async validateSchemaType(file: any, fileIndex: number): Promise { + const validSchemaTypes = Object.values(GATEWAY_TARGET_TYPES); + if (!validSchemaTypes.includes(file.schemaType as GATEWAY_TARGET_TYPES)) { + const errorMsg = `Invalid files[${fileIndex}].schemaType '${file.schemaType}' for file '${file.fileName}'. Must be one of: ${validSchemaTypes.join(', ')}`; + logger.error(`SchemaUploadValidator schema type validation failed: ${errorMsg}`); + throw new RequestValidationError(errorMsg); + } + } + + /** + * Validates that the file extension is compatible with the schema type (following TextUseCaseValidator pattern) + * @param file - The file object to validate + * @param fileIndex - The index of the file for error reporting + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateFileExtensionCompatibility' }) + private static async validateFileExtensionCompatibility(file: any, fileIndex: number): Promise { + const allowedExtensionsForSchema = SCHEMA_TYPE_FILE_EXTENSIONS[file.schemaType as GATEWAY_TARGET_TYPES]; + + if (!allowedExtensionsForSchema || !allowedExtensionsForSchema.includes(file.fileExtension)) { + const errorMsg = `Invalid files[${fileIndex}] file extension '${file.fileExtension}' for file '${file.fileName}' with schema type '${file.schemaType}'. Allowed extensions: ${allowedExtensionsForSchema?.join(', ') || 'none'}`; + logger.error(`SchemaUploadValidator file extension compatibility validation failed: ${errorMsg}`); + throw new RequestValidationError(errorMsg); + } + } + + /** + * Sets the appropriate content type based on file extension + * @param file - The file object to set content type for + * @param fileIndex - The index of the file for error reporting + * @throws RequestValidationError if extension is not supported + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###setContentType' }) + private static async setContentType(file: any, fileIndex: number): Promise { + switch (file.fileExtension.toLowerCase()) { + case '.json': + file.contentType = MCP_CONTENT_TYPES.JSON; + break; + case '.yaml': + case '.yml': + file.contentType = MCP_CONTENT_TYPES.YAML; + break; + case '.smithy': + file.contentType = MCP_CONTENT_TYPES.TEXT_PLAIN; + break; + default: + const errorMsg = `Unsupported file extension '${file.fileExtension}' for files[${fileIndex}] file '${file.fileName}'. Supported extensions: ${SUPPORTED_MCP_FILE_EXTENSIONS}`; + logger.error(`SchemaUploadValidator content type determination failed: ${errorMsg}`); + throw new RequestValidationError(errorMsg); + } + } +} + +export class MCPUsecaseValidator extends UseCaseValidator { + /** + * Validates a new MCP use case deployment. Will: + * - Validate required MCP parameters are present + * - Validate runtime parameters (EcrUri is required) + * - Validate gateway parameters if provided + * - Handle Cognito authentication configuration if present + * + * @param useCase - The MCP use case to validate + * @returns validated use case with values filled in where needed + * @throws if the config is invalid or cannot be validated for some reason + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateNewMCPUseCase' }) + public async validateNewUseCase(useCase: UseCase): Promise { + // Validate MCP-specific parameters + await MCPUsecaseValidator.validateMCPParams(useCase); + return useCase; + } + + /** + * Validates an updated MCP use case. Will: + * - Retrieve existing configuration from DynamoDB + * - Merge existing config with new config + * - Validate the merged configuration + * + * @param useCase - The MCP use case to validate + * @param oldDynamoDbRecordKey - The key of the old DynamoDB record + * @returns validated use case with values filled in where needed + * @throws if the config is invalid or cannot be validated for some reason + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateUpdateMCPUseCase' }) + public async validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise { + // retrieve the existing config from DynamoDB using a dummy use case object + let dummyOldUseCase = useCase.clone(); + dummyOldUseCase.setUseCaseConfigRecordKey(oldDynamoDbRecordKey); + const existingConfigObj = await this.useCaseConfigMgmt.getUseCaseConfigFromTable(dummyOldUseCase); + + // Custom merger that replaces arrays instead of merging them + // This is critical for TargetParams - we want to replace the entire array, not merge elements + const customizer = (_objValue: any, srcValue: any) => { + if (Array.isArray(srcValue)) { + return srcValue; // Replace arrays completely instead of merging + } + return undefined; // Let lodash handle the default merge behavior + }; + + // Merge existing config with new config using custom merger + useCase.configuration = _.mergeWith({}, existingConfigObj, useCase.configuration, customizer); + + logger.info(`Updated MCP configuration with ${(useCase.configuration as any).MCPParams?.GatewayParams?.TargetParams?.length || 0} targets`); + + // Validate the merged configuration + await MCPUsecaseValidator.validateMCPParams(useCase); + + return useCase; + } + + /** + * Validates MCP-specific parameters in the use case configuration + * + * @param useCase - The use case to validate + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateMCPParams' }) + private static async validateMCPParams(useCase: UseCase): Promise { + const mcpConfig = useCase.configuration as any; // Cast to access MCPParams + + if (!mcpConfig.MCPParams) { + throw new RequestValidationError('MCPParams is required for MCP use cases'); + } + + const mcpParams = mcpConfig.MCPParams; + // Validate that exactly one of GatewayParams or RuntimeParams is provided + const hasGatewayParams = !!mcpParams.GatewayParams; + const hasRuntimeParams = !!mcpParams.RuntimeParams; + + if (!hasGatewayParams && !hasRuntimeParams) { + throw new RequestValidationError( + 'Either GatewayParams or RuntimeParams must be provided for MCP use cases' + ); + } + + if (hasGatewayParams && hasRuntimeParams) { + throw new RequestValidationError('Only one of GatewayParams or RuntimeParams should be provided, not both'); + } + + // If RuntimeParams is provided, validate EcrUri is required + if (hasRuntimeParams) { + if ( + !mcpParams.RuntimeParams.EcrUri || + typeof mcpParams.RuntimeParams.EcrUri !== 'string' || + mcpParams.RuntimeParams.EcrUri.trim() === '' + ) { + throw new RequestValidationError( + 'ECR URI is required when deploying MCP servers with Agentcore Runtime' + ); + } + + // Validate EcrUri region + await MCPUsecaseValidator.validateEcrUri(mcpParams.RuntimeParams.EcrUri); + + // Validate environment variables if provided + if (mcpParams.RuntimeParams.EnvironmentVariables !== undefined) { + if (mcpParams.RuntimeParams.EnvironmentVariables === null) { + throw new RequestValidationError('Environment variables must be provided as an object'); + } + MCPUsecaseValidator.validateEnvironmentVariables(mcpParams.RuntimeParams.EnvironmentVariables); + } + } + + // If GatewayParams is provided, validate it (existing validation logic) + if (hasGatewayParams) { + await MCPUsecaseValidator.validateGatewayParams(mcpParams.GatewayParams); + } + } + + /** + * Validates gateway parameters if present + * + * @param gatewayParams - The gateway parameters to validate + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateGatewayParams' }) + private static async validateGatewayParams(gatewayParams: any): Promise { + // Validate optional gateway fields if present + await MCPUsecaseValidator.validateOptionalGatewayFields(gatewayParams); + + // TargetParams is now required + if (!gatewayParams.TargetParams) { + throw new RequestValidationError('Target parameters are required when using Gateway deployment mode'); + } + + if (!Array.isArray(gatewayParams.TargetParams)) { + throw new RequestValidationError('Target parameters must be provided as a list'); + } + + if (gatewayParams.TargetParams.length === 0) { + throw new RequestValidationError('At least one target must be configured for Gateway deployment'); + } + + for (const target of gatewayParams.TargetParams) { + await MCPUsecaseValidator.validateTargetRequiredFields(target); + + if (target.TargetType === GATEWAY_TARGET_TYPES.LAMBDA) + await MCPUsecaseValidator.validateLambdaTarget(target); + else if (target.TargetType === GATEWAY_TARGET_TYPES.OPEN_API) + await MCPUsecaseValidator.validateOpenApiTarget(target); + } + } + + /** + * Validates lambda target parameters + * + * @param target - The target parameters to validate + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateLambdaTarget' }) + private static async validateLambdaTarget(target: TargetParams): Promise { + if (typeof target.LambdaArn !== 'string' || target.LambdaArn.trim() === '') { + throw new RequestValidationError(`Lambda ARN is missing for target "${target.TargetName}"`); + } + + if (!isValidArnWithRegexKey(target.LambdaArn, 'lambda', 'lambda')) { + throw new RequestValidationError( + `Invalid Lambda ARN format for target "${target.TargetName}". Expected format: arn:aws:lambda:region:account:function:function-name` + ); + } + } + + /** + * Validates OpenAPI target parameters + * + * @param target - The target parameters to validate + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateOpenApiTarget' }) + private static async validateOpenApiTarget(target: TargetParams): Promise { + if (!target.OutboundAuthParams) { + throw new RequestValidationError(`Outbound authentication paramters are missing for the open api schema`); + } + await MCPUsecaseValidator.validateOutboundAuthParams(target.OutboundAuthParams, target.TargetName); + } + + /** + * Validates required target fields and schema URI format + * + * @param target - The target parameters to validate + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateTargetRequiredFields' }) + private static async validateTargetRequiredFields(target: TargetParams): Promise { + const { TargetName: targetName, TargetType: targetType, SchemaUri: schemaUri } = target; + + // Validate optional TargetId if present + if (target.TargetId !== undefined) { + await MCPUsecaseValidator.validateTargetId(target.TargetId, targetName); + } + + if (!targetName || typeof targetName !== 'string' || targetName.trim() === '') { + throw new RequestValidationError('Target name is required and cannot be empty'); + } + + if (/\s/.test(targetName)) { + throw new RequestValidationError('Target name must not contain spaces'); + } + + // Validate TargetType is required and valid (reusing mcp-validator pattern) + if (!targetType) { + throw new RequestValidationError(`Target type is required for target "${targetName}"`); + } + + const validTargetTypes = Object.values(GATEWAY_TARGET_TYPES); + if (!validTargetTypes.includes(targetType as GATEWAY_TARGET_TYPES)) { + throw new RequestValidationError( + `Invalid target type for "${targetName}". Must be one of: ${validTargetTypes.join(', ')}` + ); + } + + // SchemaUri is now required for ALL TargetTypes + if (!schemaUri || typeof schemaUri !== 'string' || schemaUri.trim() === '') { + throw new RequestValidationError(`Schema URI is missing for target "${targetName}"`); + } + + const schemaUriPattern = /^mcp\/schemas\/([^\/]+)\/([a-f0-9-]{36})\.([^.]+)$/; + const schemaUriMatch = schemaUri.match(schemaUriPattern); + + if (!schemaUriMatch) { + throw new RequestValidationError( + `Invalid schema URI format for target "${targetName}". Must follow pattern: mcp/schemas/{targetType}/{uuid}.{extension}` + ); + } + + const [, schemaTargetType, , fileExtension] = schemaUriMatch; + + // Validate that schema URI target type matches declared target type + if (schemaTargetType !== targetType) { + throw new RequestValidationError( + `Schema URI target type "${schemaTargetType}" does not match declared target type "${targetType}" for target "${targetName}"` + ); + } + + const allowedExtensionsForSchema = SCHEMA_TYPE_FILE_EXTENSIONS[targetType]; + const normalizedExtension = `.${fileExtension.toLowerCase()}`; + + if (!allowedExtensionsForSchema || !allowedExtensionsForSchema.includes(normalizedExtension)) { + throw new RequestValidationError( + `Invalid file extension '.${fileExtension}' for target "${targetName}" with schema type '${targetType}'. Allowed extensions: ${allowedExtensionsForSchema?.join(', ') || 'none' + }` + ); + } + } + + /** + * Validates outbound authentication parameters + * + * @param authParams - The outbound auth parameters to validate + * @param targetName - The name of the target for error reporting + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateOutboundAuthParams' }) + private static async validateOutboundAuthParams(authParams: any, targetName: string): Promise { + if ( + !authParams.OutboundAuthProviderArn || + typeof authParams.OutboundAuthProviderArn !== 'string' || + authParams.OutboundAuthProviderArn.trim() === '' + ) { + throw new RequestValidationError( + `Outbound authentication provider ARN is required for target "${targetName}"` + ); + } + + const isValidOAuth = isValidArnWithRegexKey( + authParams.OutboundAuthProviderArn, + 'bedrock-agentcore', + 'bedrock-agentcore-identity-OAUTH' + ); + const isValidApiKey = isValidArnWithRegexKey( + authParams.OutboundAuthProviderArn, + 'bedrock-agentcore', + 'bedrock-agentcore-identity-API_KEY' + ); + + if (!isValidOAuth && !isValidApiKey) { + throw new RequestValidationError( + `Invalid outbound authentication provider ARN format for target "${targetName}"` + ); + } + + if (!authParams.OutboundAuthProviderType) { + throw new RequestValidationError( + `Outbound authentication provider type is required for target "${targetName}"` + ); + } + + const validAuthTypes = Object.values(OUTBOUND_AUTH_PROVIDER_TYPES); + if (!validAuthTypes.includes(authParams.OutboundAuthProviderType)) { + throw new RequestValidationError( + `Invalid authentication type for target "${targetName}". Must be one of: ${validAuthTypes.join(', ')}` + ); + } + } + + /** + * Validates optional gateway fields if present + * + * @param gatewayParams - The gateway parameters to validate + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateOptionalGatewayFields' }) + private static async validateOptionalGatewayFields(gatewayParams: any): Promise { + this.validateGatewayIdIfPresent(gatewayParams.GatewayId); + this.validateGatewayArnIfPresent(gatewayParams.GatewayArn); + this.validateGatewayUrlIfPresent(gatewayParams.GatewayUrl); + this.validateGatewayNameIfPresent(gatewayParams.GatewayName); + } + + private static validateGatewayIdIfPresent(gatewayId: any): void { + if (gatewayId === undefined) return; + + if (typeof gatewayId !== 'string' || gatewayId.trim() === '') { + throw new RequestValidationError('GatewayId must be a non-empty string'); + } + } + + private static validateGatewayArnIfPresent(gatewayArn: any): void { + if (gatewayArn === undefined) return; + + if (typeof gatewayArn !== 'string' || gatewayArn.trim() === '') { + throw new RequestValidationError('GatewayArn must be a non-empty string'); + } + + if (!isValidArnWithRegexKey(gatewayArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')) { + throw new RequestValidationError( + 'GatewayArn must follow pattern: arn:aws:bedrock-agentcore:{region}:{AccountId}:gateway/{GatewayId}' + ); + } + } + + private static validateGatewayUrlIfPresent(gatewayUrl: any): void { + if (gatewayUrl === undefined) return; + + if (typeof gatewayUrl !== 'string' || gatewayUrl.trim() === '') { + throw new RequestValidationError('GatewayUrl must be a non-empty string'); + } + + const gatewayUrlPattern = + /^https:\/\/[a-zA-Z0-9-]+\.gateway\.bedrock-agentcore\.[a-z0-9-]+\.amazonaws\.com\/mcp$/; + if (!gatewayUrlPattern.test(gatewayUrl)) { + throw new RequestValidationError( + 'GatewayUrl must follow pattern: https://{GatewayId}.gateway.bedrock-agentcore.{Region}.amazonaws.com/mcp' + ); + } + } + + private static validateGatewayNameIfPresent(gatewayName: any): void { + if (gatewayName === undefined) return; + + if (typeof gatewayName !== 'string' || gatewayName.trim() === '') { + throw new RequestValidationError('GatewayName must be a non-empty string'); + } + } + + /** + * Validates TargetId if present + * + * @param targetId - The target ID to validate + * @param targetName - The target name for error reporting + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateTargetId' }) + private static async validateTargetId(targetId: any, targetName: string): Promise { + if (typeof targetId !== 'string' || targetId.trim() === '') { + throw new RequestValidationError(`TargetId must be a non-empty string for target "${targetName}"`); + } + + const targetIdPattern = /^[A-Z0-9]{10}$/; + if (!targetIdPattern.test(targetId)) { + throw new RequestValidationError( + `TargetId must be exactly 10 uppercase alphanumeric characters for target "${targetName}"` + ); + } + + if (targetId.length !== 10) { + throw new RequestValidationError(`TargetId must be exactly 10 characters long for target "${targetName}"`); + } + } + + /** + * Validates ECR URI region matches the deployment region + * + * @param ecrUri - The ECR URI to validate + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateEcrUri' }) + private static async validateEcrUri(ecrUri: string): Promise { + const ecrRegion = ecrUri.match(/\.dkr\.ecr\.([a-z\d-]+)\.amazonaws\.com\//)![1]; + const currentRegion = process.env.AWS_REGION; + + // Validate region matches deployment region + if (currentRegion && ecrRegion !== currentRegion) { + throw new RequestValidationError( + `ECR image must be in the same region (${currentRegion}) as the deployment.` + ); + } + } + + /** + * Validates environment variables for runtime parameters + * + * @param environmentVariables - The environment variables object to validate + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateEnvironmentVariables' }) + private static validateEnvironmentVariables(environmentVariables: { [key: string]: string }): void { + const envVarEntries = Object.entries(environmentVariables); + + // Check maximum number of environment variables (AWS Lambda limit is 4KB total, we'll limit to 50 variables) + if (envVarEntries.length > 50) { + throw new RequestValidationError('Maximum of 50 environment variables are allowed'); + } + + // Validate each environment variable + for (const [key, value] of envVarEntries) { + // Validate environment variable name + if (typeof key !== 'string' || key.trim() === '') { + throw new RequestValidationError('Environment variable names cannot be empty'); + } + + // Environment variable names must follow AWS naming conventions + const envVarNamePattern = /^[a-zA-Z_]\w*$/; + if (!envVarNamePattern.test(key)) { + throw new RequestValidationError( + `Invalid environment variable name "${key}". Names must start with a letter or underscore and contain only letters, numbers, and underscores` + ); + } + + // Check name length (AWS limit is 256 characters) + if (key.length > 256) { + throw new RequestValidationError( + `Environment variable name "${key}" exceeds maximum length of 256 characters` + ); + } + + // Validate environment variable value + if (typeof value !== 'string') { + throw new RequestValidationError(`Environment variable value for "${key}" must be a string`); + } + + // Check value length (AWS limit is 4KB per variable, we'll be more conservative) + if (value.length > 2048) { + throw new RequestValidationError( + `Environment variable value for "${key}" exceeds maximum length of 2048 characters` + ); + } + } + + // Calculate total size of environment variables (approximate) + const totalSize = envVarEntries.reduce((sum, [key, value]) => sum + key.length + value.length, 0); + if (totalSize > 4096) { + // 4KB limit + throw new RequestValidationError( + 'Total size of environment variables exceeds 4KB limit. Please reduce the number or size of environment variables' + ); + } + } +} diff --git a/source/lambda/use-case-management/model/validators/text-validator.ts b/source/lambda/use-case-management/model/validators/text-validator.ts new file mode 100644 index 00000000..5114c4df --- /dev/null +++ b/source/lambda/use-case-management/model/validators/text-validator.ts @@ -0,0 +1,106 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { AUTHENTICATION_PROVIDERS, CfnParameterKeys } from '../../utils/constants'; +import { tracer } from '../../power-tools-init'; +import { UseCase } from '../use-case'; +import { UseCaseConfiguration } from '../types'; +import { UseCaseValidator } from './base-validator'; +import { ConfigMergeUtils } from './config-merge-utils'; +import { ValidationUtils, getCognitoDomainPrefixByUserPool } from './validation-utils'; + +/** + * Validator for Text/Chat use cases. + * Handles validation of LLM parameters, prompts, knowledge base configuration, + * and authentication settings specific to text-based use cases. + */ +export class TextUseCaseValidator extends UseCaseValidator { + /** + * Validates a use case meant for a new text deployment fills in values as required. Will: + * - Check the model info database to ensure provider/modelid combination is valid + * - Populate a default prompt if none is provided + * + * @param useCase a use case to validate + * @returns validated use case with values filled in where needed + * @throws if the use case is invalid or cannot be validated for some reason + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateNewTextUseCase' }) + public async validateNewUseCase(useCase: UseCase): Promise { + const config = this.getTypedConfiguration(useCase); + const modelInfo = await this.storageMgmt.getModelInfo(useCase); // will throw if provider/model id combo does not exist + + if (!config.LlmParams!.PromptParams) { + config.LlmParams!.PromptParams = {}; + } + if (!config.LlmParams!.PromptParams.PromptTemplate) { + config.LlmParams!.PromptParams.PromptTemplate = modelInfo.Prompt; + } + if (!config.LlmParams!.PromptParams.DisambiguationPromptTemplate) { + config.LlmParams!.PromptParams.DisambiguationPromptTemplate = modelInfo.DisambiguationPrompt; + } + + if (config.AuthenticationParams) { + // prettier-ignore + switch (config.AuthenticationParams.AuthenticationProvider) { //NOSONAR - typescript:S1301, switch statement used for ease of future extensions + case AUTHENTICATION_PROVIDERS.COGNITO: + // overriding the previously set CognitoDomainPrefix parameter + // by fetching it dynamically based on the set user pool + + const existingUserPoolId = useCase.cfnParameters?.get(CfnParameterKeys.ExistingCognitoUserPoolId); + if (!existingUserPoolId) { + throw new Error('Undefined user pool provided for the cognito authentication provider.'); + } + + const cognitoDomainPrefix = await getCognitoDomainPrefixByUserPool(existingUserPoolId); + + if (!useCase.cfnParameters) { + throw new Error('CFNParameters are not available yet for setting Cognito Domain Prefix.'); + } + + useCase.cfnParameters.set(CfnParameterKeys.CognitoDomainPrefix, cognitoDomainPrefix); + + break; + } + } + + ValidationUtils.checkModelInputPayloadSchema(config); + ValidationUtils.checkPromptsAreCompatible(config); + ValidationUtils.checkPromptIsEscaped(config); + ValidationUtils.checkKnowledgeBaseTypeMatchesParams(config); + + return useCase; + } + + /** + * Validates a use case meant for an update fills in values as required. Will: + * - Check the model info database to ensure provider/modelid combination is valid + * + * @param useCase a use case to validate + * @param oldDynamoDbRecordKey the key of the old DynamoDB record. Used to retrieve the existing config and merge with the new one. + * @returns validated use case with values filled in where needed + * @throws if the use case is invalid or cannot be validated for some reason + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateUpdateTextUseCase' }) + public async validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise { + // retrieve the existing config from DynamoDB using a dummy use case object + let dummyOldUseCase = useCase.clone(); + dummyOldUseCase.setUseCaseConfigRecordKey(oldDynamoDbRecordKey); + const existingConfigObj = await this.useCaseConfigMgmt.getUseCaseConfigFromTable(dummyOldUseCase); + // this await is required for this to work on lambda, despite it seeming unnecessary here + useCase.configuration = await ConfigMergeUtils.mergeConfigs(existingConfigObj, useCase.configuration); + const config = this.getTypedConfiguration(useCase); + + await this.storageMgmt.getModelInfo(useCase); // will throw if provider/model id combo does not exist + ValidationUtils.checkModelInputPayloadSchema(config); + ValidationUtils.checkPromptsAreCompatible(config); + ValidationUtils.checkPromptIsEscaped(config); + ValidationUtils.checkKnowledgeBaseTypeMatchesParams(config); + + return useCase; + } + + // Static methods for backward compatibility with tests + public static mergeConfigs = ConfigMergeUtils.mergeConfigs; + public static resolveBedrockModelSourceOnUpdate = ConfigMergeUtils.resolveBedrockModelSourceOnUpdate; + public static resolveKnowledgeBaseParamsOnUpdate = ConfigMergeUtils.resolveKnowledgeBaseParamsOnUpdate; +} diff --git a/source/lambda/use-case-management/model/validators/validation-utils.ts b/source/lambda/use-case-management/model/validators/validation-utils.ts new file mode 100644 index 00000000..9adf8ae5 --- /dev/null +++ b/source/lambda/use-case-management/model/validators/validation-utils.ts @@ -0,0 +1,224 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { CognitoIdentityProviderClient, DescribeUserPoolCommand } from '@aws-sdk/client-cognito-identity-provider'; +import { AWSClientManager } from 'aws-sdk-lib'; +import _ from 'lodash'; +import { logger, tracer } from '../../power-tools-init'; +import { + ChatRequiredPlaceholders, + CHAT_PROVIDERS, + DisambiguationRequiredPlaceholders, + KnowledgeBaseTypes, + RAGChatRequiredPlaceholders +} from '../../utils/constants'; +import RequestValidationError from '../../utils/error'; +import { UseCaseConfiguration } from '../types'; + +/** + * Utility class containing shared validation methods used across different validators. + * This promotes code reuse and maintains consistency in validation logic. + */ +export class ValidationUtils { + /** + * Checks that the provided prompt is valid given the configuration. + * Namely, correct placeholders are present for the given RAG configuration. + * + * @param config configuration to check + * @throws if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkModelInputPayloadSchema' }) + static checkModelInputPayloadSchema(config: UseCaseConfiguration): void { + const modelInputPayloadSchema = config.LlmParams!.SageMakerLlmParams?.ModelInputPayloadSchema; + if (modelInputPayloadSchema !== undefined) { + // finds all the placeholders ("<>") in the payload schema + const regex = /<<\w+>>/g; + const reservedPlaceholders = ['<>', '<>']; + const modelInputPayloadSchemaStr = JSON.stringify(modelInputPayloadSchema); + const matches = modelInputPayloadSchemaStr.match(regex); + const modelParams = config.LlmParams!.ModelParams; + + if (matches) { + // reserved placeholders aren't provided in the model parameters + _.remove(matches as string[], (match: string) => { + return reservedPlaceholders.includes(match); + }); + + matches.forEach((match) => { + const key = match.replace('<<', '').replace('>>', ''); + if (modelParams === undefined) { + throw new RequestValidationError( + 'No model parameters were provided in the useCase despite requiring parameters in the input payload schema.' + ); + } else if (modelParams[key as keyof Object] !== undefined) { + return; + } else { + throw new RequestValidationError( + `InvalidModelParameter: ${key} is not a valid model parameter present in the Model Parameters` + ); + } + }); + } + } + } + + /** + * Checks that the provided prompt is valid given the configuration. + * Namely, correct placeholders are present for given RAG configuration. + * + * @param config configuration to check + * @throws if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkPromptsAreCompatible' }) + static checkPromptsAreCompatible(config: UseCaseConfiguration): void { + //validate main prompt template + const promptTemplate = config.LlmParams!.PromptParams!.PromptTemplate!; + const chat_provider = config.LlmParams!.ModelProvider; + const requiredPlaceholders = config.LlmParams!.RAGEnabled + ? RAGChatRequiredPlaceholders[chat_provider as CHAT_PROVIDERS] + : ChatRequiredPlaceholders[chat_provider as CHAT_PROVIDERS]; + + requiredPlaceholders.forEach((placeholder: string) => { + if (!promptTemplate.includes(placeholder)) { + throw new RequestValidationError( + `Provided prompt template does not have the required placeholder '${placeholder}'.` + ); + } + + if (promptTemplate.indexOf(placeholder) !== promptTemplate.lastIndexOf(placeholder)) { + throw new RequestValidationError( + `Placeholder '${placeholder}' should appear only once in the prompt template.` + ); + } + }); + + if (config.LlmParams!.RAGEnabled && config.LlmParams!.PromptParams?.DisambiguationEnabled) { + const disambiguationPromptTemplate = config.LlmParams!.PromptParams.DisambiguationPromptTemplate!; + + DisambiguationRequiredPlaceholders.forEach((placeholder: string) => { + if (!disambiguationPromptTemplate.includes(placeholder)) { + throw new RequestValidationError( + `Provided disambiguation prompt template does not have the required placeholder '${placeholder}'.` + ); + } + + if ( + disambiguationPromptTemplate.indexOf(placeholder) !== + disambiguationPromptTemplate.lastIndexOf(placeholder) + ) { + throw new RequestValidationError( + `Placeholder '${placeholder}' should appear only once in the disambiguation prompt template.` + ); + } + }); + } + } + + /** + * In order for a prompt to contain curly braces (e.g. providing code or JSON data in the prompt), + * LangChain requires they are escaped by being doubled ({{ }} rather than {}), + * so as to not interfere with the placeholders (e.g. history, etc.) + * + * @param config configuration to check + * @throws if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkPromptIsEscaped' }) + static checkPromptIsEscaped(config: UseCaseConfiguration): void { + // removes all the placeholders, which are valid uses of unescaped curly braces + let promptTemplate = config.LlmParams!.PromptParams!.PromptTemplate!; + const chat_provider = config.LlmParams!.ModelProvider!; + const requiredPlaceholders = config.LlmParams!.RAGEnabled + ? RAGChatRequiredPlaceholders[chat_provider as CHAT_PROVIDERS] + : ChatRequiredPlaceholders[chat_provider as CHAT_PROVIDERS]; + + requiredPlaceholders.forEach((placeholder: string) => { + promptTemplate = promptTemplate.replace(placeholder, ''); + }); + + // ensure both types of braces are escaped (doubled), per langchain standards + const escapableCharacters = ['{', '}']; + escapableCharacters.forEach((char) => { + let index = 0; + while (index < promptTemplate.length) { + const charIndex = promptTemplate.indexOf(char, index); + + if (charIndex === -1) { + // No more curly braces found + break; + } + + if (promptTemplate.charAt(charIndex + 1) !== char) { + throw new RequestValidationError(`Prompt template contains an unescaped curly brace '${char}'`); + } else { + index = charIndex + 2; + } + } + }); + } + + /** + * Checks that the selected KnowledgeBaseType is compatible with the KnowledgeBaseParams provided + * + * @param config configuration to check + * @throws if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###checkKnowledgeBaseTypeMatchesParams' }) + static checkKnowledgeBaseTypeMatchesParams(config: UseCaseConfiguration): void { + if (config.LlmParams?.RAGEnabled) { + const knowledgeBaseType = config.KnowledgeBaseParams!.KnowledgeBaseType!; + + let typeSpecificRequiredParamsObject: string = ''; + + switch (knowledgeBaseType) { + case KnowledgeBaseTypes.KENDRA: + if (config.KnowledgeBaseParams!.KendraKnowledgeBaseParams !== undefined) { + return; + } else { + typeSpecificRequiredParamsObject = 'KendraKnowledgeBaseParams'; + } + break; + case KnowledgeBaseTypes.BEDROCK: + if (config.KnowledgeBaseParams!.BedrockKnowledgeBaseParams !== undefined) { + return; + } else { + typeSpecificRequiredParamsObject = 'BedrockKnowledgeBaseParams'; + } + break; + default: + throw new RequestValidationError( + `Provided knowledge base type ${knowledgeBaseType} is not supported. You should not get this error.` + ); + } + + throw new RequestValidationError( + `Provided knowledge base type ${knowledgeBaseType} requires ${typeSpecificRequiredParamsObject} to be present in KnowledgeBaseParams.` + ); + } + } +} + +/** + * Utility function to get Cognito domain prefix by user pool ID. + * Separated from the main validator classes for reusability. + * + * @param userPoolId - The Cognito User Pool ID + * @returns The domain prefix for the user pool + * @throws Error if the user pool is not found or has no domain + */ +export async function getCognitoDomainPrefixByUserPool(userPoolId: string): Promise { + const client = AWSClientManager.getServiceClient('cognito', tracer); + + try { + const command = new DescribeUserPoolCommand({ UserPoolId: userPoolId }); + const response = await client.send(command); + + if (response?.UserPool?.Domain) { + return response.UserPool.Domain; + } else { + throw new Error('No domain found for this user pool.'); + } + } catch (error) { + logger.error(`Error fetching user pool details. Error: ${error}`); + throw error; + } +} diff --git a/source/lambda/use-case-management/model/validators/validator-factory.ts b/source/lambda/use-case-management/model/validators/validator-factory.ts new file mode 100644 index 00000000..b6ec8183 --- /dev/null +++ b/source/lambda/use-case-management/model/validators/validator-factory.ts @@ -0,0 +1,50 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { StorageManagement } from '../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../ddb/use-case-config-management'; +import { UseCaseTypes } from '../../utils/constants'; +import { AgentBuilderUseCaseValidator } from './agent-builder-validator'; +import { AgentUseCaseValidator } from './agent-validator'; +import { UseCaseValidator } from './base-validator'; +import { TextUseCaseValidator } from './text-validator'; +import { MCPUsecaseValidator } from './mcp-validator'; +import { WorkflowUseCaseValidator } from './workflow-validator'; + +/** + * Factory class for creating appropriate validators based on use case type. + * This centralizes validator creation and makes it easy to add new use case types. + */ +export class ValidatorFactory { + /** + * Factory method to create the appropriate validator based on the use case type. + * + * @param useCaseType - The type of use case (e.g., 'Text', 'Agent', 'AgentBuilder', 'Workflow') + * @param storageMgmt - The storage management instance + * @param useCaseConfigMgmt - The use case configuration management instance + * @returns An instance of the appropriate UseCaseValidator subclass + * @throws Error if an invalid use case type is provided + */ + static createValidator( + useCaseType: string, + storageMgmt: StorageManagement, + useCaseConfigMgmt: UseCaseConfigManagement + ): UseCaseValidator { + switch (useCaseType) { + case UseCaseTypes.CHAT: + return new TextUseCaseValidator(storageMgmt, useCaseConfigMgmt); + case UseCaseTypes.RAGChat: + return new TextUseCaseValidator(storageMgmt, useCaseConfigMgmt); + case UseCaseTypes.AGENT: + return new AgentUseCaseValidator(storageMgmt, useCaseConfigMgmt); + case UseCaseTypes.AGENT_BUILDER: + return new AgentBuilderUseCaseValidator(storageMgmt, useCaseConfigMgmt); + case UseCaseTypes.MCP_SERVER: + return new MCPUsecaseValidator(storageMgmt, useCaseConfigMgmt); + case UseCaseTypes.WORKFLOW: + return new WorkflowUseCaseValidator(storageMgmt, useCaseConfigMgmt); + default: + throw new Error(`Invalid use case type: ${useCaseType}`); + } + } +} diff --git a/source/lambda/use-case-management/model/validators/workflow-validator.ts b/source/lambda/use-case-management/model/validators/workflow-validator.ts new file mode 100644 index 00000000..3617b1b6 --- /dev/null +++ b/source/lambda/use-case-management/model/validators/workflow-validator.ts @@ -0,0 +1,208 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH, + AUTHENTICATION_PROVIDERS, + CfnParameterKeys, + CHAT_PROVIDERS, + SUPPORTED_WORKFLOW_ORCHESTRATION_PATTERNS, + UseCaseTypes, + WORKFLOW_ORCHESTRATION_PATTERNS +} from '../../utils/constants'; +import RequestValidationError from '../../utils/error'; +import { tracer } from '../../power-tools-init'; +import { UseCase } from '../use-case'; +import { WorkflowUseCaseConfiguration, AgentBuilderUseCaseConfiguration } from '../types'; +import { UseCaseValidator } from './base-validator'; +import { ConfigMergeUtils } from './config-merge-utils'; +import { getCognitoDomainPrefixByUserPool } from './validation-utils'; + +/** + * Validator for Workflow use cases. + * Handles validation of workflow specific parameters including orchestration patterns, + * system prompts, and selected agents. + */ +export class WorkflowUseCaseValidator extends UseCaseValidator { + /** + * Validates a new Workflow use case. + * Ensures all required parameters are present and valid for workflow deployment. + * + * @param useCase - The Workflow use case to validate + * @returns A promise that resolves to the validated use case + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateNewWorkflowUseCase' }) + public async validateNewUseCase(useCase: UseCase): Promise { + const config = this.getTypedConfiguration(useCase); + + // Validate that required WorkflowParams are present + if (!config.WorkflowParams?.OrchestrationPattern) { + throw new RequestValidationError('OrchestrationPattern is required for Workflow use cases.'); + } + + // Validate LLM provider is Bedrock + if (config.LlmParams?.ModelProvider !== CHAT_PROVIDERS.BEDROCK) { + throw new RequestValidationError('Workflow use cases must use BEDROCK as the ModelProvider.'); + } + + // Validate system prompt is provided and within length limits + if (!config.WorkflowParams?.SystemPrompt) { + throw new RequestValidationError('SystemPrompt is required for Workflow use cases.'); + } + if (config.WorkflowParams.SystemPrompt.length > AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH) { + throw new RequestValidationError( + `SystemPrompt exceeds maximum length of ${AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH} characters.` + ); + } + + // Validate orchestration pattern + this.validateOrchestrationPattern(config.WorkflowParams.OrchestrationPattern); + + // Validate agents as tools if provided + if (config.WorkflowParams.AgentsAsToolsParams?.Agents) { + this.validateAgentsAsTools(config.WorkflowParams.AgentsAsToolsParams.Agents); + } + + // Handle authentication configuration + await this.handleAuthenticationConfig(useCase, config); + + return useCase; + } + + /** + * Validates an updated Workflow use case. + * Merges existing configuration with new configuration and validates the result. + * + * @param useCase - The Workflow use case to validate + * @param oldDynamoDbRecordKey - The key of the old DynamoDB record + * @returns A promise that resolves to the validated use case + * @throws RequestValidationError if validation fails + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###validateUpdateWorkflowUseCase' }) + public async validateUpdateUseCase(useCase: UseCase, oldDynamoDbRecordKey: string): Promise { + // retrieve the existing config from DynamoDB using a dummy use case object + let dummyOldUseCase = useCase.clone(); + dummyOldUseCase.setUseCaseConfigRecordKey(oldDynamoDbRecordKey); + const existingConfigObj = await this.useCaseConfigMgmt.getUseCaseConfigFromTable(dummyOldUseCase); + + // Merge existing config with new config using Workflow specific merge logic + useCase.configuration = await ConfigMergeUtils.mergeWorkflowConfigs(existingConfigObj, useCase.configuration); + const config = this.getTypedConfiguration(useCase); + + // Validate system prompt is provided and within length limits + if (!config.WorkflowParams?.SystemPrompt) { + throw new RequestValidationError('SystemPrompt is required for Workflow use cases.'); + } + if (config.WorkflowParams.SystemPrompt.length > AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH) { + throw new RequestValidationError( + `SystemPrompt exceeds maximum length of ${AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH} characters.` + ); + } + + // Validate orchestration pattern + if (!config.WorkflowParams?.OrchestrationPattern) { + throw new RequestValidationError('OrchestrationPattern is required for Workflow use cases.'); + } + this.validateOrchestrationPattern(config.WorkflowParams.OrchestrationPattern); + + // Validate agents as tools if provided + if (config.WorkflowParams.OrchestrationPattern === WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS) { + if (!config.WorkflowParams.AgentsAsToolsParams?.Agents) { + throw new RequestValidationError('AgentsAsToolsParams.Agents is required for Workflow use cases.'); + } + this.validateAgentsAsTools(config.WorkflowParams.AgentsAsToolsParams.Agents); + } + + return useCase; + } + + /** + * Validates orchestration pattern format and content. + * + * @param orchestrationPattern - The orchestration pattern string + * @throws RequestValidationError if validation fails + */ + private validateOrchestrationPattern(orchestrationPattern: string): void { + if (typeof orchestrationPattern !== 'string' || orchestrationPattern.trim().length === 0) { + throw new RequestValidationError('OrchestrationPattern must be a non-empty string.'); + } + + if (!SUPPORTED_WORKFLOW_ORCHESTRATION_PATTERNS.includes(orchestrationPattern)) { + throw new RequestValidationError( + `Unsupported OrchestrationPattern: ${orchestrationPattern}. Supported patterns: ${SUPPORTED_WORKFLOW_ORCHESTRATION_PATTERNS.join(', ')}` + ); + } + } + + /** + * Validates agents as tools array format and content. + * + * @param agents - Array of agent configurations for use as tools + * @throws RequestValidationError if validation fails + */ + private validateAgentsAsTools( + agents: Array< + Pick< + AgentBuilderUseCaseConfiguration, + 'UseCaseType' | 'UseCaseName' | 'UseCaseDescription' | 'AgentBuilderParams' | 'LlmParams' + > + > + ): void { + if (!Array.isArray(agents) || agents.length === 0) { + throw new RequestValidationError('Agents must be a non-empty array.'); + } + + agents.forEach((agent, index) => { + if (!agent.UseCaseName || typeof agent.UseCaseName !== 'string' || agent.UseCaseName.trim().length === 0) { + throw new RequestValidationError( + `Agents[${index}].UseCaseName is required and must be a non-empty string.` + ); + } + + if (!agent.UseCaseType || typeof agent.UseCaseType !== 'string' || agent.UseCaseType.trim().length === 0) { + throw new RequestValidationError( + `Agents[${index}].UseCaseType is required and must be a non-empty string.` + ); + } + + //currently only support for other agents + if (agent.UseCaseType !== UseCaseTypes.AGENT_BUILDER) { + throw new RequestValidationError( + `Agents[${index}].UseCaseType must be '${UseCaseTypes.AGENT_BUILDER}'` + ); + } + }); + } + + /** + * Handles authentication configuration for Workflow use cases. + * + * @param useCase - The use case being validated + * @param config - The typed configuration + */ + private async handleAuthenticationConfig(useCase: UseCase, config: WorkflowUseCaseConfiguration): Promise { + if (config.AuthenticationParams) { + // prettier-ignore + switch (config.AuthenticationParams.AuthenticationProvider) { //NOSONAR - typescript:S1301, switch statement used for ease of future extensions + case AUTHENTICATION_PROVIDERS.COGNITO: + // overriding the previously set CognitoDomainPrefix parameter + // by fetching it dynamically based on the set user pool + + const existingUserPoolId = useCase.cfnParameters?.get(CfnParameterKeys.ExistingCognitoUserPoolId); + if (!existingUserPoolId) { + throw new Error('Undefined user pool provided for the cognito authentication provider.'); + } + + const cognitoDomainPrefix = await getCognitoDomainPrefixByUserPool(existingUserPoolId); + + if (!useCase.cfnParameters) { + throw new Error('CfnParameters are not available yet for setting Cognito Domain Prefix.'); + } + + useCase.cfnParameters.set(CfnParameterKeys.CognitoDomainPrefix, cognitoDomainPrefix); + break; + } + } + } +} diff --git a/source/lambda/use-case-management/package-lock.json b/source/lambda/use-case-management/package-lock.json index 4df7f7c2..18e05e2b 100644 --- a/source/lambda/use-case-management/package-lock.json +++ b/source/lambda/use-case-management/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/use-case-management", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/use-case-management", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "dependencies": { "aws-jwt-verify": "^5.0.0" @@ -22,68 +22,51 @@ "eslint": "^9.16.0", "jest": "^29.7.0", "lodash": "^4.17.21", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" } }, - "node_modules/@ampproject/remapping": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", - "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/@babel/code-frame": { - "version": "7.26.2", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", - "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-validator-identifier": "^7.25.9", + "@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", - "picocolors": "^1.0.0" + "picocolors": "^1.1.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/compat-data": { - "version": "7.26.8", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.8.tgz", - "integrity": "sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ==", + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.4.tgz", + "integrity": "sha512-YsmSKC29MJwf0gF8Rjjrg5LQCmyh+j/nD8/eP7f+BeoQTKYqs9RoWbjGOdy0+1Ekr68RJZMUOPVQaQisnIo4Rw==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.26.10", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.10.tgz", - "integrity": "sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.10", - "@babel/helper-compilation-targets": "^7.26.5", - "@babel/helper-module-transforms": "^7.26.0", - "@babel/helpers": "^7.26.10", - "@babel/parser": "^7.26.10", - "@babel/template": "^7.26.9", - "@babel/traverse": "^7.26.10", - "@babel/types": "^7.26.10", + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.4.tgz", + "integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.4", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.4", + "@babel/types": "^7.28.4", + "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -103,22 +86,20 @@ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/generator": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.27.0.tgz", - "integrity": "sha512-VybsKvpiN1gU1sdMZIp7FcqphVVKEwcuj02x73uvcHE0PTihx1nlBcowYWhDwjpoAXRv43+gDzyggGnn1XZhVw==", + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.3.tgz", + "integrity": "sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/parser": "^7.27.0", - "@babel/types": "^7.27.0", - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.25", + "@babel/parser": "^7.28.3", + "@babel/types": "^7.28.2", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" }, "engines": { @@ -126,14 +107,13 @@ } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.0.tgz", - "integrity": "sha512-LVk7fbXml0H2xH34dFzKQ7TDZ2G4/rVTOrq9V+icbbadjbVxxeFeDsNHv2SrZeWoA+6ZiTyWYWtScEIW07EAcA==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.26.8", - "@babel/helper-validator-option": "^7.25.9", + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" @@ -147,35 +127,41 @@ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "license": "ISC", "bin": { "semver": "bin/semver.js" } }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-module-imports": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", - "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz", - "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==", + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-module-imports": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9", - "@babel/traverse": "^7.25.9" + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" }, "engines": { "node": ">=6.9.0" @@ -185,67 +171,61 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.26.5", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz", - "integrity": "sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", - "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", - "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz", - "integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz", - "integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==", + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/template": "^7.27.0", - "@babel/types": "^7.27.0" + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz", - "integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==", + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.4.tgz", + "integrity": "sha512-yZbBqeM6TkpP9du/I2pUZnJsRMGGvOuIrhjzC1AwHwW+6he4mni6Bp/m8ijn0iOuZuPI2BfkCoSRunpyjnrQKg==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/types": "^7.27.0" + "@babel/types": "^7.28.4" }, "bin": { "parser": "bin/babel-parser.js" @@ -259,7 +239,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -272,7 +251,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -285,7 +263,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.12.13" }, @@ -298,7 +275,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -310,13 +286,12 @@ } }, "node_modules/@babel/plugin-syntax-import-attributes": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz", - "integrity": "sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -330,7 +305,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -343,7 +317,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -352,13 +325,12 @@ } }, "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz", - "integrity": "sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -372,7 +344,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -385,7 +356,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -398,7 +368,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -411,7 +380,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -424,7 +392,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -437,7 +404,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -450,7 +416,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -466,7 +431,6 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -478,13 +442,12 @@ } }, "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.9.tgz", - "integrity": "sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -494,58 +457,45 @@ } }, "node_modules/@babel/template": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz", - "integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.26.2", - "@babel/parser": "^7.27.0", - "@babel/types": "^7.27.0" + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.27.0.tgz", - "integrity": "sha512-19lYZFzYVQkkHkl4Cy4WrAVcqBkgvV2YM2TU3xG6DIwO7O3ecbDPfW3yM3bjAGcqcQHi+CCtjMR3dIEHxsd6bA==", + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.4.tgz", + "integrity": "sha512-YEzuboP2qvQavAcjgQNVgsvHIDv6ZpwXvcvjmyySP2DIMuByS/6ioU5G9pYrWHM6T2YDfc7xga9iNzYOs12CFQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.27.0", - "@babel/parser": "^7.27.0", - "@babel/template": "^7.27.0", - "@babel/types": "^7.27.0", - "debug": "^4.3.1", - "globals": "^11.1.0" + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.3", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.4", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4", + "debug": "^4.3.1" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/traverse/node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/@babel/types": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz", - "integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==", + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.4.tgz", + "integrity": "sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9" + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -555,15 +505,13 @@ "version": "0.2.3", "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@cspotcode/source-map-support": { "version": "0.8.1", "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", "dev": true, - "license": "MIT", "dependencies": { "@jridgewell/trace-mapping": "0.3.9" }, @@ -576,18 +524,16 @@ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", "dev": true, - "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.0.3", "@jridgewell/sourcemap-codec": "^1.4.10" } }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.5.1.tgz", - "integrity": "sha512-soEIOALTfTK6EjmKMMoLugwaP0rzkad90iIWd1hMO9ARkSAyjfMfkRRhLvD5qH7vvM0Cg72pieUfR6yh6XxC4w==", + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", "dev": true, - "license": "MIT", "dependencies": { "eslint-visitor-keys": "^3.4.3" }, @@ -606,7 +552,6 @@ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", "dev": true, - "license": "MIT", "engines": { "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } @@ -616,7 +561,6 @@ "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.0.tgz", "integrity": "sha512-ENIdc4iLu0d93HeYirvKmrzshzofPw6VkZRKQGe9Nv46ZnWUzcF1xV01dcvEg/1wXUR61OmmlSfyeyO7EvjLxQ==", "dev": true, - "license": "Apache-2.0", "dependencies": { "@eslint/object-schema": "^2.1.6", "debug": "^4.3.1", @@ -631,7 +575,6 @@ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, - "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -642,7 +585,6 @@ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" }, @@ -651,21 +593,19 @@ } }, "node_modules/@eslint/config-helpers": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.3.0.tgz", - "integrity": "sha512-ViuymvFmcJi04qdZeDc2whTHryouGcDlaxPqarTD0ZE10ISpxGUVZGZDx4w01upyIynL3iu6IXH2bS1NhclQMw==", + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.3.1.tgz", + "integrity": "sha512-xR93k9WhrDYpXHORXpxVL5oHj3Era7wo6k/Wd8/IsQNnZUTzkGS29lyn3nAT05v6ltUuTFVCCYDEGfy2Or/sPA==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/core": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.1.tgz", - "integrity": "sha512-bkOp+iumZCCbt1K1CmWf0R9pM5yKpDv+ZXtvSyQpudrI9kuFLp+bM2WOPXImuD/ceQuaa8f5pj93Y7zyECIGNA==", + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.2.tgz", + "integrity": "sha512-78Md3/Rrxh83gCxoUc0EiciuOHsIITzLy53m3d9UyiW8y9Dj2D29FeETqyKA+BRK76tnTp6RXWb3pCay8Oyomg==", "dev": true, - "license": "Apache-2.0", "dependencies": { "@types/json-schema": "^7.0.15" }, @@ -678,7 +618,6 @@ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", "dev": true, - "license": "MIT", "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", @@ -707,12 +646,20 @@ "concat-map": "0.0.1" } }, + "node_modules/@eslint/eslintrc/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, "node_modules/@eslint/eslintrc/node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" }, @@ -721,11 +668,10 @@ } }, "node_modules/@eslint/js": { - "version": "9.31.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.31.0.tgz", - "integrity": "sha512-LOm5OVt7D4qiKCqoiPbA7LWmI+tbw1VbTUowBcUMgQSuM6poJufkFkYDcQpo5KfgD39TnNySV26QjOh7VFpSyw==", + "version": "9.35.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.35.0.tgz", + "integrity": "sha512-30iXE9whjlILfWobBkNerJo+TXYsgVM5ERQwMcMKCHckHflCmf7wXDAHlARoWnh0s1U72WqlbeyE7iAcCzuCPw==", "dev": true, - "license": "MIT", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, @@ -738,19 +684,17 @@ "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/plugin-kit": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.4.tgz", - "integrity": "sha512-Ul5l+lHEcw3L5+k8POx6r74mxEYKG5kOb6Xpy2gCRW6zweT6TEhAf8vhxGgjhqrd/VO/Dirhsb+1hNpD1ue9hw==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.5.tgz", + "integrity": "sha512-Z5kJ+wU3oA7MMIqVR9tyZRtjYPr4OC004Q4Rw7pgOKUOKkJfZ3O24nz3WYfGRpMDNmcOi3TwQOmgm7B7Tpii0w==", "dev": true, - "license": "Apache-2.0", "dependencies": { - "@eslint/core": "^0.15.1", + "@eslint/core": "^0.15.2", "levn": "^0.4.1" }, "engines": { @@ -762,45 +706,28 @@ "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=18.18.0" } }, "node_modules/@humanfs/node": { - "version": "0.16.6", - "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz", - "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==", + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", "dev": true, - "license": "Apache-2.0", "dependencies": { "@humanfs/core": "^0.19.1", - "@humanwhocodes/retry": "^0.3.0" + "@humanwhocodes/retry": "^0.4.0" }, "engines": { "node": ">=18.18.0" } }, - "node_modules/@humanfs/node/node_modules/@humanwhocodes/retry": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz", - "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=18.18" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=12.22" }, @@ -810,11 +737,10 @@ } }, "node_modules/@humanwhocodes/retry": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.2.tgz", - "integrity": "sha512-xeO57FpIu4p1Ri3Jq/EXq4ClRm86dVF2z/+kvFnyqVYRavTZmaFaUBbWCOuuTh0o/g7DSsk6kc2vrS4Vl5oPOQ==", + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=18.18" }, @@ -828,7 +754,6 @@ "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", "dev": true, - "license": "ISC", "dependencies": { "camelcase": "^5.3.1", "find-up": "^4.1.0", @@ -845,7 +770,6 @@ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, - "license": "MIT", "dependencies": { "sprintf-js": "~1.0.2" } @@ -855,7 +779,6 @@ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, - "license": "MIT", "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" @@ -865,9 +788,9 @@ } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "license": "MIT", "dependencies": { @@ -883,7 +806,6 @@ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, - "license": "MIT", "dependencies": { "p-locate": "^4.1.0" }, @@ -896,7 +818,6 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, - "license": "MIT", "dependencies": { "p-try": "^2.0.0" }, @@ -912,7 +833,6 @@ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, - "license": "MIT", "dependencies": { "p-limit": "^2.2.0" }, @@ -925,7 +845,6 @@ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -935,7 +854,6 @@ "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -945,7 +863,6 @@ "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", "dev": true, - "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", @@ -963,7 +880,6 @@ "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", "dev": true, - "license": "MIT", "dependencies": { "@jest/console": "^29.7.0", "@jest/reporters": "^29.7.0", @@ -1011,7 +927,6 @@ "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/fake-timers": "^29.7.0", "@jest/types": "^29.6.3", @@ -1027,7 +942,6 @@ "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", "dev": true, - "license": "MIT", "dependencies": { "expect": "^29.7.0", "jest-snapshot": "^29.7.0" @@ -1041,7 +955,6 @@ "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", "dev": true, - "license": "MIT", "dependencies": { "jest-get-type": "^29.6.3" }, @@ -1054,7 +967,6 @@ "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", "dev": true, - "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@sinonjs/fake-timers": "^10.0.2", @@ -1072,7 +984,6 @@ "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", "dev": true, - "license": "MIT", "dependencies": { "@jest/environment": "^29.7.0", "@jest/expect": "^29.7.0", @@ -1088,7 +999,6 @@ "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", "dev": true, - "license": "MIT", "dependencies": { "@bcoe/v8-coverage": "^0.2.3", "@jest/console": "^29.7.0", @@ -1132,7 +1042,6 @@ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", "dev": true, - "license": "MIT", "dependencies": { "@sinclair/typebox": "^0.27.8" }, @@ -1145,7 +1054,6 @@ "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", "dev": true, - "license": "MIT", "dependencies": { "@jridgewell/trace-mapping": "^0.3.18", "callsites": "^3.0.0", @@ -1160,7 +1068,6 @@ "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", "dev": true, - "license": "MIT", "dependencies": { "@jest/console": "^29.7.0", "@jest/types": "^29.6.3", @@ -1176,7 +1083,6 @@ "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/test-result": "^29.7.0", "graceful-fs": "^4.2.9", @@ -1192,7 +1098,6 @@ "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", "dev": true, - "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", "@jest/types": "^29.6.3", @@ -1219,7 +1124,6 @@ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/schemas": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", @@ -1233,18 +1137,23 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.8", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", - "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", "dev": true, - "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.2.1", - "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { @@ -1252,34 +1161,21 @@ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/set-array": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", - "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", - "dev": true, - "license": "MIT", "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", - "dev": true, - "license": "MIT" + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", "dev": true, - "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -1290,7 +1186,6 @@ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dev": true, - "license": "MIT", "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" @@ -1304,7 +1199,6 @@ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", "dev": true, - "license": "MIT", "engines": { "node": ">= 8" } @@ -1314,7 +1208,6 @@ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", "dev": true, - "license": "MIT", "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" @@ -1327,15 +1220,13 @@ "version": "0.27.8", "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@sinonjs/commons": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "type-detect": "4.0.8" } @@ -1345,20 +1236,17 @@ "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.0" } }, "node_modules/@sinonjs/samsam": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-8.0.2.tgz", - "integrity": "sha512-v46t/fwnhejRSFTGqbpn9u+LQ9xJDse10gNnPgAcxgdoCDMXj/G2asWAC/8Qs+BAZDicX+MNZouXT1A7c83kVw==", + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-8.0.3.tgz", + "integrity": "sha512-hw6HbX+GyVZzmaYNh82Ecj1vdGZrqVIn/keDTg63IgAwiQPO+xCz99uG6Woqgb4tM0mUiFENKZ4cqd7IX94AXQ==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.1", - "lodash.get": "^4.4.2", "type-detect": "^4.1.0" } }, @@ -1367,7 +1255,6 @@ "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz", "integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } @@ -1376,43 +1263,37 @@ "version": "0.7.3", "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.3.tgz", "integrity": "sha512-DE427ROAphMQzU4ENbliGYrBSYPXF+TtLg9S8vzeA+OF4ZKzoDdzfL8sxuMUGS/lgRhM6j1URSk9ghf7Xo1tyA==", - "dev": true, - "license": "(Unlicense OR Apache-2.0)" + "dev": true }, "node_modules/@tsconfig/node10": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@tsconfig/node12": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@tsconfig/node14": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@tsconfig/node16": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/babel__core": { "version": "7.20.5", "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", "dev": true, - "license": "MIT", "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", @@ -1422,11 +1303,10 @@ } }, "node_modules/@types/babel__generator": { - "version": "7.6.8", - "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.8.tgz", - "integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", "dev": true, - "license": "MIT", "dependencies": { "@babel/types": "^7.0.0" } @@ -1436,35 +1316,46 @@ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", "dev": true, - "license": "MIT", "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "node_modules/@types/babel__traverse": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.7.tgz", - "integrity": "sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng==", + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/types": "^7.20.7" + "@babel/types": "^7.28.2" } }, - "node_modules/@types/estree": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz", - "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==", + "node_modules/@types/chai": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", + "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", "dev": true, - "license": "MIT" + "dependencies": { + "@types/deep-eql": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true }, "node_modules/@types/graceful-fs": { "version": "4.1.9", "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", "dev": true, - "license": "MIT", "dependencies": { "@types/node": "*" } @@ -1473,15 +1364,13 @@ "version": "2.0.6", "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/istanbul-lib-report": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", "dev": true, - "license": "MIT", "dependencies": { "@types/istanbul-lib-coverage": "*" } @@ -1491,7 +1380,6 @@ "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", "dev": true, - "license": "MIT", "dependencies": { "@types/istanbul-lib-report": "*" } @@ -1501,7 +1389,6 @@ "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", "dev": true, - "license": "MIT", "dependencies": { "expect": "^29.0.0", "pretty-format": "^29.0.0" @@ -1511,24 +1398,21 @@ "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/lodash": { - "version": "4.17.16", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.16.tgz", - "integrity": "sha512-HX7Em5NYQAXKW+1T+FiuG27NGwzJfCX3s1GjOa7ujxZa52kjJLOr4FUxT+giF6Tgxv1e+/czV/iTtBw27WTU9g==", - "dev": true, - "license": "MIT" + "version": "4.17.20", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz", + "integrity": "sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==", + "dev": true }, "node_modules/@types/node": { - "version": "22.13.13", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.13.tgz", - "integrity": "sha512-ClsL5nMwKaBRwPcCvH8E7+nU4GxHVx1axNvMZTFHMEfNI7oahimt26P5zjVCRrjiIWj6YFXfE1v3dEp94wLcGQ==", + "version": "22.18.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.18.1.tgz", + "integrity": "sha512-rzSDyhn4cYznVG+PCzGe1lwuMYJrcBS1fc3JqSa2PvtABwWo+dZ1ij5OVok3tqfpEBCBoaR4d7upFJk73HRJDw==", "dev": true, - "license": "MIT", "dependencies": { - "undici-types": "~6.20.0" + "undici-types": "~6.21.0" } }, "node_modules/@types/sinon": { @@ -1536,7 +1420,6 @@ "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-17.0.4.tgz", "integrity": "sha512-RHnIrhfPO3+tJT0s7cFaXGZvsL4bbR3/k7z3P312qMS4JaS2Tk+KiwiLx1S0rQ56ERj00u1/BtdyVd0FY+Pdew==", "dev": true, - "license": "MIT", "dependencies": { "@types/sinonjs__fake-timers": "*" } @@ -1545,22 +1428,19 @@ "version": "8.1.5", "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-8.1.5.tgz", "integrity": "sha512-mQkU2jY8jJEF7YHjHvsQO8+3ughTL1mcnn96igfhONmR+fUPSKIkefQYpSe8bsly2Ep7oQbn/6VG5/9/0qcArQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/stack-utils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@types/yargs": { "version": "17.0.33", "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", "integrity": "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", "dev": true, - "license": "MIT", "dependencies": { "@types/yargs-parser": "*" } @@ -1569,25 +1449,23 @@ "version": "21.0.3", "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.28.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.28.0.tgz", - "integrity": "sha512-lvFK3TCGAHsItNdWZ/1FkvpzCxTHUVuFrdnOGLMa0GGCFIbCgQWVk3CzCGdA7kM3qGVc+dfW9tr0Z/sHnGDFyg==", + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.43.0.tgz", + "integrity": "sha512-8tg+gt7ENL7KewsKMKDHXR1vm8tt9eMxjJBYINf6swonlWgkYn5NwyIgXpbbDxTNU5DgpDFfj95prcTq2clIQQ==", "dev": true, - "license": "MIT", "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.28.0", - "@typescript-eslint/type-utils": "8.28.0", - "@typescript-eslint/utils": "8.28.0", - "@typescript-eslint/visitor-keys": "8.28.0", + "@typescript-eslint/scope-manager": "8.43.0", + "@typescript-eslint/type-utils": "8.43.0", + "@typescript-eslint/utils": "8.43.0", + "@typescript-eslint/visitor-keys": "8.43.0", "graphemer": "^1.4.0", - "ignore": "^5.3.1", + "ignore": "^7.0.0", "natural-compare": "^1.4.0", - "ts-api-utils": "^2.0.1" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1597,22 +1475,21 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0", + "@typescript-eslint/parser": "^8.43.0", "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <5.9.0" + "typescript": ">=4.8.4 <6.0.0" } }, "node_modules/@typescript-eslint/parser": { - "version": "8.28.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.28.0.tgz", - "integrity": "sha512-LPcw1yHD3ToaDEoljFEfQ9j2xShY367h7FZ1sq5NJT9I3yj4LHer1Xd1yRSOdYy9BpsrxU7R+eoDokChYM53lQ==", + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.43.0.tgz", + "integrity": "sha512-B7RIQiTsCBBmY+yW4+ILd6mF5h1FUwJsVvpqkrgpszYifetQ2Ke+Z4u6aZh0CblkUGIdR59iYVyXqqZGkZ3aBw==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/scope-manager": "8.28.0", - "@typescript-eslint/types": "8.28.0", - "@typescript-eslint/typescript-estree": "8.28.0", - "@typescript-eslint/visitor-keys": "8.28.0", + "@typescript-eslint/scope-manager": "8.43.0", + "@typescript-eslint/types": "8.43.0", + "@typescript-eslint/typescript-estree": "8.43.0", + "@typescript-eslint/visitor-keys": "8.43.0", "debug": "^4.3.4" }, "engines": { @@ -1624,18 +1501,38 @@ }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <5.9.0" + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.43.0.tgz", + "integrity": "sha512-htB/+D/BIGoNTQYffZw4uM4NzzuolCoaA/BusuSIcC8YjmBYQioew5VUZAYdAETPjeed0hqCaW7EHg+Robq8uw==", + "dev": true, + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.43.0", + "@typescript-eslint/types": "^8.43.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.28.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.28.0.tgz", - "integrity": "sha512-u2oITX3BJwzWCapoZ/pXw6BCOl8rJP4Ij/3wPoGvY8XwvXflOzd1kLrDUUUAIEdJSFh+ASwdTHqtan9xSg8buw==", + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.43.0.tgz", + "integrity": "sha512-daSWlQ87ZhsjrbMLvpuuMAt3y4ba57AuvadcR7f3nl8eS3BjRc8L9VLxFLk92RL5xdXOg6IQ+qKjjqNEimGuAg==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.28.0", - "@typescript-eslint/visitor-keys": "8.28.0" + "@typescript-eslint/types": "8.43.0", + "@typescript-eslint/visitor-keys": "8.43.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1645,17 +1542,33 @@ "url": "https://opencollective.com/typescript-eslint" } }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.43.0.tgz", + "integrity": "sha512-ALC2prjZcj2YqqL5X/bwWQmHA2em6/94GcbB/KKu5SX3EBDOsqztmmX1kMkvAJHzxk7TazKzJfFiEIagNV3qEA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.28.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.28.0.tgz", - "integrity": "sha512-oRoXu2v0Rsy/VoOGhtWrOKDiIehvI+YNrDk5Oqj40Mwm0Yt01FC/Q7nFqg088d3yAsR1ZcZFVfPCTTFCe/KPwg==", + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.43.0.tgz", + "integrity": "sha512-qaH1uLBpBuBBuRf8c1mLJ6swOfzCXryhKND04Igr4pckzSEW9JX5Aw9AgW00kwfjWJF0kk0ps9ExKTfvXfw4Qg==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/typescript-estree": "8.28.0", - "@typescript-eslint/utils": "8.28.0", + "@typescript-eslint/types": "8.43.0", + "@typescript-eslint/typescript-estree": "8.43.0", + "@typescript-eslint/utils": "8.43.0", "debug": "^4.3.4", - "ts-api-utils": "^2.0.1" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1666,15 +1579,14 @@ }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <5.9.0" + "typescript": ">=4.8.4 <6.0.0" } }, "node_modules/@typescript-eslint/types": { - "version": "8.28.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.28.0.tgz", - "integrity": "sha512-bn4WS1bkKEjx7HqiwG2JNB3YJdC1q6Ue7GyGlwPHyt0TnVq6TtD/hiOdTZt71sq0s7UzqBFXD8t8o2e63tXgwA==", + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.43.0.tgz", + "integrity": "sha512-vQ2FZaxJpydjSZJKiSW/LJsabFFvV7KgLC5DiLhkBcykhQj8iK9BOaDmQt74nnKdLvceM5xmhaTF+pLekrxEkw==", "dev": true, - "license": "MIT", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, @@ -1684,20 +1596,21 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.28.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.28.0.tgz", - "integrity": "sha512-H74nHEeBGeklctAVUvmDkxB1mk+PAZ9FiOMPFncdqeRBXxk1lWSYraHw8V12b7aa6Sg9HOBNbGdSHobBPuQSuA==", + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.43.0.tgz", + "integrity": "sha512-7Vv6zlAhPb+cvEpP06WXXy/ZByph9iL6BQRBDj4kmBsW98AqEeQHlj/13X+sZOrKSo9/rNKH4Ul4f6EICREFdw==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.28.0", - "@typescript-eslint/visitor-keys": "8.28.0", + "@typescript-eslint/project-service": "8.43.0", + "@typescript-eslint/tsconfig-utils": "8.43.0", + "@typescript-eslint/types": "8.43.0", + "@typescript-eslint/visitor-keys": "8.43.0", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", "minimatch": "^9.0.4", "semver": "^7.6.0", - "ts-api-utils": "^2.0.1" + "ts-api-utils": "^2.1.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1707,20 +1620,19 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "typescript": ">=4.8.4 <5.9.0" + "typescript": ">=4.8.4 <6.0.0" } }, "node_modules/@typescript-eslint/utils": { - "version": "8.28.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.28.0.tgz", - "integrity": "sha512-OELa9hbTYciYITqgurT1u/SzpQVtDLmQMFzy/N8pQE+tefOyCWT79jHsav294aTqV1q1u+VzqDGbuujvRYaeSQ==", + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.43.0.tgz", + "integrity": "sha512-S1/tEmkUeeswxd0GGcnwuVQPFWo8NzZTOMxCvw8BX7OMxnNae+i8Tm7REQen/SwUIPoPqfKn7EaZ+YLpiB3k9g==", "dev": true, - "license": "MIT", "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.28.0", - "@typescript-eslint/types": "8.28.0", - "@typescript-eslint/typescript-estree": "8.28.0" + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.43.0", + "@typescript-eslint/types": "8.43.0", + "@typescript-eslint/typescript-estree": "8.43.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1731,18 +1643,17 @@ }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <5.9.0" + "typescript": ">=4.8.4 <6.0.0" } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.28.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.28.0.tgz", - "integrity": "sha512-hbn8SZ8w4u2pRwgQ1GlUrPKE+t2XvcCW5tTRF7j6SMYIuYG37XuzIW44JCZPa36evi0Oy2SnM664BlIaAuQcvg==", + "version": "8.43.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.43.0.tgz", + "integrity": "sha512-T+S1KqRD4sg/bHfLwrpF/K3gQLBM1n7Rp7OjjikjTEssI2YJzQpi5WXoynOaQ93ERIuq3O8RBTOUYDKszUCEHw==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.28.0", - "eslint-visitor-keys": "^4.2.0" + "@typescript-eslint/types": "8.43.0", + "eslint-visitor-keys": "^4.2.1" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1753,11 +1664,10 @@ } }, "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", - "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, @@ -1766,14 +1676,14 @@ } }, "node_modules/@vitest/expect": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.0.9.tgz", - "integrity": "sha512-5eCqRItYgIML7NNVgJj6TVCmdzE7ZVgJhruW0ziSQV4V7PvLkDL1bBkBdcTs/VuIz0IxPb5da1IDSqc1TR9eig==", + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/spy": "3.0.9", - "@vitest/utils": "3.0.9", + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", "chai": "^5.2.0", "tinyrainbow": "^2.0.0" }, @@ -1782,11 +1692,10 @@ } }, "node_modules/@vitest/pretty-format": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.0.9.tgz", - "integrity": "sha512-OW9F8t2J3AwFEwENg3yMyKWweF7oRJlMyHOMIhO5F3n0+cgQAJZBjNgrF8dLwFTEXl5jUqBLXd9QyyKv8zEcmA==", + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", "dev": true, - "license": "MIT", "dependencies": { "tinyrainbow": "^2.0.0" }, @@ -1795,27 +1704,25 @@ } }, "node_modules/@vitest/spy": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.0.9.tgz", - "integrity": "sha512-/CcK2UDl0aQ2wtkp3YVWldrpLRNCfVcIOFGlVGKO4R5eajsH393Z1yiXLVQ7vWsj26JOEjeZI0x5sm5P4OGUNQ==", + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", "dev": true, - "license": "MIT", "dependencies": { - "tinyspy": "^3.0.2" + "tinyspy": "^4.0.3" }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/@vitest/utils": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.0.9.tgz", - "integrity": "sha512-ilHM5fHhZ89MCp5aAaM9uhfl1c2JdxVxl3McqsdVyVNN6JffnEen8UMCdRTzOhGXNQGo5GNL9QugHrz727Wnng==", + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", "dev": true, - "license": "MIT", "dependencies": { - "@vitest/pretty-format": "3.0.9", - "loupe": "^3.1.3", + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", "tinyrainbow": "^2.0.0" }, "funding": { @@ -1827,7 +1734,6 @@ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, - "license": "MIT", "bin": { "acorn": "bin/acorn" }, @@ -1840,7 +1746,6 @@ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": true, - "license": "MIT", "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } @@ -1850,7 +1755,6 @@ "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", "dev": true, - "license": "MIT", "dependencies": { "acorn": "^8.11.0" }, @@ -1863,7 +1767,6 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, - "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -1880,7 +1783,6 @@ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", "dev": true, - "license": "MIT", "dependencies": { "type-fest": "^0.21.3" }, @@ -1896,7 +1798,6 @@ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -1906,7 +1807,6 @@ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, - "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -1922,7 +1822,6 @@ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", "dev": true, - "license": "ISC", "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" @@ -1935,40 +1834,29 @@ "version": "4.1.3", "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, - "license": "Python-2.0" + "dev": true }, "node_modules/assertion-error": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", "dev": true, - "license": "MIT", "engines": { "node": ">=12" } }, - "node_modules/async": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", - "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", - "dev": true, - "license": "MIT" - }, "node_modules/aws-jwt-verify": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/aws-jwt-verify/-/aws-jwt-verify-5.0.0.tgz", - "integrity": "sha512-FQM5EYEm7AnVJ3oeTpvBZQm7hYnpI067Em1oopHBs7cCNAcw8Aw8NFbojFjkNXifsOzyYe1ks+bg7Q9fMsTZSA==", - "license": "Apache-2.0", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/aws-jwt-verify/-/aws-jwt-verify-5.1.0.tgz", + "integrity": "sha512-98ioOBMyrLU5jW5rPvkJo20XlNB2rAX3tZR3BM6AamfBkOoSRLV1EyGkbgHQzgFOWyQ7yV8+tce6M24rOpMkgw==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" } }, "node_modules/aws-sdk-client-mock": { @@ -1976,7 +1864,6 @@ "resolved": "https://registry.npmjs.org/aws-sdk-client-mock/-/aws-sdk-client-mock-4.1.0.tgz", "integrity": "sha512-h/tOYTkXEsAcV3//6C1/7U4ifSpKyJvb6auveAepqqNJl6TdZaPFEtKjBQNf8UxQdDP850knB2i/whq4zlsxJw==", "dev": true, - "license": "MIT", "dependencies": { "@types/sinon": "^17.0.3", "sinon": "^18.0.1", @@ -1988,7 +1875,6 @@ "resolved": "https://registry.npmjs.org/aws-sdk-client-mock-jest/-/aws-sdk-client-mock-jest-4.1.0.tgz", "integrity": "sha512-+g4a5Hp+MmPqqNnvwfLitByggrqf+xSbk1pm6fBYHNcon6+aQjL5iB+3YB6HuGPemY+/mUKN34iP62S14R61bA==", "dev": true, - "license": "MIT", "dependencies": { "@vitest/expect": ">1.6.0", "expect": ">28.1.3", @@ -2009,7 +1895,6 @@ "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", "dev": true, - "license": "MIT", "dependencies": { "@jest/transform": "^29.7.0", "@types/babel__core": "^7.1.14", @@ -2031,7 +1916,6 @@ "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", @@ -2048,7 +1932,6 @@ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@babel/core": "^7.12.3", "@babel/parser": "^7.14.7", @@ -2065,7 +1948,6 @@ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "license": "ISC", "bin": { "semver": "bin/semver.js" } @@ -2075,7 +1957,6 @@ "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", "dev": true, - "license": "MIT", "dependencies": { "@babel/template": "^7.3.3", "@babel/types": "^7.3.3", @@ -2087,11 +1968,10 @@ } }, "node_modules/babel-preset-current-node-syntax": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.1.0.tgz", - "integrity": "sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", "dev": true, - "license": "MIT", "dependencies": { "@babel/plugin-syntax-async-generators": "^7.8.4", "@babel/plugin-syntax-bigint": "^7.8.3", @@ -2110,7 +1990,7 @@ "@babel/plugin-syntax-top-level-await": "^7.14.5" }, "peerDependencies": { - "@babel/core": "^7.0.0" + "@babel/core": "^7.0.0 || ^8.0.0-0" } }, "node_modules/babel-preset-jest": { @@ -2118,7 +1998,6 @@ "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", "dev": true, - "license": "MIT", "dependencies": { "babel-plugin-jest-hoist": "^29.6.3", "babel-preset-current-node-syntax": "^1.0.0" @@ -2134,8 +2013,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/brace-expansion": { "version": "2.0.2", @@ -2151,7 +2029,6 @@ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, - "license": "MIT", "dependencies": { "fill-range": "^7.1.1" }, @@ -2160,9 +2037,9 @@ } }, "node_modules/browserslist": { - "version": "4.24.4", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz", - "integrity": "sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==", + "version": "4.25.4", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.25.4.tgz", + "integrity": "sha512-4jYpcjabC606xJ3kw2QwGEZKX0Aw7sgQdZCvIK9dhVSPh76BKo+C+btT1RRofH7B+8iNpEbgGNVWiLki5q93yg==", "dev": true, "funding": [ { @@ -2178,12 +2055,11 @@ "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001688", - "electron-to-chromium": "^1.5.73", + "caniuse-lite": "^1.0.30001737", + "electron-to-chromium": "^1.5.211", "node-releases": "^2.0.19", - "update-browserslist-db": "^1.1.1" + "update-browserslist-db": "^1.1.3" }, "bin": { "browserslist": "cli.js" @@ -2197,7 +2073,6 @@ "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", "dev": true, - "license": "MIT", "dependencies": { "fast-json-stable-stringify": "2.x" }, @@ -2210,7 +2085,6 @@ "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", "dev": true, - "license": "Apache-2.0", "dependencies": { "node-int64": "^0.4.0" } @@ -2219,15 +2093,13 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -2237,15 +2109,14 @@ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/caniuse-lite": { - "version": "1.0.30001707", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001707.tgz", - "integrity": "sha512-3qtRjw/HQSMlDWf+X79N206fepf4SOOU6SQLMaq/0KkZLmSjPxAkBOQQ+FxbHKfHmYLZFfdWsO3KA90ceHPSnw==", + "version": "1.0.30001741", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001741.tgz", + "integrity": "sha512-QGUGitqsc8ARjLdgAfxETDhRbJ0REsP6O3I96TAth/mVjh2cYzN2u+3AzPP3aVSm2FehEItaJw1xd+IGBXWeSw==", "dev": true, "funding": [ { @@ -2260,15 +2131,13 @@ "type": "github", "url": "https://github.com/sponsors/ai" } - ], - "license": "CC-BY-4.0" + ] }, "node_modules/chai": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.0.tgz", - "integrity": "sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==", + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", "dev": true, - "license": "MIT", "dependencies": { "assertion-error": "^2.0.1", "check-error": "^2.1.1", @@ -2277,7 +2146,7 @@ "pathval": "^2.0.0" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/chalk": { @@ -2285,7 +2154,6 @@ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, - "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2302,7 +2170,6 @@ "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" } @@ -2312,7 +2179,6 @@ "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", "dev": true, - "license": "MIT", "engines": { "node": ">= 16" } @@ -2328,7 +2194,6 @@ "url": "https://github.com/sponsors/sibiraj-s" } ], - "license": "MIT", "engines": { "node": ">=8" } @@ -2337,15 +2202,13 @@ "version": "1.4.3", "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/cliui": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", "dev": true, - "license": "ISC", "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", @@ -2360,7 +2223,6 @@ "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", "dev": true, - "license": "MIT", "engines": { "iojs": ">= 1.0.0", "node": ">= 0.12.0" @@ -2370,15 +2232,13 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, - "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -2390,29 +2250,25 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/convert-source-map": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/create-jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", "dev": true, - "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "chalk": "^4.0.0", @@ -2433,15 +2289,13 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, - "license": "MIT", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -2452,11 +2306,10 @@ } }, "node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, - "license": "MIT", "dependencies": { "ms": "^2.1.3" }, @@ -2470,11 +2323,10 @@ } }, "node_modules/dedent": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.3.tgz", - "integrity": "sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==", + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz", + "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==", "dev": true, - "license": "MIT", "peerDependencies": { "babel-plugin-macros": "^3.1.0" }, @@ -2489,7 +2341,6 @@ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -2498,15 +2349,13 @@ "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/deepmerge": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -2516,7 +2365,6 @@ "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -2526,7 +2374,6 @@ "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", "dev": true, - "license": "BSD-3-Clause", "engines": { "node": ">=0.3.1" } @@ -2536,40 +2383,21 @@ "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", "dev": true, - "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/ejs": { - "version": "3.1.10", - "resolved": "https://registry.npmjs.org/ejs/-/ejs-3.1.10.tgz", - "integrity": "sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "jake": "^10.8.5" - }, - "bin": { - "ejs": "bin/cli.js" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/electron-to-chromium": { - "version": "1.5.124", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.124.tgz", - "integrity": "sha512-riELkpDUqBi00gqreV3RIGoowxGrfueEKBd6zPdOk/I8lvuFpBGNkYoHof3zUHbiTBsIU8oxdIIL/WNrAG1/7A==", - "dev": true, - "license": "ISC" + "version": "1.5.217", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.217.tgz", + "integrity": "sha512-Pludfu5iBxp9XzNl0qq2G87hdD17ZV7h5T4n6rQXDi3nCyloBV3jreE9+8GC6g4X/5yxqVgXEURpcLtM0WS4jA==", + "dev": true }, "node_modules/emittery": { "version": "0.13.1", "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=12" }, @@ -2581,15 +2409,13 @@ "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/error-ex": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", "dev": true, - "license": "MIT", "dependencies": { "is-arrayish": "^0.2.1" } @@ -2599,7 +2425,6 @@ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -2609,7 +2434,6 @@ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, @@ -2618,20 +2442,19 @@ } }, "node_modules/eslint": { - "version": "9.31.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.31.0.tgz", - "integrity": "sha512-QldCVh/ztyKJJZLr4jXNUByx3gR+TDYZCRXEktiZoUR3PGy4qCmSbkxcIle8GEwGpb5JBZazlaJ/CxLidXdEbQ==", + "version": "9.35.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.35.0.tgz", + "integrity": "sha512-QePbBFMJFjgmlE+cXAlbHZbHpdFVS2E/6vzCy7aKlebddvl1vadiC4JFV5u/wqTkNUwEV8WrQi257jf5f06hrg==", "dev": true, - "license": "MIT", "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.21.0", - "@eslint/config-helpers": "^0.3.0", - "@eslint/core": "^0.15.0", + "@eslint/config-helpers": "^0.3.1", + "@eslint/core": "^0.15.2", "@eslint/eslintrc": "^3.3.1", - "@eslint/js": "9.31.0", - "@eslint/plugin-kit": "^0.3.1", + "@eslint/js": "9.35.0", + "@eslint/plugin-kit": "^0.3.5", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.2", @@ -2683,7 +2506,6 @@ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" @@ -2700,7 +2522,6 @@ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, @@ -2723,7 +2544,6 @@ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, @@ -2731,12 +2551,20 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/eslint/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, "node_modules/eslint/node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" }, @@ -2749,7 +2577,6 @@ "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "acorn": "^8.15.0", "acorn-jsx": "^5.3.2", @@ -2767,7 +2594,6 @@ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, @@ -2780,7 +2606,6 @@ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", "dev": true, - "license": "BSD-2-Clause", "bin": { "esparse": "bin/esparse.js", "esvalidate": "bin/esvalidate.js" @@ -2794,7 +2619,6 @@ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "estraverse": "^5.1.0" }, @@ -2807,7 +2631,6 @@ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "estraverse": "^5.2.0" }, @@ -2820,7 +2643,6 @@ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "dev": true, - "license": "BSD-2-Clause", "engines": { "node": ">=4.0" } @@ -2830,7 +2652,6 @@ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", "dev": true, - "license": "BSD-2-Clause", "engines": { "node": ">=0.10.0" } @@ -2840,7 +2661,6 @@ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", "dev": true, - "license": "MIT", "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", @@ -2873,7 +2693,6 @@ "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/expect-utils": "^29.7.0", "jest-get-type": "^29.6.3", @@ -2889,15 +2708,13 @@ "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/fast-glob": { "version": "3.3.3", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", "dev": true, - "license": "MIT", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -2914,7 +2731,6 @@ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, - "license": "ISC", "dependencies": { "is-glob": "^4.0.1" }, @@ -2926,22 +2742,19 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/fastq": { "version": "1.19.1", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", "dev": true, - "license": "ISC", "dependencies": { "reusify": "^1.0.4" } @@ -2951,7 +2764,6 @@ "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", "dev": true, - "license": "Apache-2.0", "dependencies": { "bser": "2.1.1" } @@ -2961,7 +2773,6 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, - "license": "MIT", "dependencies": { "flat-cache": "^4.0.0" }, @@ -2969,35 +2780,11 @@ "node": ">=16.0.0" } }, - "node_modules/filelist": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/filelist/-/filelist-1.0.4.tgz", - "integrity": "sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "minimatch": "^5.0.1" - } - }, - "node_modules/filelist/node_modules/minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/fill-range": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, - "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -3010,7 +2797,6 @@ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, - "license": "MIT", "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" @@ -3027,7 +2813,6 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, - "license": "MIT", "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" @@ -3040,15 +2825,13 @@ "version": "3.3.3", "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/fsevents": { "version": "2.3.3", @@ -3056,7 +2839,6 @@ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "dev": true, "hasInstallScript": true, - "license": "MIT", "optional": true, "os": [ "darwin" @@ -3070,7 +2852,6 @@ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "dev": true, - "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -3080,7 +2861,6 @@ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -3090,7 +2870,6 @@ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", "dev": true, - "license": "ISC", "engines": { "node": "6.* || 8.* || >= 10.*" } @@ -3100,7 +2879,6 @@ "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=8.0.0" } @@ -3110,7 +2888,6 @@ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, @@ -3124,7 +2901,6 @@ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, - "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -3145,7 +2921,6 @@ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", "dev": true, - "license": "ISC", "dependencies": { "is-glob": "^4.0.3" }, @@ -3168,7 +2943,6 @@ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" }, @@ -3181,7 +2955,6 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=18" }, @@ -3193,22 +2966,40 @@ "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/graphemer": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", "dev": true, - "license": "MIT" + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -3218,7 +3009,6 @@ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dev": true, - "license": "MIT", "dependencies": { "function-bind": "^1.1.2" }, @@ -3230,25 +3020,22 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/human-signals": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=10.17.0" } }, "node_modules/ignore": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", "dev": true, - "license": "MIT", "engines": { "node": ">= 4" } @@ -3258,7 +3045,6 @@ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", "dev": true, - "license": "MIT", "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" @@ -3275,7 +3061,6 @@ "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", "dev": true, - "license": "MIT", "dependencies": { "pkg-dir": "^4.2.0", "resolve-cwd": "^3.0.0" @@ -3295,7 +3080,6 @@ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.8.19" } @@ -3306,7 +3090,6 @@ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", "dev": true, - "license": "ISC", "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -3316,22 +3099,19 @@ "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/is-core-module": { "version": "2.16.1", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", "dev": true, - "license": "MIT", "dependencies": { "hasown": "^2.0.2" }, @@ -3347,7 +3127,6 @@ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -3357,7 +3136,6 @@ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -3367,7 +3145,6 @@ "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -3377,7 +3154,6 @@ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, - "license": "MIT", "dependencies": { "is-extglob": "^2.1.1" }, @@ -3390,7 +3166,6 @@ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.12.0" } @@ -3400,7 +3175,6 @@ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" }, @@ -3412,15 +3186,13 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/istanbul-lib-coverage": { "version": "3.2.2", "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", "dev": true, - "license": "BSD-3-Clause", "engines": { "node": ">=8" } @@ -3430,7 +3202,6 @@ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", @@ -3447,7 +3218,6 @@ "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "istanbul-lib-coverage": "^3.0.0", "make-dir": "^4.0.0", @@ -3462,7 +3232,6 @@ "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "debug": "^4.1.1", "istanbul-lib-coverage": "^3.0.0", @@ -3473,11 +3242,10 @@ } }, "node_modules/istanbul-reports": { - "version": "3.1.7", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", - "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "html-escaper": "^2.0.0", "istanbul-lib-report": "^3.0.0" @@ -3486,54 +3254,11 @@ "node": ">=8" } }, - "node_modules/jake": { - "version": "10.9.2", - "resolved": "https://registry.npmjs.org/jake/-/jake-10.9.2.tgz", - "integrity": "sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "async": "^3.2.3", - "chalk": "^4.0.2", - "filelist": "^1.0.4", - "minimatch": "^3.1.2" - }, - "bin": { - "jake": "bin/cli.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/jake/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/jake/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, "node_modules/jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/core": "^29.7.0", "@jest/types": "^29.6.3", @@ -3560,7 +3285,6 @@ "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", "dev": true, - "license": "MIT", "dependencies": { "execa": "^5.0.0", "jest-util": "^29.7.0", @@ -3575,7 +3299,6 @@ "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/environment": "^29.7.0", "@jest/expect": "^29.7.0", @@ -3607,7 +3330,6 @@ "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", "dev": true, - "license": "MIT", "dependencies": { "@jest/core": "^29.7.0", "@jest/test-result": "^29.7.0", @@ -3641,7 +3363,6 @@ "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", "dev": true, - "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", "@jest/test-sequencer": "^29.7.0", @@ -3687,7 +3408,6 @@ "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", "dev": true, - "license": "MIT", "dependencies": { "chalk": "^4.0.0", "diff-sequences": "^29.6.3", @@ -3703,7 +3423,6 @@ "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", "dev": true, - "license": "MIT", "dependencies": { "detect-newline": "^3.0.0" }, @@ -3716,7 +3435,6 @@ "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", "dev": true, - "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "chalk": "^4.0.0", @@ -3733,7 +3451,6 @@ "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/environment": "^29.7.0", "@jest/fake-timers": "^29.7.0", @@ -3751,7 +3468,6 @@ "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", "dev": true, - "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } @@ -3761,7 +3477,6 @@ "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", "dev": true, - "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/graceful-fs": "^4.1.3", @@ -3787,7 +3502,6 @@ "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", "dev": true, - "license": "MIT", "dependencies": { "jest-get-type": "^29.6.3", "pretty-format": "^29.7.0" @@ -3801,7 +3515,6 @@ "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", "dev": true, - "license": "MIT", "dependencies": { "chalk": "^4.0.0", "jest-diff": "^29.7.0", @@ -3817,7 +3530,6 @@ "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", "dev": true, - "license": "MIT", "dependencies": { "@babel/code-frame": "^7.12.13", "@jest/types": "^29.6.3", @@ -3838,7 +3550,6 @@ "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", @@ -3853,7 +3564,6 @@ "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" }, @@ -3871,7 +3581,6 @@ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", "dev": true, - "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } @@ -3881,7 +3590,6 @@ "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", "dev": true, - "license": "MIT", "dependencies": { "chalk": "^4.0.0", "graceful-fs": "^4.2.9", @@ -3902,7 +3610,6 @@ "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", "dev": true, - "license": "MIT", "dependencies": { "jest-regex-util": "^29.6.3", "jest-snapshot": "^29.7.0" @@ -3916,7 +3623,6 @@ "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", "dev": true, - "license": "MIT", "dependencies": { "@jest/console": "^29.7.0", "@jest/environment": "^29.7.0", @@ -3949,7 +3655,6 @@ "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", "dev": true, - "license": "MIT", "dependencies": { "@jest/environment": "^29.7.0", "@jest/fake-timers": "^29.7.0", @@ -3983,7 +3688,6 @@ "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", "dev": true, - "license": "MIT", "dependencies": { "@babel/core": "^7.11.6", "@babel/generator": "^7.7.2", @@ -4015,7 +3719,6 @@ "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", "dev": true, - "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", @@ -4033,7 +3736,6 @@ "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", "dev": true, - "license": "MIT", "dependencies": { "@jest/types": "^29.6.3", "camelcase": "^6.2.0", @@ -4051,7 +3753,6 @@ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, @@ -4064,7 +3765,6 @@ "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", "dev": true, - "license": "MIT", "dependencies": { "@jest/test-result": "^29.7.0", "@jest/types": "^29.6.3", @@ -4084,7 +3784,6 @@ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", "dev": true, - "license": "MIT", "dependencies": { "@types/node": "*", "jest-util": "^29.7.0", @@ -4100,7 +3799,6 @@ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, - "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -4115,13 +3813,12 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -4136,7 +3833,6 @@ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", "dev": true, - "license": "MIT", "bin": { "jsesc": "bin/jsesc" }, @@ -4148,36 +3844,31 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/json5": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "dev": true, - "license": "MIT", "bin": { "json5": "lib/cli.js" }, @@ -4189,15 +3880,13 @@ "version": "6.2.0", "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-6.2.0.tgz", "integrity": "sha512-cYofQu2Xpom82S6qD778jBDpwvvy39s1l/hrYij2u9AMdQcGRpaBu6kY4mVhuno5kJVi1DAz4aiphA2WI1/OAw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/keyv": { "version": "4.5.4", "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, - "license": "MIT", "dependencies": { "json-buffer": "3.0.1" } @@ -4207,7 +3896,6 @@ "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -4217,7 +3905,6 @@ "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -4227,7 +3914,6 @@ "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", "dev": true, - "license": "MIT", "dependencies": { "prelude-ls": "^1.2.1", "type-check": "~0.4.0" @@ -4240,15 +3926,13 @@ "version": "1.2.4", "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, - "license": "MIT", "dependencies": { "p-locate": "^5.0.0" }, @@ -4263,44 +3947,31 @@ "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ==", - "deprecated": "This package is deprecated. Use the optional chaining (?.) operator instead.", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/lodash.memoize": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/loupe": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.3.tgz", - "integrity": "sha512-kkIp7XSkP78ZxJEsSxW3712C6teJVoeHHwgo9zJ380de7IYyJ2ISlxojcH2pC5OFLewESmnRi/+XCDIEEVyoug==", - "dev": true, - "license": "MIT" + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true }, "node_modules/lru-cache": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, - "license": "ISC", "dependencies": { "yallist": "^3.0.2" } @@ -4310,7 +3981,6 @@ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", "dev": true, - "license": "MIT", "dependencies": { "semver": "^7.5.3" }, @@ -4325,15 +3995,13 @@ "version": "1.3.6", "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/makeerror": { "version": "1.0.12", "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "tmpl": "1.0.5" } @@ -4342,15 +4010,13 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "dev": true, - "license": "MIT", "engines": { "node": ">= 8" } @@ -4360,7 +4026,6 @@ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dev": true, - "license": "MIT", "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" @@ -4374,7 +4039,6 @@ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -4384,7 +4048,6 @@ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", "dev": true, - "license": "ISC", "dependencies": { "brace-expansion": "^2.0.1" }, @@ -4395,26 +4058,38 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true, - "license": "MIT" + "dev": true + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true }, "node_modules/nise": { "version": "6.1.1", "resolved": "https://registry.npmjs.org/nise/-/nise-6.1.1.tgz", "integrity": "sha512-aMSAzLVY7LyeM60gvBS423nBmIPP+Wy7St7hsb+8/fc1HmeoHJfLO8CKse4u3BtOZvQLJghYPI2i/1WZrEj5/g==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.1", "@sinonjs/fake-timers": "^13.0.1", @@ -4428,7 +4103,6 @@ "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-13.0.5.tgz", "integrity": "sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.1" } @@ -4437,22 +4111,19 @@ "version": "0.4.0", "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/node-releases": { - "version": "2.0.19", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", - "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", - "dev": true, - "license": "MIT" + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.20.tgz", + "integrity": "sha512-7gK6zSXEH6neM212JgfYFXe+GmZQM+fia5SsusuBIUgnPheLFBmIPhtFoAQRj8/7wASYQnbDlHPVwY0BefoFgA==", + "dev": true }, "node_modules/normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -4462,7 +4133,6 @@ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", "dev": true, - "license": "MIT", "dependencies": { "path-key": "^3.0.0" }, @@ -4475,7 +4145,6 @@ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dev": true, - "license": "ISC", "dependencies": { "wrappy": "1" } @@ -4485,7 +4154,6 @@ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "dev": true, - "license": "MIT", "dependencies": { "mimic-fn": "^2.1.0" }, @@ -4501,7 +4169,6 @@ "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", "dev": true, - "license": "MIT", "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", @@ -4519,7 +4186,6 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, - "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -4535,7 +4201,6 @@ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, - "license": "MIT", "dependencies": { "p-limit": "^3.0.2" }, @@ -4551,7 +4216,6 @@ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -4561,7 +4225,6 @@ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, - "license": "MIT", "dependencies": { "callsites": "^3.0.0" }, @@ -4574,7 +4237,6 @@ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", "dev": true, - "license": "MIT", "dependencies": { "@babel/code-frame": "^7.0.0", "error-ex": "^1.3.1", @@ -4593,7 +4255,6 @@ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -4603,7 +4264,6 @@ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -4613,7 +4273,6 @@ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -4622,25 +4281,23 @@ "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/path-to-regexp": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.2.0.tgz", - "integrity": "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==", + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz", + "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", "dev": true, - "license": "MIT", - "engines": { - "node": ">=16" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/pathval": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", - "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", "dev": true, - "license": "MIT", "engines": { "node": ">= 14.16" } @@ -4649,15 +4306,13 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true, - "license": "MIT", "engines": { "node": ">=8.6" }, @@ -4666,11 +4321,10 @@ } }, "node_modules/pirates": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", - "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", "dev": true, - "license": "MIT", "engines": { "node": ">= 6" } @@ -4680,7 +4334,6 @@ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "dev": true, - "license": "MIT", "dependencies": { "find-up": "^4.0.0" }, @@ -4693,7 +4346,6 @@ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, - "license": "MIT", "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" @@ -4707,7 +4359,6 @@ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, - "license": "MIT", "dependencies": { "p-locate": "^4.1.0" }, @@ -4720,7 +4371,6 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, - "license": "MIT", "dependencies": { "p-try": "^2.0.0" }, @@ -4736,7 +4386,6 @@ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, - "license": "MIT", "dependencies": { "p-limit": "^2.2.0" }, @@ -4749,17 +4398,15 @@ "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.8.0" } }, "node_modules/prettier": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz", - "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, - "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" }, @@ -4775,7 +4422,6 @@ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dev": true, - "license": "MIT", "dependencies": { "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", @@ -4790,7 +4436,6 @@ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, @@ -4803,7 +4448,6 @@ "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", "dev": true, - "license": "MIT", "dependencies": { "kleur": "^3.0.3", "sisteransi": "^1.0.5" @@ -4817,7 +4461,6 @@ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -4836,8 +4479,7 @@ "type": "opencollective", "url": "https://opencollective.com/fast-check" } - ], - "license": "MIT" + ] }, "node_modules/queue-microtask": { "version": "1.2.3", @@ -4857,22 +4499,19 @@ "type": "consulting", "url": "https://feross.org/support" } - ], - "license": "MIT" + ] }, "node_modules/react-is": { "version": "18.3.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -4882,7 +4521,6 @@ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", "dev": true, - "license": "MIT", "dependencies": { "is-core-module": "^2.16.0", "path-parse": "^1.0.7", @@ -4903,7 +4541,6 @@ "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", "dev": true, - "license": "MIT", "dependencies": { "resolve-from": "^5.0.0" }, @@ -4916,7 +4553,6 @@ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -4926,7 +4562,6 @@ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } @@ -4936,7 +4571,6 @@ "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" } @@ -4946,7 +4580,6 @@ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", "dev": true, - "license": "MIT", "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" @@ -4971,17 +4604,15 @@ "url": "https://feross.org/support" } ], - "license": "MIT", "dependencies": { "queue-microtask": "^1.2.2" } }, "node_modules/semver": { - "version": "7.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", - "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", "dev": true, - "license": "ISC", "bin": { "semver": "bin/semver.js" }, @@ -4994,7 +4625,6 @@ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, - "license": "MIT", "dependencies": { "shebang-regex": "^3.0.0" }, @@ -5007,7 +4637,6 @@ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -5016,15 +4645,13 @@ "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/sinon": { "version": "18.0.1", "resolved": "https://registry.npmjs.org/sinon/-/sinon-18.0.1.tgz", "integrity": "sha512-a2N2TDY1uGviajJ6r4D1CyRAkzE9NNVlYOV1wX5xQDuAk0ONgzgRl0EjCQuRCPxOwp13ghsMwt9Gdldujs39qw==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.1", "@sinonjs/fake-timers": "11.2.2", @@ -5043,7 +4670,6 @@ "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-11.2.2.tgz", "integrity": "sha512-G2piCSxQ7oWOxwGSAyFHfPIsyeJGXYtc6mFbnFA+kRXkiEnTl8c/8jul2S329iFBnDI9HGoeWWAZvuvOkZccgw==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.0" } @@ -5052,15 +4678,13 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/slash": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -5070,7 +4694,6 @@ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, - "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } @@ -5080,7 +4703,6 @@ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", "dev": true, - "license": "MIT", "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -5090,15 +4712,13 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "dev": true, - "license": "BSD-3-Clause" + "dev": true }, "node_modules/stack-utils": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", "dev": true, - "license": "MIT", "dependencies": { "escape-string-regexp": "^2.0.0" }, @@ -5111,7 +4731,6 @@ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -5121,7 +4740,6 @@ "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", "dev": true, - "license": "MIT", "dependencies": { "char-regex": "^1.0.2", "strip-ansi": "^6.0.0" @@ -5135,7 +4753,6 @@ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "dev": true, - "license": "MIT", "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -5150,7 +4767,6 @@ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dev": true, - "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" }, @@ -5163,7 +4779,6 @@ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } @@ -5173,7 +4788,6 @@ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -5183,7 +4797,6 @@ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" }, @@ -5196,7 +4809,6 @@ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, - "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -5209,7 +4821,6 @@ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -5222,7 +4833,6 @@ "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", "dev": true, - "license": "ISC", "dependencies": { "@istanbuljs/schema": "^0.1.2", "glob": "^7.1.4", @@ -5247,7 +4857,6 @@ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" }, @@ -5260,17 +4869,15 @@ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", "dev": true, - "license": "MIT", "engines": { "node": ">=14.0.0" } }, "node_modules/tinyspy": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", - "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.3.tgz", + "integrity": "sha512-t2T/WLB2WRgZ9EpE4jgPJ9w+i66UZfDc8wHh0xrwiRNN+UwH98GIJkTeZqX9rg0i0ptwzqW+uYeIF0T4F8LR7A==", "dev": true, - "license": "MIT", "engines": { "node": ">=14.0.0" } @@ -5279,15 +4886,13 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", - "dev": true, - "license": "BSD-3-Clause" + "dev": true }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, - "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, @@ -5300,7 +4905,6 @@ "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=18.12" }, @@ -5309,21 +4913,19 @@ } }, "node_modules/ts-jest": { - "version": "29.3.0", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.3.0.tgz", - "integrity": "sha512-4bfGBX7Gd1Aqz3SyeDS9O276wEU/BInZxskPrbhZLyv+c1wskDCqDFMJQJLWrIr/fKoAH4GE5dKUlrdyvo+39A==", + "version": "29.4.1", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.1.tgz", + "integrity": "sha512-SaeUtjfpg9Uqu8IbeDKtdaS0g8lS6FT6OzM3ezrDfErPJPHNDo/Ey+VFGP1bQIDfagYDLyRpd7O15XpG1Es2Uw==", "dev": true, - "license": "MIT", "dependencies": { "bs-logger": "^0.2.6", - "ejs": "^3.1.10", "fast-json-stable-stringify": "^2.1.0", - "jest-util": "^29.0.0", + "handlebars": "^4.7.8", "json5": "^2.2.3", "lodash.memoize": "^4.1.2", "make-error": "^1.3.6", - "semver": "^7.7.1", - "type-fest": "^4.37.0", + "semver": "^7.7.2", + "type-fest": "^4.41.0", "yargs-parser": "^21.1.1" }, "bin": { @@ -5334,10 +4936,11 @@ }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", - "@jest/transform": "^29.0.0", - "@jest/types": "^29.0.0", - "babel-jest": "^29.0.0", - "jest": "^29.0.0", + "@jest/transform": "^29.0.0 || ^30.0.0", + "@jest/types": "^29.0.0 || ^30.0.0", + "babel-jest": "^29.0.0 || ^30.0.0", + "jest": "^29.0.0 || ^30.0.0", + "jest-util": "^29.0.0 || ^30.0.0", "typescript": ">=4.3 <6" }, "peerDependenciesMeta": { @@ -5355,15 +4958,17 @@ }, "esbuild": { "optional": true + }, + "jest-util": { + "optional": true } } }, "node_modules/ts-jest/node_modules/type-fest": { - "version": "4.38.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.38.0.tgz", - "integrity": "sha512-2dBz5D5ycHIoliLYLi0Q2V7KRaDlH0uWIvmk7TYlAg5slqwiPv1ezJdZm1QEM0xgk29oYWMCbIG7E6gHpvChlg==", + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", "dev": true, - "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=16" }, @@ -5376,7 +4981,6 @@ "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", "dev": true, - "license": "MIT", "dependencies": { "@cspotcode/source-map-support": "^0.8.0", "@tsconfig/node10": "^1.0.7", @@ -5420,7 +5024,6 @@ "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", "dev": true, - "license": "BSD-3-Clause", "engines": { "node": ">=0.3.1" } @@ -5429,15 +5032,13 @@ "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", - "dev": true, - "license": "0BSD" + "dev": true }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", "dev": true, - "license": "MIT", "dependencies": { "prelude-ls": "^1.2.1" }, @@ -5450,7 +5051,6 @@ "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } @@ -5460,7 +5060,6 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", "dev": true, - "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=10" }, @@ -5469,11 +5068,10 @@ } }, "node_modules/typescript": { - "version": "5.8.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz", - "integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==", + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz", + "integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==", "dev": true, - "license": "Apache-2.0", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -5482,12 +5080,24 @@ "node": ">=14.17" } }, - "node_modules/undici-types": { - "version": "6.20.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", - "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", "dev": true, - "license": "MIT" + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true }, "node_modules/update-browserslist-db": { "version": "1.1.3", @@ -5508,7 +5118,6 @@ "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" @@ -5525,7 +5134,6 @@ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "punycode": "^2.1.0" } @@ -5534,15 +5142,13 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/v8-to-istanbul": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", "dev": true, - "license": "ISC", "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", "@types/istanbul-lib-coverage": "^2.0.1", @@ -5557,7 +5163,6 @@ "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", "dev": true, - "license": "Apache-2.0", "dependencies": { "makeerror": "1.0.12" } @@ -5567,7 +5172,6 @@ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, - "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -5583,17 +5187,21 @@ "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true + }, "node_modules/wrap-ansi": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "dev": true, - "license": "MIT", "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -5610,15 +5218,13 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/write-file-atomic": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", "dev": true, - "license": "ISC", "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^3.0.7" @@ -5632,7 +5238,6 @@ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", "dev": true, - "license": "ISC", "engines": { "node": ">=10" } @@ -5641,15 +5246,13 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/yargs": { "version": "17.7.2", "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "dev": true, - "license": "MIT", "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", @@ -5668,7 +5271,6 @@ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", "dev": true, - "license": "ISC", "engines": { "node": ">=12" } @@ -5678,7 +5280,6 @@ "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } @@ -5688,7 +5289,6 @@ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, diff --git a/source/lambda/use-case-management/package.json b/source/lambda/use-case-management/package.json index 84c9e68b..758d5ea0 100644 --- a/source/lambda/use-case-management/package.json +++ b/source/lambda/use-case-management/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/use-case-management", - "version": "3.0.7", + "version": "4.0.0", "description": "This lambda supports APIs that provide the capability to deploy GenAI use cases", "main": "index.ts", "scripts": { @@ -13,7 +13,7 @@ "code-linter-js": "./node_modules/eslint/bin/eslint.js lambda --ext .js", "code-linter-ts": "./node_modules/eslint/bin/eslint.js bin lib --ext .ts", "code-linter": "npm run code-linter-ts && npm run code-linter-js", - "code-formatter": "./node_modules/prettier/bin-prettier.js --config .prettierrc.yml '**/*.ts' '**/*.js' --write" + "code-formatter": "prettier --config ../../../.prettierrc.yml --ignore-path ../../../.prettierignore --write '**/*.{js,ts,json,css,md}'" }, "author": { "name": "Amazon Web Services", @@ -31,7 +31,7 @@ "eslint": "^9.16.0", "jest": "^29.7.0", "lodash": "^4.17.21", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" diff --git a/source/lambda/use-case-management/power-tools-init.ts b/source/lambda/use-case-management/power-tools-init.ts index f35d175c..4507e6c8 100644 --- a/source/lambda/use-case-management/power-tools-init.ts +++ b/source/lambda/use-case-management/power-tools-init.ts @@ -13,4 +13,4 @@ export const logger = new Logger(serviceName); export const metrics = new Metrics({ namespace: CloudWatchNamespace.USE_CASE_DEPLOYMENTS, serviceName: serviceName.serviceName -}); +}); \ No newline at end of file diff --git a/source/lambda/use-case-management/s3/s3-management.ts b/source/lambda/use-case-management/s3/s3-management.ts new file mode 100644 index 00000000..1fafa6dd --- /dev/null +++ b/source/lambda/use-case-management/s3/s3-management.ts @@ -0,0 +1,172 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { S3Client } from '@aws-sdk/client-s3'; +import { createPresignedPost } from '@aws-sdk/s3-presigned-post'; +import { AWSClientManager } from 'aws-sdk-lib'; +import { logger, tracer } from '../power-tools-init'; +import { MCP_SCHEMA_UPLOAD_CONSTRAINTS } from '../utils/constants'; +import { generateUUID } from '../utils/utils'; +import { FileUploadInfo } from '../model/adapters/mcp-adapter'; + +/** + * Interface for S3 presigned POST response + */ +export interface PresignedPostResponse { + uploadUrl: string; + formFields: Record; + fileName: string; + expiresIn: number; + createdAt: string; +} + +/** + * Interface for schema upload parameters + */ +export interface SchemaUploadParams { + fileName: string; + schemaType: string; + userId: string; + contentType: string; + fileExtension: string; +} + +/** + * Interface for multiple schema uploads response + */ +export interface SchemaUploadResponse { + uploads: PresignedPostResponse[]; +} + +/** + * S3 management class that encapsulates S3 client operations + */ +export class S3Management { + private s3Client: S3Client; + + constructor() { + this.s3Client = AWSClientManager.getServiceClient('s3', tracer); + } + + /** + * Creates a presigned POST for schema upload to S3 + * @param params - Schema upload parameters + * @returns Promise - The presigned POST response + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###createSchemaUploadPresignedPost' }) + public async createSchemaUploadPresignedPost(params: SchemaUploadParams): Promise { + const bucketName = process.env.GAAB_DEPLOYMENTS_BUCKET!; + const keyPrefix = `mcp/schemas/${params.schemaType}/`; + + // Generate unique s3Key for the file + const uuid = generateUUID(); + const generatedFileName = `${uuid}${params.fileExtension}`; + const s3Key = `${keyPrefix}${generatedFileName}`; + + try { + // Create XML object tagging for metadata and to apply lifecycle policy rule using 'status' tag + const createTag = (tagKey: string, tagValue: string): string => { + return `${tagKey}${tagValue}`; + }; + + const createTagSet = (tags: string[]): string => { + return `${tags.join('')}`; + }; + + const schemaTypeTag = createTag('schemaType', params.schemaType); + const uploadedByTag = createTag('uploadedBy', params.userId); + const sourceTag = createTag('source', 'mcp-api'); + const statusTag = createTag('status', 'inactive'); + + const tags = [schemaTypeTag, uploadedByTag, sourceTag, statusTag]; + const tagging = createTagSet(tags); + + const presignedPost = await createPresignedPost(this.s3Client, { + Bucket: bucketName, + Key: s3Key, + Conditions: [ + // Ensure key starts with expected prefix to prevent path traversal + ['starts-with', '$key', keyPrefix], + [ + 'content-length-range', + MCP_SCHEMA_UPLOAD_CONSTRAINTS.MIN_FILE_SIZE_BYTES, + MCP_SCHEMA_UPLOAD_CONSTRAINTS.MAX_FILE_SIZE_BYTES + ], + ['eq', '$x-amz-meta-userid', params.userId], + ['eq', '$x-amz-meta-filename', params.fileName], + ['eq', '$x-amz-meta-fileextension', params.fileExtension], + // Enforce content type to prevent MIME type confusion attacks + ['eq', '$Content-Type', params.contentType], + ['eq', '$tagging', tagging] + ], + Fields: { + key: s3Key, + 'x-amz-meta-userid': params.userId, + 'x-amz-meta-filename': params.fileName, + 'x-amz-meta-fileextension': params.fileExtension, + 'Content-Type': params.contentType, + 'tagging': tagging + }, + Expires: MCP_SCHEMA_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS + }); + + logger.info( + `Generated S3 presigned POST for schema upload - s3Key: ${s3Key}, fileName: ${params.fileName}, schemaType: ${params.schemaType}` + ); + + return { + uploadUrl: presignedPost.url, + formFields: presignedPost.fields, + fileName: params.fileName, + expiresIn: MCP_SCHEMA_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS, + createdAt: new Date().toISOString() + }; + } catch (error) { + logger.error( + `Failed to generate S3 presigned POST - bucketName: ${bucketName}, key: ${s3Key}, error: ${(error as Error).message}` + ); + const errorMsg = `Failed to generate presigned POST: ${(error as Error).message}`; + logger.error(`S3Management presigned POST generation failed: ${errorMsg}`); + throw new Error(errorMsg); + } + } + + /** + * Creates presigned POSTs for multiple schema uploads to S3 + * @param userId - The user ID + * @param files - Array of file upload information + * @returns Promise - The multiple presigned POST response + */ + @tracer.captureMethod({ captureResponse: true, subSegmentName: '###createMultipleSchemaUploadPresignedPosts' }) + public async createSchemaUploadPresignedPosts( + userId: string, + files: FileUploadInfo[] + ): Promise { + logger.info(`Creating presigned POSTs for ${files.length} schema uploads`); + + try { + const uploads = await Promise.all( + files.map(async (file) => { + const params: SchemaUploadParams = { + fileName: file.fileName, + schemaType: file.schemaType, + userId, + contentType: file.contentType, + fileExtension: file.fileExtension + }; + + return await this.createSchemaUploadPresignedPost(params); + }) + ); + + logger.info(`Successfully created ${uploads.length} presigned POSTs for schema uploads`); + + return { + uploads + }; + } catch (error) { + logger.error(`Failed to create multiple presigned POSTs, error: ${(error as Error).message}`); + throw error; + } + } +} diff --git a/source/lambda/use-case-management/test/agents-handler.test.ts b/source/lambda/use-case-management/test/agents-handler.test.ts new file mode 100644 index 00000000..61fe5a8b --- /dev/null +++ b/source/lambda/use-case-management/test/agents-handler.test.ts @@ -0,0 +1,898 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { agentsLambdaHandler } from '../agents-handler'; + +import { + CloudFormationClient, + CreateStackCommand, + DeleteStackCommand, + DescribeStacksCommand, + UpdateStackCommand +} from '@aws-sdk/client-cloudformation'; +import { + DeleteItemCommand, + DescribeTableCommand, + DynamoDBClient, + GetItemCommand, + PutItemCommand, + ScanCommand, + UpdateItemCommand +} from '@aws-sdk/client-dynamodb'; +import { APIGatewayEvent } from 'aws-lambda'; +import { mockClient } from 'aws-sdk-client-mock'; +import { APIGatewayClient, GetResourcesCommand } from '@aws-sdk/client-api-gateway'; +import { + ARTIFACT_BUCKET_ENV_VAR, + CLIENT_ID_ENV_VAR, + COGNITO_DOMAIN_PREFIX_VAR, + COGNITO_POLICY_TABLE_ENV_VAR, + CfnParameterKeys, + FILES_METADATA_TABLE_NAME_ENV_VAR, + IS_INTERNAL_USER_ENV_VAR, + MCP_CONTENT_TYPES, + MODEL_INFO_TABLE_NAME_ENV_VAR, + MULTIMODAL_DATA_BUCKET_ENV_VAR, + POWERTOOLS_METRICS_NAMESPACE_ENV_VAR, + TEMPLATE_FILE_EXTN_ENV_VAR, + USER_POOL_ID_ENV_VAR, + USE_CASES_TABLE_NAME_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR, + GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR, + SHARED_ECR_CACHE_PREFIX_ENV_VAR, + AMAZON_TRACE_ID_HEADER +} from '../utils/constants'; + +import { marshall } from '@aws-sdk/util-dynamodb'; + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn().mockReturnValue({}) +})); + +jest.mock('../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + metrics: { + addMetric: jest.fn(), + publishStoredMetrics: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn() + } +})); + +jest.mock('../utils/cognito_jwt_verifier', () => ({ + TokenVerifier: jest.fn().mockImplementation(() => ({ + verifyToken: jest.fn().mockResolvedValue({ + 'cognito:groups': ['admin'] + }) + })) +})); + +// Create agent builder test events +const createAgentBuilderUseCaseApiEvent = { + body: JSON.stringify({ + UseCaseType: 'AgentBuilder', + UseCaseName: 'fake-agent-builder', + UseCaseDescription: 'fake-description', + DefaultUserEmail: 'fake-email@example.com', + DeployUI: true, + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.1, + RAGEnabled: false, + Streaming: true + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant', + MCPServers: [], + Tools: [], + MemoryConfig: { + LongTermEnabled: true + } + } + }), + resource: '/deployments/agents', + httpMethod: 'POST', + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +const updateAgentBuilderUseCaseApiEvent = { + body: JSON.stringify({ + UseCaseType: 'AgentBuilder', + UseCaseName: 'updated-agent-builder', + UseCaseDescription: 'updated-description', + DefaultUserEmail: 'fake-email@example.com', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.2, + RAGEnabled: false, + Streaming: true + }, + AgentParams: { + SystemPrompt: 'You are an updated helpful assistant', + MCPServers: [], + Tools: [], + MemoryConfig: { + LongTermEnabled: false + } + } + }), + resource: '/deployments/agents/{useCaseId}', + pathParameters: { + useCaseId: '11111111-2222-2222-3333-333344444444' + }, + httpMethod: 'PATCH', + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +const getAgentBuilderUseCaseApiEvent = { + resource: '/deployments/agents', + httpMethod: 'GET', + queryStringParameters: { + pageNumber: '1' + }, + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +const deleteAgentBuilderUseCaseApiEvent = { + resource: '/deployments/agents/{useCaseId}', + pathParameters: { + useCaseId: '11111111-2222-2222-3333-333344444444' + }, + httpMethod: 'DELETE', + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +const permanentlyDeleteAgentBuilderUseCaseApiEvent = { + resource: '/deployments/agents/{useCaseId}', + pathParameters: { + useCaseId: '11111111-2222-2222-3333-333344444444' + }, + httpMethod: 'DELETE', + queryStringParameters: { + permanent: 'true' + }, + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +const getSingleAgentBuilderUseCaseApiEvent = { + resource: '/deployments/agents/{useCaseId}', + pathParameters: { + useCaseId: '11111111-2222-2222-3333-333344444444' + }, + httpMethod: 'GET', + headers: { + Authorization: 'Bearer fake-jwt-token' + }, + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +describe('When invoking the agents lambda function', () => { + let cfnMockedClient: any; + let ddbMockedClient: any; + let apiGatewayMockedClient: any; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; + process.env[AMAZON_TRACE_ID_HEADER] = 'test-trace-id'; + process.env[POWERTOOLS_METRICS_NAMESPACE_ENV_VAR] = 'UnitTest'; + process.env[USE_CASES_TABLE_NAME_ENV_VAR] = 'UseCaseTable'; + process.env[ARTIFACT_BUCKET_ENV_VAR] = 'fake-artifact-bucket'; + process.env[COGNITO_POLICY_TABLE_ENV_VAR] = 'fake-table'; + process.env[USER_POOL_ID_ENV_VAR] = 'fake-user-pool-id'; + process.env[CLIENT_ID_ENV_VAR] = 'fake-client-id'; + process.env[COGNITO_DOMAIN_PREFIX_VAR] = 'fake-domain-prefix'; + process.env[TEMPLATE_FILE_EXTN_ENV_VAR] = '.json'; + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'fake-use-case-config-table'; + process.env[MODEL_INFO_TABLE_NAME_ENV_VAR] = 'fake-model-info-table'; + process.env[DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR] = 'fake-deployment-platform-stack'; + process.env[GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR] = 'fake-deployments-bucket'; + process.env[SHARED_ECR_CACHE_PREFIX_ENV_VAR] = 'fake-ecr-prefix'; + process.env[FILES_METADATA_TABLE_NAME_ENV_VAR] = 'fake-multimodal-table'; + process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR] = 'fake-multimodal-bucket'; + + cfnMockedClient = mockClient(CloudFormationClient); + ddbMockedClient = mockClient(DynamoDBClient); + apiGatewayMockedClient = mockClient(APIGatewayClient); + }); + + afterEach(() => { + cfnMockedClient.reset(); + ddbMockedClient.reset(); + apiGatewayMockedClient.reset(); + }); + + describe('on success', () => { + beforeEach(() => { + cfnMockedClient.on(CreateStackCommand).resolves({ + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid' + }); + ddbMockedClient.on(PutItemCommand, { TableName: `${process.env[USE_CASES_TABLE_NAME_ENV_VAR]}` }).resolves({ + Attributes: { + StackId: { + S: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid' + } + } + }); + + ddbMockedClient.on(PutItemCommand).resolves({ + Attributes: { + key: { + S: 'key' + }, + config: { + S: 'config' + } + } + }); + + ddbMockedClient.on(DescribeTableCommand).resolves({ + Table: { + TableStatus: 'ACTIVE' + } + }); + + cfnMockedClient.on(UpdateStackCommand).resolves({ + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid' + }); + ddbMockedClient.on(UpdateItemCommand).resolves({ + Attributes: { + StackId: { + S: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid' + } + } + }); + + cfnMockedClient.on(DeleteStackCommand).resolves({}); + ddbMockedClient.on(DeleteItemCommand).resolves({}); + ddbMockedClient.on(GetItemCommand).resolvesOnce({ + Item: marshall({ + UseCaseId: 'fake-id', + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid', + UseCaseConfigRecordKey: '11111111-fake-id' + }) + }); + apiGatewayMockedClient.on(GetResourcesCommand).resolves({ + items: [ + { + id: 'abc123', + path: '/' + } + ] + }); + }); + + it('should create an agent builder stack and update ddb for create action', async () => { + expect(await agentsLambdaHandler(createAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent)).toEqual({ + 'body': 'SUCCESS', + 'headers': { + 'Access-Control-Allow-Credentials': true, + 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.JSON + }, + 'isBase64Encoded': false, + 'statusCode': 200 + }); + }); + + it('should allow update to an agent builder stack', async () => { + ddbMockedClient + .on(GetItemCommand, { TableName: process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] }) + .resolves({ + Item: marshall({ + key: 'mockUseCaseConfigRecordKey', + config: { + UseCaseName: 'fake-use-case', + UseCaseType: 'AgentBuilder' + } + }) + }); + + cfnMockedClient.on(DescribeStacksCommand).resolves({ + Stacks: [ + { + StackName: 'test', + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid', + CreationTime: new Date(), + StackStatus: 'CREATE_COMPLETE' + } + ] + }); + + expect(await agentsLambdaHandler(updateAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent)).toEqual({ + 'body': 'SUCCESS', + 'headers': { + 'Access-Control-Allow-Credentials': true, + 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.JSON + }, + 'isBase64Encoded': false, + 'statusCode': 200 + }); + }); + + it('should get deployed agent builder stacks with a GET request', async () => { + ddbMockedClient.on(ScanCommand).resolves({ + Items: [ + { + 'Description': { 'S': 'test agent builder case 1' }, + 'CreatedBy': { 'S': 'fake-user-id' }, + 'CreatedDate': { 'S': '2024-07-22T20:31:00Z' }, + 'StackId': { + 'S': 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid-1' + }, + 'Name': { 'S': 'test-agent-builder-1' }, + 'UseCaseId': { 'S': '11111111-fake-id' }, + 'UseCaseType': { 'S': 'AgentBuilder' }, + 'useCaseUUID': { 'S': 'fake-uuid' }, + 'UseCaseConfigRecordKey': { 'S': 'fake-uuid' } + }, + { + 'Description': { 'S': 'test agent builder case 2' }, + 'CreatedBy': { 'S': 'fake-user-id' }, + 'CreatedDate': { 'S': '2024-07-22T20:32:00Z' }, + 'StackId': { + 'S': 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid-2' + }, + 'Name': { 'S': 'test-agent-builder-2' }, + 'UseCaseId': { 'S': '11111111-fake-id' }, + 'UseCaseType': { 'S': 'AgentBuilder' }, + 'useCaseUUID': { 'S': 'fake-uuid' }, + 'UseCaseConfigRecordKey': { 'S': 'fake-uuid-2' } + } + ], + ScannedCount: 2, + LastEvaluatedKey: { + 'Description': { 'S': 'test agent builder case 2' }, + 'CreatedBy': { 'S': 'fake-user-id' }, + 'StackId': { + 'S': 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid-2' + }, + 'Name': { 'S': 'test-agent-builder-2' }, + 'UseCaseId': { 'S': '11111111-fake-id' }, + 'UseCaseConfigRecordKey': { 'S': 'fake-uuid-2' } + } + }); + + ddbMockedClient + .on(GetItemCommand, { TableName: process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] }) + .resolves({ + Item: marshall({ + key: 'mockUseCaseConfigRecordKey', + config: { + LlmParams: { + ModelProvider: 'Bedrock' + }, + UseCaseType: 'AgentBuilder' + } + }) + }); + + cfnMockedClient.on(DescribeStacksCommand).resolves({ + Stacks: [ + { + StackName: 'test', + StackId: 'fake-stack-id', + CreationTime: new Date(), + StackStatus: 'CREATE_COMPLETE', + Parameters: [ + { + ParameterKey: CfnParameterKeys.UseCaseConfigRecordKey, + ParameterValue: 'fake-id' + }, + { + ParameterKey: CfnParameterKeys.UseCaseUUID, + ParameterValue: 'fake-uuid' + } + ], + Outputs: [ + { + OutputKey: 'WebConfigKey', + OutputValue: 'mock-webconfig-ssm-parameter-key' + }, + { + OutputKey: 'CloudFrontWebUrl', + OutputValue: 'mock-cloudfront-url' + } + ] + } + ] + }); + + expect(await agentsLambdaHandler(getAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent)).toEqual({ + 'body': JSON.stringify({ + 'deployments': [ + { + 'Name': 'test-agent-builder-2', + 'UseCaseId': '11111111-fake-id', + 'CreatedDate': '2024-07-22T20:32:00Z', + 'Description': 'test agent builder case 2', + 'useCaseUUID': 'fake-uuid', + 'status': 'CREATE_COMPLETE', + 'cloudFrontWebUrl': 'mock-cloudfront-url', + 'ModelProvider': 'Bedrock', + 'UseCaseType': 'AgentBuilder' + }, + { + 'Name': 'test-agent-builder-1', + 'UseCaseId': '11111111-fake-id', + 'CreatedDate': '2024-07-22T20:31:00Z', + 'Description': 'test agent builder case 1', + 'useCaseUUID': 'fake-uuid', + 'status': 'CREATE_COMPLETE', + 'cloudFrontWebUrl': 'mock-cloudfront-url', + 'ModelProvider': 'Bedrock', + 'UseCaseType': 'AgentBuilder' + } + ], + 'numUseCases': 2 + }), + 'headers': { + 'Access-Control-Allow-Credentials': true, + 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.JSON + }, + 'isBase64Encoded': false, + 'statusCode': 200 + }); + }); + + it('should get a single agent builder use case with GET request', async () => { + // Reset mocks + ddbMockedClient.reset(); + cfnMockedClient.reset(); + + ddbMockedClient.on(GetItemCommand, { TableName: process.env[USE_CASES_TABLE_NAME_ENV_VAR] }).resolves({ + Item: marshall({ + UseCaseId: '11111111-2222-2222-3333-333344444444', + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid', + UseCaseConfigRecordKey: '11111111-fake-id', + UseCaseName: 'test-agent-builder', + UseCaseType: 'AgentBuilder', + Description: 'test description', + CreatedBy: 'fake-user-id', + CreatedDate: '2024-07-22T20:31:00Z' + }) + }); + + ddbMockedClient + .on(GetItemCommand, { TableName: process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] }) + .resolves({ + Item: marshall({ + key: 'mockUseCaseConfigRecordKey', + config: { + UseCaseName: 'test-agent-builder', + UseCaseType: 'AgentBuilder', + LlmParams: { + ModelProvider: 'Bedrock', + Streaming: true, + Temperature: 0.5 + }, + AgentBuilderParams: { + SystemPrompt: 'You are a helpful AI assistant.', + MemoryConfig: { + LongTermEnabled: false + } + }, + FeedbackParams: { + FeedbackEnabled: false + } + } + }) + }); + + cfnMockedClient.on(DescribeStacksCommand).resolves({ + Stacks: [ + { + StackName: 'test-agent-builder', + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid', + CreationTime: new Date(), + StackStatus: 'CREATE_COMPLETE', + Parameters: [ + { + ParameterKey: CfnParameterKeys.UseCaseConfigRecordKey, + ParameterValue: 'fake-id' + } + ], + Outputs: [ + { + OutputKey: 'WebConfigKey', + OutputValue: 'mock-webconfig-ssm-parameter-key' + }, + { + OutputKey: 'CloudFrontWebUrl', + OutputValue: 'mock-cloudfront-url' + } + ] + } + ] + }); + + const result = await agentsLambdaHandler( + getSingleAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent + ); + const responseBody = JSON.parse(result.body); + + expect(result.statusCode).toBe(200); + expect(responseBody.UseCaseName).toBe('test-agent-builder'); + expect(responseBody.UseCaseType).toBe('AgentBuilder'); + expect(responseBody.LlmParams.ModelProvider).toBe('Bedrock'); + expect(responseBody.AgentBuilderParams).toBeDefined(); + expect(responseBody.AgentBuilderParams.SystemPrompt).toBe('You are a helpful AI assistant.'); + expect(responseBody.AgentBuilderParams.MemoryConfig.LongTermEnabled).toBe(false); + }); + + it('should delete an agent builder stack', async () => { + // Reset mocks + ddbMockedClient.reset(); + cfnMockedClient.reset(); + + // Mock the USE_CASES_TABLE_NAME_ENV_VAR table for delete operation + ddbMockedClient.on(GetItemCommand).resolves({ + Item: marshall({ + UseCaseId: '11111111-2222-2222-3333-333344444444', + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid', + UseCaseConfigRecordKey: '11111111-fake-id', + UseCaseName: 'test-agent-builder', + UseCaseType: 'AgentBuilder', + Description: 'test description', + CreatedBy: 'fake-user-id', + CreatedDate: '2024-07-22T20:31:00Z' + }) + }); + + // Mock CloudFormation describe stacks (for getting role ARN) + cfnMockedClient.on(DescribeStacksCommand).resolves({ + Stacks: [ + { + StackName: 'test-agent-builder', + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid', + CreationTime: new Date(), + StackStatus: 'CREATE_COMPLETE', + RoleArn: 'arn:aws:iam::123456789012:role/fake-role' + } + ] + }); + + // Mock CloudFormation delete stack + cfnMockedClient.on(DeleteStackCommand).resolves({}); + + // Mock DynamoDB update operations for marking records for deletion + ddbMockedClient.on(UpdateItemCommand).resolves({}); + + expect(await agentsLambdaHandler(deleteAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent)).toEqual({ + 'body': 'SUCCESS', + 'headers': { + 'Access-Control-Allow-Credentials': true, + 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.JSON + }, + 'isBase64Encoded': false, + 'statusCode': 200 + }); + }); + + it('should permanently delete an agent builder stack', async () => { + // Reset mocks + ddbMockedClient.reset(); + cfnMockedClient.reset(); + + // Mock the USE_CASES_TABLE_NAME_ENV_VAR table for permanent delete operation + ddbMockedClient.on(GetItemCommand).resolves({ + Item: marshall({ + UseCaseId: '11111111-2222-2222-3333-333344444444', + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid', + UseCaseConfigRecordKey: '11111111-fake-id', + UseCaseName: 'test-agent-builder', + UseCaseType: 'AgentBuilder', + Description: 'test description', + CreatedBy: 'fake-user-id', + CreatedDate: '2024-07-22T20:31:00Z' + }) + }); + + // Mock CloudFormation describe stacks (for getting role ARN) + cfnMockedClient.on(DescribeStacksCommand).resolves({ + Stacks: [ + { + StackName: 'test-agent-builder', + StackId: 'arn:aws:cloudformation:us-west-2:123456789012:stack/fake-stack-name/fake-uuid', + CreationTime: new Date(), + StackStatus: 'CREATE_COMPLETE', + RoleArn: 'arn:aws:iam::123456789012:role/fake-role' + } + ] + }); + + // Mock CloudFormation delete stack + cfnMockedClient.on(DeleteStackCommand).resolves({}); + + // Mock DynamoDB delete operations for permanent deletion + ddbMockedClient.on(DeleteItemCommand).resolves({}); + + expect( + await agentsLambdaHandler(permanentlyDeleteAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent) + ).toEqual({ + 'body': 'SUCCESS', + 'headers': { + 'Access-Control-Allow-Credentials': true, + 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.JSON + }, + 'isBase64Encoded': false, + 'statusCode': 200 + }); + }); + + it('should create an agent builder stack with ExistingRestApiId', async () => { + const eventWithRestApiId = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + ExistingRestApiId: 'api123' + }) + }; + + expect(await agentsLambdaHandler(eventWithRestApiId as unknown as APIGatewayEvent)).toEqual({ + 'body': 'SUCCESS', + 'headers': { + 'Access-Control-Allow-Credentials': true, + 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', + 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.JSON + }, + 'isBase64Encoded': false, + 'statusCode': 200 + }); + + // Verify the API Gateway client was called correctly + expect(apiGatewayMockedClient.calls()).toHaveLength(1); + const getResourcesCall = apiGatewayMockedClient.calls()[0]; + expect(getResourcesCall.args[0].input).toEqual({ + restApiId: 'api123' + }); + }); + + it('should handle API Gateway errors gracefully', async () => { + // Mock API Gateway to throw an error + apiGatewayMockedClient.on(GetResourcesCommand).rejects(new Error('API Gateway Error')); + + const eventWithRestApiId = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + ExistingRestApiId: 'api123' + }) + }; + + expect(await agentsLambdaHandler(eventWithRestApiId as unknown as APIGatewayEvent)).toEqual({ + 'body': 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + 'headers': { + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.TEXT_PLAIN, + '_X_AMZN_TRACE_ID': 'test-trace-id', + 'x-amzn-ErrorType': 'CustomExecutionError' + }, + 'isBase64Encoded': false, + 'statusCode': '400' + }); + }); + + it('should handle missing root resource', async () => { + // Mock API Gateway to return no root resource + apiGatewayMockedClient.on(GetResourcesCommand).resolves({ + items: [ + { + id: 'abc123', + path: '/other' + } + ] + }); + + const eventWithRestApiId = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + ExistingRestApiId: 'api123' + }) + }; + + expect(await agentsLambdaHandler(eventWithRestApiId as unknown as APIGatewayEvent)).toEqual({ + 'body': 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + 'headers': { + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.TEXT_PLAIN, + '_X_AMZN_TRACE_ID': 'test-trace-id', + 'x-amzn-ErrorType': 'CustomExecutionError' + }, + 'isBase64Encoded': false, + 'statusCode': '400' + }); + }); + }); + + describe('on failure', () => { + it('should handle invalid HTTP method', async () => { + const invalidEvent = { + ...createAgentBuilderUseCaseApiEvent, + httpMethod: 'PUT', + resource: '/deployments/agents' + }; + + await expect(agentsLambdaHandler(invalidEvent as unknown as APIGatewayEvent)).rejects.toThrow( + 'Invalid HTTP method: PUT, at resource: /deployments/agents' + ); + }); + + it('should handle invalid resource path', async () => { + const invalidEvent = { + ...createAgentBuilderUseCaseApiEvent, + resource: '/invalid/path' + }; + + await expect(agentsLambdaHandler(invalidEvent as unknown as APIGatewayEvent)).rejects.toThrow( + 'Invalid HTTP method: POST, at resource: /invalid/path' + ); + }); + + it('should handle unsupported UseCaseType', async () => { + const eventWithUnsupportedType = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + UseCaseType: 'UnsupportedType' + }) + }; + + expect(await agentsLambdaHandler(eventWithUnsupportedType as unknown as APIGatewayEvent)).toEqual({ + 'body': 'Internal Error - Please contact support and quote the following trace id: test-trace-id', + 'headers': { + 'Access-Control-Allow-Origin': '*', + 'Content-Type': MCP_CONTENT_TYPES.TEXT_PLAIN, + '_X_AMZN_TRACE_ID': 'test-trace-id', + 'x-amzn-ErrorType': 'CustomExecutionError' + }, + 'isBase64Encoded': false, + 'statusCode': '400' + }); + }); + }); + + describe('on failure from missing env vars', () => { + it('Should fail to invoke lambda when multimodal environment variables are missing', async () => { + // Store original values + const originalFilesMetadata = process.env[FILES_METADATA_TABLE_NAME_ENV_VAR]; + const originalMultimodalBucket = process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR]; + + try { + // Delete the multimodal env vars to test the specific failure + delete process.env[FILES_METADATA_TABLE_NAME_ENV_VAR]; + delete process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR]; + + await expect( + agentsLambdaHandler(createAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent) + ).rejects.toThrow( + `Missing required environment variables: ${FILES_METADATA_TABLE_NAME_ENV_VAR}, ${MULTIMODAL_DATA_BUCKET_ENV_VAR}. This should not happen and indicates an issue with your deployment.` + ); + } finally { + if (originalFilesMetadata) process.env[FILES_METADATA_TABLE_NAME_ENV_VAR] = originalFilesMetadata; + if (originalMultimodalBucket) process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR] = originalMultimodalBucket; + } + }); + + it('Should fail to invoke lambda when FILES_METADATA_TABLE_NAME_ENV_VAR environment variable is missing', async () => { + const originalFilesMetadata = process.env[FILES_METADATA_TABLE_NAME_ENV_VAR]; + + try { + delete process.env[FILES_METADATA_TABLE_NAME_ENV_VAR]; + + await expect( + agentsLambdaHandler(createAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent) + ).rejects.toThrow( + `Missing required environment variables: ${FILES_METADATA_TABLE_NAME_ENV_VAR}. This should not happen and indicates an issue with your deployment.` + ); + } finally { + if (originalFilesMetadata) process.env[FILES_METADATA_TABLE_NAME_ENV_VAR] = originalFilesMetadata; + } + }); + + it('Should fail to invoke lambda when MULTIMODAL_DATA_BUCKET environment variable is missing', async () => { + process.env[USER_POOL_ID_ENV_VAR] = 'fake-user-pool-id'; + process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; + process.env[AMAZON_TRACE_ID_HEADER] = 'test-trace-id'; + + const originalMultimodalBucket = process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR]; + + try { + delete process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR]; + + await expect( + agentsLambdaHandler(createAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent) + ).rejects.toThrow( + 'Missing required environment variables: MULTIMODAL_DATA_BUCKET. This should not happen and indicates an issue with your deployment.' + ); + } finally { + if (originalMultimodalBucket) process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR] = originalMultimodalBucket; + } + }); + + it('Should return error response when USER_POOL_ID_ENV_VAR is missing', async () => { + const result = await agentsLambdaHandler(createAgentBuilderUseCaseApiEvent as unknown as APIGatewayEvent); + + expect(result.statusCode).toBe('400'); + expect(result.body).toBe( + 'Internal Error - Please contact support and quote the following trace id: test-trace-id' + ); + expect(result.headers['x-amzn-ErrorType']).toBe('CustomExecutionError'); + }); + }); + + afterAll(() => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env._X_AMZN_TRACE_ID; + delete process.env[POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]; + delete process.env[USE_CASES_TABLE_NAME_ENV_VAR]; + delete process.env[ARTIFACT_BUCKET_ENV_VAR]; + delete process.env[COGNITO_POLICY_TABLE_ENV_VAR]; + delete process.env[USER_POOL_ID_ENV_VAR]; + delete process.env[TEMPLATE_FILE_EXTN_ENV_VAR]; + delete process.env[IS_INTERNAL_USER_ENV_VAR]; + delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; + delete process.env[DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR]; + delete process.env[GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR]; + delete process.env[SHARED_ECR_CACHE_PREFIX_ENV_VAR]; + + cfnMockedClient.restore(); + ddbMockedClient.restore(); + apiGatewayMockedClient.restore(); + }); +}); diff --git a/source/lambda/use-case-management/test/cfn/stack-operation-builder.test.ts b/source/lambda/use-case-management/test/cfn/stack-operation-builder.test.ts index f482744f..27dab748 100644 --- a/source/lambda/use-case-management/test/cfn/stack-operation-builder.test.ts +++ b/source/lambda/use-case-management/test/cfn/stack-operation-builder.test.ts @@ -32,8 +32,8 @@ import { USE_CASE_CONFIG_TABLE_NAME_ENV_VAR } from '../../utils/constants'; -import { AgentUseCaseDeploymentAdapter } from '../../model/agent-use-case-adapter'; -import { ChatUseCaseDeploymentAdapter, ChatUseCaseInfoAdapter } from '../../model/chat-use-case-adapter'; +import { AgentUseCaseDeploymentAdapter } from '../../model/adapters/agent-use-case-adapter'; +import { ChatUseCaseDeploymentAdapter, ChatUseCaseInfoAdapter } from '../../model/adapters/chat-use-case-adapter'; import { createAgentWithCognitoConfig, createUseCaseEvent, @@ -152,6 +152,7 @@ describe('When creating StackCommandBuilders', () => { { ParameterKey: CfnParameterKeys.DefaultUserEmail, ParameterValue: 'fake-email@example.com' }, { ParameterKey: CfnParameterKeys.DeployUI, ParameterValue: 'Yes' }, { ParameterKey: CfnParameterKeys.FeedbackEnabled, ParameterValue: 'Yes' }, + { ParameterKey: CfnParameterKeys.ProvisionedConcurrencyValue, ParameterValue: '0' }, { ParameterKey: CfnParameterKeys.UseCaseConfigRecordKey, ParameterValue: '11111111-11111111' @@ -214,6 +215,7 @@ describe('When creating StackCommandBuilders', () => { { ParameterKey: CfnParameterKeys.DefaultUserEmail, ParameterValue: 'fake-email@example.com' }, { ParameterKey: CfnParameterKeys.DeployUI, ParameterValue: 'Yes' }, { ParameterKey: CfnParameterKeys.FeedbackEnabled, ParameterValue: 'Yes' }, + { ParameterKey: CfnParameterKeys.ProvisionedConcurrencyValue, ParameterValue: '0' }, { ParameterKey: CfnParameterKeys.UseCaseConfigRecordKey, ParameterValue: '11111111-11111111' @@ -455,6 +457,7 @@ describe('When creating StackCommandBuilders', () => { { ParameterKey: CfnParameterKeys.DefaultUserEmail, ParameterValue: 'fake-email@example.com' }, { ParameterKey: CfnParameterKeys.DeployUI, ParameterValue: 'Yes' }, { ParameterKey: CfnParameterKeys.FeedbackEnabled, ParameterValue: 'Yes' }, + { ParameterKey: CfnParameterKeys.ProvisionedConcurrencyValue, ParameterValue: '0' }, { ParameterKey: CfnParameterKeys.UseCaseConfigRecordKey, ParameterValue: '11111111-11111111' diff --git a/source/lambda/use-case-management/test/ddb/builder.test.ts b/source/lambda/use-case-management/test/ddb/builder.test.ts index 9e9164e3..9f4220cc 100644 --- a/source/lambda/use-case-management/test/ddb/builder.test.ts +++ b/source/lambda/use-case-management/test/ddb/builder.test.ts @@ -30,7 +30,7 @@ import { permanentlyDeleteUseCaseEvent, updateUseCaseEvent } from '../event-test-data'; -import { ChatUseCaseInfoAdapter } from '../../model/chat-use-case-adapter'; +import { ChatUseCaseInfoAdapter } from '../../model/adapters/chat-use-case-adapter'; describe('When creating StackCommandBuilders', () => { let createEvent: any; diff --git a/source/lambda/use-case-management/test/ddb/use-case-config-management.test.ts b/source/lambda/use-case-management/test/ddb/use-case-config-management.test.ts index c083df51..e7cbd924 100644 --- a/source/lambda/use-case-management/test/ddb/use-case-config-management.test.ts +++ b/source/lambda/use-case-management/test/ddb/use-case-config-management.test.ts @@ -5,7 +5,8 @@ import { DescribeTableCommandOutput, DynamoDBClient, GetItemCommand, - PutItemCommand + PutItemCommand, + UpdateItemCommand } from '@aws-sdk/client-dynamodb'; import { marshall } from '@aws-sdk/util-dynamodb'; import { mockClient } from 'aws-sdk-client-mock'; @@ -204,6 +205,32 @@ describe('When retrieving the use case config details from the config table', () }); }); + describe('When marking config for deletion', () => { + it('should set TTL on config record', async () => { + const cfnParameters = new Map(); + cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, '1111111-fake-key'); + const useCase = new UseCase( + '11111111-2222-2222', + 'fake-test', + 'Create a stack for test', + cfnParameters, + {}, + 'test-user', + 'fake-template-name', + 'Chat' + ); + + ddbMockedClient.on(UpdateItemCommand).resolves({}); + + await useCaseConfigManagement.markUseCaseConfigForDeletion(useCase); + + expect(ddbMockedClient).toHaveReceivedCommandWith(UpdateItemCommand, { + UpdateExpression: 'SET #TTL = :expiry_time', + ExpressionAttributeNames: { '#TTL': 'TTL' } + }); + }); + }); + afterAll(() => { delete process.env.AWS_SDK_USER_AGENT; delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; diff --git a/source/lambda/use-case-management/test/ddb/use-case-config-operation-builder.test.ts b/source/lambda/use-case-management/test/ddb/use-case-config-operation-builder.test.ts index e1b73460..95b3b343 100644 --- a/source/lambda/use-case-management/test/ddb/use-case-config-operation-builder.test.ts +++ b/source/lambda/use-case-management/test/ddb/use-case-config-operation-builder.test.ts @@ -15,7 +15,6 @@ import { CHAT_PROVIDERS, CfnParameterKeys, DYNAMODB_TTL_ATTRIBUTE_NAME, - TTL_SECONDS, USE_CASE_CONFIG_TABLE_NAME_ENV_VAR } from '../../utils/constants'; import { createUseCaseEvent } from '../event-test-data'; diff --git a/source/lambda/use-case-management/test/event-test-data.ts b/source/lambda/use-case-management/test/event-test-data.ts index 771e24b8..d56a8e35 100644 --- a/source/lambda/use-case-management/test/event-test-data.ts +++ b/source/lambda/use-case-management/test/event-test-data.ts @@ -10,6 +10,7 @@ export const createAgentUseCaseEvent = { UseCaseDescription: 'fake-description', DefaultUserEmail: 'fake-email@example.com', DeployUI: false, + ProvisionedConcurrencyValue: 0, FeedbackParams: { FeedbackEnabled: true }, @@ -149,6 +150,7 @@ export const createUseCaseEvent = { UseCaseDescription: 'fake-description', DefaultUserEmail: 'fake-email@example.com', DeployUI: true, + ProvisionedConcurrencyValue: 0, FeedbackParams: { FeedbackEnabled: true }, @@ -658,3 +660,204 @@ export const getUseCaseApiEvent = { } } }; + +// Agent Builder test events +export const createAgentBuilderUseCaseEvent = { + body: { + UseCaseType: 'AgentBuilder', + UseCaseName: 'fake-agent-builder', + UseCaseDescription: 'fake-description', + DefaultUserEmail: 'fake-email@example.com', + DeployUI: true, + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.1, + RAGEnabled: false, + Streaming: true + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant', + MCPServers: [], + Tools: [], + MemoryConfig: { + LongTermEnabled: true + } + } + }, + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +export const createAgentBuilderUseCaseWithCognitoEvent = { + body: { + UseCaseType: 'AgentBuilder', + UseCaseName: 'fake-agent-builder', + UseCaseDescription: 'fake-description', + DefaultUserEmail: 'fake-email@example.com', + DeployUI: true, + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.1, + RAGEnabled: false, + Streaming: true + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant', + MCPServers: [], + Tools: [], + MemoryConfig: { + LongTermEnabled: true + } + }, + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'fake-user-pool-id', + ExistingUserPoolClientId: 'fake-user-pool-client-id' + } + } + }, + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +export const createAgentBuilderUseCaseWithApiEvent = { + body: { + UseCaseType: 'AgentBuilder', + UseCaseName: 'fake-agent-builder', + UseCaseDescription: 'fake-description', + DefaultUserEmail: 'fake-email@example.com', + DeployUI: true, + ExistingRestApiId: 'fake-api-id', + ExistingApiRootResourceId: 'fake-root-resource-id', + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.1, + RAGEnabled: false, + Streaming: true + }, + AgentParams: { + SystemPrompt: 'You are a helpful assistant', + MCPServers: [], + Tools: [], + MemoryConfig: { + LongTermEnabled: false + } + } + }, + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +export const createAgentBuilderUseCaseApiEvent = { + body: JSON.stringify(createAgentBuilderUseCaseEvent.body), + resource: '/deployments/agents', + httpMethod: 'POST', + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +export const createAgentBuilderUseCaseApiEventWithCognito = { + body: JSON.stringify(createAgentBuilderUseCaseWithCognitoEvent.body), + resource: '/deployments/agents', + httpMethod: 'POST', + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +export const createAgentBuilderUseCaseApiEventWithApi = { + body: JSON.stringify(createAgentBuilderUseCaseWithApiEvent.body), + resource: '/deployments/agents', + httpMethod: 'POST', + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +export const updateAgentBuilderUseCaseEvent = { + body: { + UseCaseType: 'AgentBuilder', + UseCaseName: 'updated-agent-builder', + UseCaseDescription: 'updated-description', + DefaultUserEmail: 'fake-email@example.com', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.2, + RAGEnabled: false, + Streaming: true + }, + AgentParams: { + SystemPrompt: 'You are an updated helpful assistant', + MCPServers: [], + Tools: [], + MemoryConfig: { + LongTermEnabled: false + } + } + }, + pathParameters: { + useCaseId: '11111111-2222-2222-3333-333344444444' + }, + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +export const updateAgentBuilderUseCaseApiEvent = { + body: JSON.stringify(updateAgentBuilderUseCaseEvent.body), + resource: '/deployments/agents/{useCaseId}', + pathParameters: { + useCaseId: '11111111-2222-2222-3333-333344444444' + }, + httpMethod: 'PATCH', + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +export const getAgentBuilderUseCaseApiEvent = { + resource: '/deployments/agents', + httpMethod: 'GET', + queryStringParameters: { + pageNumber: '1' + }, + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; diff --git a/source/lambda/use-case-management/test/mcp-handler.test.ts b/source/lambda/use-case-management/test/mcp-handler.test.ts new file mode 100644 index 00000000..81dad466 --- /dev/null +++ b/source/lambda/use-case-management/test/mcp-handler.test.ts @@ -0,0 +1,274 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import { mcpLambdaHandler, mcpHandler } from '../mcp-handler'; +import { handleLambdaError } from '../utils/utils'; +import { GATEWAY_TARGET_TYPES, MCP_CONTENT_TYPES } from '../utils/constants'; +import { createPresignedPost } from '@aws-sdk/s3-presigned-post'; + +jest.mock('../utils/utils', () => ({ + ...jest.requireActual('../utils/utils'), + checkEnv: jest.fn(), + handleLambdaError: jest.fn(), + extractUserId: jest.fn() +})); +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn().mockReturnValue({}) +})); +jest.mock('@aws-sdk/client-s3'); +jest.mock('@aws-sdk/s3-presigned-post'); +jest.mock('../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn(), + addContext: jest.fn(), + removeKeys: jest.fn(), + logEventIfEnabled: jest.fn() + }, + metrics: { + addMetric: jest.fn(), + publishStoredMetrics: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn() + } +})); + +describe('MCP Handler', () => { + // Factory function for creating test events + const createMockEvent = (overrides: Partial = {}): APIGatewayEvent => ({ + httpMethod: 'POST', + resource: '/deployments/mcp/upload-schemas', + body: JSON.stringify({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'my-api-schema.json' + } + ] + }), + headers: { + 'Content-Type': MCP_CONTENT_TYPES.JSON + }, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/mcp/upload-schemas', + pathParameters: null, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + authorizer: { + UserId: 'test-user-123' + }, + requestId: 'test-request-id', + stage: 'test' + } as any, + ...overrides + }); + + const mockEvent = createMockEvent(); + + beforeEach(() => { + jest.clearAllMocks(); + + process.env.POWERTOOLS_METRICS_NAMESPACE = 'test'; + process.env.USE_CASES_TABLE_NAME = 'test-table'; + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + process.env.GAAB_DEPLOYMENTS_BUCKET = 'test-deployments-bucket'; + process.env.AWS_SDK_USER_AGENT = JSON.stringify({ customUserAgent: 'test-agent' }); + process.env._X_AMZN_TRACE_ID = 'test-trace-id'; + + const { extractUserId } = require('../utils/utils'); + (extractUserId as jest.Mock).mockReturnValue('test-user-123'); + (handleLambdaError as jest.Mock).mockReturnValue({ + statusCode: 400, + headers: { 'Content-Type': MCP_CONTENT_TYPES.JSON }, + body: JSON.stringify({ message: 'Mocked error response' }) + }); + }); + + afterEach(() => { + delete process.env.POWERTOOLS_METRICS_NAMESPACE; + delete process.env.USE_CASES_TABLE_NAME; + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env.GAAB_DEPLOYMENTS_BUCKET; + delete process.env.REQUIRED_MCP_ENV_VARS; + delete process.env.AWS_SDK_USER_AGENT; + delete process.env._X_AMZN_TRACE_ID; + }); + + describe('Environment and Route Validation', () => { + it('should handle missing environment variables gracefully', async () => { + const originalBucketName = process.env.GAAB_DEPLOYMENTS_BUCKET; + delete process.env.GAAB_DEPLOYMENTS_BUCKET; + + try { + const result = await mcpLambdaHandler(mockEvent); + expect(handleLambdaError).toHaveBeenCalled(); + expect(result.statusCode).toBe(400); + } finally { + if (originalBucketName) { + process.env.GAAB_DEPLOYMENTS_BUCKET = originalBucketName; + } + } + }); + + it('should reject invalid HTTP method', async () => { + const invalidMethodEvent = createMockEvent({ + httpMethod: 'DELETE', + resource: '/deployments/mcp/upload-schemas' + }); + + const result = await mcpLambdaHandler(invalidMethodEvent); + + expect(handleLambdaError).toHaveBeenCalled(); + expect(result.statusCode).toBe(400); + }); + + it('should reject invalid resource path', async () => { + const invalidResourceEvent = createMockEvent({ resource: '/invalid/path' }); + + const result = await mcpLambdaHandler(invalidResourceEvent); + + expect(handleLambdaError).toHaveBeenCalled(); + expect(result.statusCode).toBe(400); + }); + }); + + describe('Handler Integration', () => { + const mockPresignedPostResponse = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: { + key: 'mcp/my-use-case-123/openApi_schema_2024-01-15T10-30-45-123Z.json', + 'x-amz-meta-userid': 'test-user-123', + 'x-amz-meta-filename': 'my-api-schema.json', + 'x-amz-meta-fileextension': '.json', + 'Content-Type': MCP_CONTENT_TYPES.JSON, + 'tagging': 'schemaType=openApiSchema&uploadedBy=test-user-123&source=mcp-api&status=inactive' + } + }; + + beforeEach(() => { + (createPresignedPost as jest.Mock).mockResolvedValue(mockPresignedPostResponse); + }); + + it('should handle successful upload-schemas request end-to-end', async () => { + const result = await mcpLambdaHandler(mockEvent); + + expect(result.statusCode).toBe(200); + expect(result.headers).toHaveProperty('Content-Type', MCP_CONTENT_TYPES.JSON); + + const responseBody = JSON.parse(result.body); + expect(responseBody).toHaveProperty('uploads'); + expect(Array.isArray(responseBody.uploads)).toBe(true); + }); + + it('should handle command execution failure', async () => { + (createPresignedPost as jest.Mock).mockRejectedValue(new Error('S3 service unavailable')); + + const result = await mcpLambdaHandler(mockEvent); + + expect(handleLambdaError).toHaveBeenCalledWith( + expect.any(Error), + 'POST:/deployments/mcp/upload-schemas', + 'MCP' + ); + expect(result.statusCode).toBe(400); + }); + + it('should handle validation errors from downstream components', async () => { + const invalidEvent = createMockEvent({ + body: JSON.stringify({ + files: [{ schemaType: 'invalid-type', fileName: 'test.json' }] + }) + }); + + const result = await mcpLambdaHandler(invalidEvent); + + expect(handleLambdaError).toHaveBeenCalled(); + expect(result.statusCode).toBe(400); + }); + }); + + describe('LIST Operation', () => { + it('should handle LIST operation without adapter', async () => { + const mockStorageMgmt = { + getAllCaseRecords: jest.fn().mockResolvedValue({ + useCaseRecords: [] + }) + }; + + const { ListMCPServersCommand } = require('../model/commands/mcp-command'); + const originalExecute = ListMCPServersCommand.prototype.execute; + ListMCPServersCommand.prototype.execute = jest.fn().mockResolvedValue([]); + + const listEvent = createMockEvent({ + httpMethod: 'GET', + resource: '/deployments/mcp', + body: null, + queryStringParameters: { pageNumber: '1' } + }); + + const result = await mcpLambdaHandler(listEvent); + + expect(result.statusCode).toBe(200); + expect(result.headers).toHaveProperty('Content-Type', MCP_CONTENT_TYPES.JSON); + + const responseBody = JSON.parse(result.body); + expect(Array.isArray(responseBody)).toBe(true); + + // Restore original execute + ListMCPServersCommand.prototype.execute = originalExecute; + }); + + it('should return MCP servers list when servers exist', async () => { + const mockMcpServers = [ + { + Name: 'WeatherAPI-MCP', + Description: 'MCP server for weather data', + mcpId: 'config-key-1' + } + ]; + + const { ListMCPServersCommand } = require('../model/commands/mcp-command'); + const originalExecute = ListMCPServersCommand.prototype.execute; + ListMCPServersCommand.prototype.execute = jest.fn().mockResolvedValue(mockMcpServers); + + const listEvent = createMockEvent({ + httpMethod: 'GET', + resource: '/deployments/mcp', + body: null, + queryStringParameters: { pageNumber: '1' } + }); + + const result = await mcpLambdaHandler(listEvent); + + expect(result.statusCode).toBe(200); + const responseBody = JSON.parse(result.body); + expect(responseBody).toEqual(mockMcpServers); + + ListMCPServersCommand.prototype.execute = originalExecute; + }); + }); + + describe('Middleware Integration', () => { + it('should export middy-wrapped handler', () => { + expect(mcpHandler).toBeDefined(); + expect(typeof mcpHandler).toBe('function'); + }); + + it('should have middleware configuration', () => { + // Verify the handler has middy middleware attached + expect(mcpHandler).toHaveProperty('use'); + expect(mcpHandler).toHaveProperty('before'); + expect(mcpHandler).toHaveProperty('after'); + expect(mcpHandler).toHaveProperty('onError'); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/adapters/agent-builder-use-case-adapter.test.ts b/source/lambda/use-case-management/test/model/adapters/agent-builder-use-case-adapter.test.ts new file mode 100644 index 00000000..942341b2 --- /dev/null +++ b/source/lambda/use-case-management/test/model/adapters/agent-builder-use-case-adapter.test.ts @@ -0,0 +1,834 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import { + AgentBuilderUseCaseDeploymentAdapter, + AgentBuilderUseCaseInfoAdapter +} from '../../../model/adapters/agent-builder-use-case-adapter'; +import { + UseCaseTypes, + IS_INTERNAL_USER_ENV_VAR, + STACK_DEPLOYMENT_SOURCE_USE_CASE, + AUTHENTICATION_PROVIDERS +} from '../../../utils/constants'; +import { + createAgentBuilderUseCaseApiEvent, + createAgentBuilderUseCaseApiEventWithCognito, + createAgentBuilderUseCaseApiEventWithApi +} from '../../event-test-data'; + +jest.mock('crypto', () => { + return { + ...jest.requireActual('crypto'), + randomUUID: jest.fn().mockReturnValue('11111111-2222-2222-3333-333344444444') + }; +}); + +describe('AgentBuilderUseCaseDeploymentAdapter', () => { + beforeEach(() => { + // Set required environment variables + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + process.env.COGNITO_POLICY_TABLE_NAME = 'test-cognito-table'; + process.env.USER_POOL_ID = 'test-user-pool'; + process.env.IS_INTERNAL_USER = 'false'; + process.env.SHARED_ECR_CACHE_PREFIX = 'test-ecr-prefix'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + }); + + afterEach(() => { + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env.COGNITO_POLICY_TABLE_NAME; + delete process.env.USER_POOL_ID; + delete process.env.IS_INTERNAL_USER; + delete process.env.SHARED_ECR_CACHE_PREFIX; + delete process.env.MULTIMODAL_METADATA_TABLE_NAME; + delete process.env.MULTIMODAL_DATA_BUCKET; + }); + + it('should be able to be constructed with event body', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEvent as any as APIGatewayEvent + ); + + expect(useCase.configuration).toEqual({ + UseCaseType: 'AgentBuilder', + UseCaseName: 'fake-agent-builder', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.1, + RAGEnabled: false, + Streaming: true + }, + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + MCPServers: [], + Tools: [], + MemoryConfig: { + LongTermEnabled: true + } + }, + FeedbackParams: { + FeedbackEnabled: true, + CustomMappings: {} + }, + IsInternalUser: 'true' + }); + }); + + it('should have the correct cfnParameters', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEvent as any as APIGatewayEvent + ); + + expect(useCase.cfnParameters!.get('DefaultUserEmail')).toBe('fake-email@example.com'); + expect(useCase.cfnParameters!.get('DeployUI')).toBe('Yes'); + expect(useCase.cfnParameters!.get('EnableLongTermMemory')).toBe('Yes'); + expect(useCase.cfnParameters!.get('SharedEcrCachePrefix')).toBe('test-ecr-prefix'); + expect(useCase.cfnParameters!.get('UseCaseUUID')).toBe('11111111-2222-2222-3333-333344444444'); + expect(useCase.cfnParameters!.get('FeedbackEnabled')).toBe('Yes'); + expect(useCase.cfnParameters!.get('StackDeploymentSource')).toEqual(STACK_DEPLOYMENT_SOURCE_USE_CASE); + expect(useCase.cfnParameters!.get('UseCaseConfigTableName')).toBe('test-config-table'); + expect(useCase.cfnParameters!.get('ExistingCognitoUserPoolId')).toBe('test-user-pool'); + expect(useCase.cfnParameters!.get('ComponentCognitoUserPoolId')).toBe('test-user-pool'); + expect(useCase.cfnParameters!.get('ExistingCognitoGroupPolicyTableName')).toBe('test-cognito-table'); + }); + + it('should generate correct template name', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEvent as any as APIGatewayEvent + ); + + expect(useCase.templateName).toBe('AgentBuilderStack'); + }); + + it('should handle memory config when LongTermEnabled is false', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithMemoryDisabled = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + AgentParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).AgentParams, + MemoryConfig: { + LongTermEnabled: false + } + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithMemoryDisabled as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('EnableLongTermMemory')).toBe('No'); + }); + + it('should handle feedback disabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithFeedbackDisabled = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + FeedbackParams: { + FeedbackEnabled: false + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithFeedbackDisabled as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('FeedbackEnabled')).toBe('No'); + expect(useCase.configuration.FeedbackParams?.FeedbackEnabled).toBe(false); + expect(useCase.configuration.FeedbackParams?.CustomMappings).toBeUndefined(); + }); + + it('should add ExistingApiRootResourceId to jsonBody when apiRootResourceId is provided', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const apiRootResourceId = 'test-root-resource-id'; + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEvent as any as APIGatewayEvent, + apiRootResourceId + ); + + expect(useCase.configuration).toEqual({ + 'UseCaseType': 'AgentBuilder', + 'UseCaseName': 'fake-agent-builder', + 'IsInternalUser': 'true', + 'LlmParams': { + 'ModelProvider': 'Bedrock', + 'BedrockLlmParams': { 'ModelId': 'fake-model' }, + 'Temperature': 0.1, + 'RAGEnabled': false, + 'Streaming': true + }, + 'AgentBuilderParams': { + 'SystemPrompt': 'You are a helpful assistant', + 'MCPServers': [], + 'Tools': [], + 'MemoryConfig': { + 'LongTermEnabled': true + } + }, + 'FeedbackParams': { + 'FeedbackEnabled': true, + 'CustomMappings': {} + } + }); + }); + + it('should handle apiRootResourceId with existing API configuration', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const apiRootResourceId = 'test-root-resource-id'; + const eventWithExistingApi = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + ExistingRestApiId: 'test-api-id' + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + eventWithExistingApi as any as APIGatewayEvent, + apiRootResourceId + ); + + expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBe('test-api-id'); + expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBe(apiRootResourceId); + }); + + it('should set API-related CFN parameters correctly when provided', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const apiRootResourceId = 'test-root-resource-id'; + const eventWithExistingApi = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + ExistingRestApiId: 'test-api-id', + AuthenticationParams: undefined // ensure no Cognito user pool to allow API params + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + eventWithExistingApi as any as APIGatewayEvent, + apiRootResourceId + ); + + expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBe('test-api-id'); + expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBe(apiRootResourceId); + }); + + it('should throw error when ModelProvider is missing', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithoutProvider = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.1 + } + }) + }; + + expect(() => new AgentBuilderUseCaseDeploymentAdapter(eventWithoutProvider as any as APIGatewayEvent)).toThrow( + 'Model Provider name not found in event body' + ); + }); + + it('should handle prompt parameters', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithPromptParams = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + PromptParams: { + PromptTemplate: 'You are a helpful assistant. {input}', + DisambiguationPromptTemplate: 'Please clarify: {input}' + } + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithPromptParams as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').AgentBuilderUseCaseConfiguration; + expect(config.LlmParams?.PromptParams).toEqual({ + PromptTemplate: 'You are a helpful assistant. {input}', + DisambiguationPromptTemplate: 'Please clarify: {input}' + }); + }); + + it('should handle model parameters', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithModelParams = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + ModelParams: { + max_tokens: 1000, + top_p: 0.9 + } + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithModelParams as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').AgentBuilderUseCaseConfiguration; + expect(config.LlmParams?.ModelParams).toEqual({ + max_tokens: 1000, + top_p: 0.9 + }); + }); + + it('should handle verbose mode', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithVerbose = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + Verbose: true + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithVerbose as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').AgentBuilderUseCaseConfiguration; + expect(config.LlmParams?.Verbose).toBe(true); + }); + + it('should not set VPC parameters for Agent Builder', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + // Even if VPC params are provided, they should be ignored for Agent Builder + const eventWithVpcParams = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + VpcParams: { + VpcEnabled: true, + CreateNewVpc: true + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithVpcParams as any as APIGatewayEvent); + + // VPC parameters should not be set for Agent Builder + expect(useCase.cfnParameters!.get('VpcEnabled')).toBeUndefined(); + expect(useCase.cfnParameters!.get('CreateNewVpc')).toBeUndefined(); + expect(useCase.cfnParameters!.get('ExistingVpcId')).toBeUndefined(); + expect(useCase.cfnParameters!.get('ExistingPrivateSubnetIds')).toBeUndefined(); + expect(useCase.cfnParameters!.get('ExistingSecurityGroupIds')).toBeUndefined(); + }); + + it('should set UseInferenceProfile to Yes when BedrockInferenceType is INFERENCE_PROFILE', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithInferenceProfile = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + BedrockLlmParams: { + InferenceProfileId: 'fake-profile-id', + BedrockInferenceType: 'INFERENCE_PROFILE' + } + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithInferenceProfile as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('UseInferenceProfile')).toBe('Yes'); + }); + + it('should handle MCP servers configuration', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithMCPServers = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + AgentParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).AgentParams, + MCPServers: [ + { + UseCaseId: 'test-mcp-runtime-id-1', + UseCaseName: 'test-mcp-runtime', + Url: 'https://example-bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test-runtime/invocations', + Type: 'runtime' + }, + { + UseCaseId: 'test-mcp-gateway-id-2', + UseCaseName: 'test-mcp-gateway', + Url: 'https://example-mcp-gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp', + Type: 'gateway' + } + ] + } + }) + }; + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithMCPServers as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').AgentBuilderUseCaseConfiguration; + expect(config.AgentBuilderParams?.MCPServers).toEqual([ + { + UseCaseId: 'test-mcp-runtime-id-1', + UseCaseName: 'test-mcp-runtime', + Url: 'https://example-bedrock-agentcore.us-east-1.amazonaws.com/runtimes/test-runtime/invocations', + Type: 'runtime' + }, + { + UseCaseId: 'test-mcp-gateway-id-2', + UseCaseName: 'test-mcp-gateway', + Url: 'https://example-mcp-gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp', + Type: 'gateway' + } + ]); + }); + + it('should set UseInferenceProfile to No when BedrockInferenceType is not INFERENCE_PROFILE', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithModelId = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + BedrockLlmParams: { + ModelId: 'fake-model-id', + BedrockInferenceType: 'ON_DEMAND' + } + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithModelId as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('UseInferenceProfile')).toBe('No'); + }); + + it('should handle empty MCP servers configuration', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithoutMCPServers = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + AgentParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).AgentParams, + MCPServers: undefined + } + }) + }; + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithoutMCPServers as any as APIGatewayEvent); + const config = useCase.configuration as import('../../../model/types').AgentBuilderUseCaseConfiguration; + expect(config.AgentBuilderParams?.MCPServers).toBeUndefined(); + }); + + it('should set UseInferenceProfile to No when BedrockInferenceType is not provided', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEvent as any as APIGatewayEvent + ); + + expect(useCase.cfnParameters!.get('UseInferenceProfile')).toBe('No'); + }); + + it('should return empty array for retained parameter keys', () => { + const eventWithoutMCPServers = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + AgentParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).AgentParams, + MCPServers: undefined + } + }) + }; + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithoutMCPServers as any as APIGatewayEvent); + expect(useCase.getRetainedParameterKeys()).toEqual([]); + }); + + it('should set ComponentCognitoUserPoolId parameter', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEvent as any as APIGatewayEvent + ); + + expect(useCase.cfnParameters!.get('ComponentCognitoUserPoolId')).toBe('test-user-pool'); + }); + + describe('multimodal environment variables', () => { + it('should set multimodal CFN parameters when multimodal is enabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // Create event with multimodal enabled + const eventWithMultimodal = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithMultimodal as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('MultimodalEnabled')).toBe('Yes'); + expect(useCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe('test-multimodal-table'); + expect(useCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe('test-multimodal-bucket'); + }); + + it('should set multimodal CFN parameters to disabled state when multimodal is disabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // Create event with multimodal disabled + const eventWithMultimodalDisabled = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + eventWithMultimodalDisabled as any as APIGatewayEvent + ); + + expect(useCase.cfnParameters!.get('MultimodalEnabled')).toBe('No'); + expect(useCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe(''); + expect(useCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe(''); + }); + + it('should not set multimodal CFN parameters when multimodal params are not provided', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // Create event without multimodal params + const eventWithoutMultimodal = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: undefined + } + }) + }; + + let useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithoutMultimodal as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.has('MultimodalEnabled')).toBe(false); + expect(useCase.cfnParameters!.has('ExistingMultimodalDataMetadataTable')).toBe(false); + expect(useCase.cfnParameters!.has('ExistingMultimodalDataBucket')).toBe(false); + }); + + it('should support updating from multimodal enabled to disabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // First create with multimodal enabled + const eventWithMultimodalEnabled = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }) + }; + + let enabledUseCase = new AgentBuilderUseCaseDeploymentAdapter( + eventWithMultimodalEnabled as any as APIGatewayEvent + ); + + expect(enabledUseCase.cfnParameters!.get('MultimodalEnabled')).toBe('Yes'); + expect(enabledUseCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe( + 'test-multimodal-table' + ); + expect(enabledUseCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe('test-multimodal-bucket'); + + // Update to disabled + const eventWithMultimodalDisabled = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }) + }; + + let disabledUseCase = new AgentBuilderUseCaseDeploymentAdapter( + eventWithMultimodalDisabled as any as APIGatewayEvent + ); + + expect(disabledUseCase.cfnParameters!.get('MultimodalEnabled')).toBe('No'); + expect(disabledUseCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe(''); + expect(disabledUseCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe(''); + }); + + it('should support updating from multimodal disabled to enabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // First create with multimodal disabled + const eventWithMultimodalDisabled = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }) + }; + + let disabledUseCase = new AgentBuilderUseCaseDeploymentAdapter( + eventWithMultimodalDisabled as any as APIGatewayEvent + ); + + expect(disabledUseCase.cfnParameters!.get('MultimodalEnabled')).toBe('No'); + expect(disabledUseCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe(''); + expect(disabledUseCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe(''); + + // Update to enabled + const eventWithMultimodalEnabled = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }) + }; + + let enabledUseCase = new AgentBuilderUseCaseDeploymentAdapter( + eventWithMultimodalEnabled as any as APIGatewayEvent + ); + + expect(enabledUseCase.cfnParameters!.get('MultimodalEnabled')).toBe('Yes'); + expect(enabledUseCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe( + 'test-multimodal-table' + ); + expect(enabledUseCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe('test-multimodal-bucket'); + }); + }); +}); + +describe('Test AgentBuilderUseCaseWithCognitoUserPool', () => { + beforeEach(() => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + process.env.COGNITO_POLICY_TABLE_NAME = 'test-cognito-table'; + process.env.USER_POOL_ID = 'test-user-pool'; + process.env.SHARED_ECR_CACHE_PREFIX = 'test-ecr-prefix'; + }); + + afterEach(() => { + delete process.env[IS_INTERNAL_USER_ENV_VAR]; + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env.COGNITO_POLICY_TABLE_NAME; + delete process.env.USER_POOL_ID; + delete process.env.SHARED_ECR_CACHE_PREFIX; + }); + + it('should set the cfn parameters for cognito config', () => { + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEventWithCognito as any as APIGatewayEvent + ); + expect(useCase.cfnParameters!.get('ExistingCognitoUserPoolId')).toBe('fake-user-pool-id'); + expect(useCase.cfnParameters!.get('ComponentCognitoUserPoolId')).toBe('fake-user-pool-id'); + expect(useCase.cfnParameters!.get('ExistingCognitoUserPoolClient')).toBe('fake-user-pool-client-id'); + expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBeUndefined(); + expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBeUndefined(); + }); + + it('should set the cfn parameters for api config', () => { + let useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEventWithApi as any as APIGatewayEvent + ); + expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBe('fake-api-id'); + expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBe('fake-root-resource-id'); + }); + + it('should throw error for unsupported authentication provider', () => { + const eventWithUnsupportedAuth = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + AuthenticationParams: { + AuthenticationProvider: 'UnsupportedProvider', + CognitoParams: { + ExistingUserPoolId: 'fake-user-pool-id' + } + } + }) + }; + + expect( + () => new AgentBuilderUseCaseDeploymentAdapter(eventWithUnsupportedAuth as any as APIGatewayEvent) + ).toThrow('Error: unsupported AuthenticationProvider: UnsupportedProvider.'); + }); + + it('should throw error when ExistingUserPoolId is missing for Cognito provider', () => { + const eventWithMissingUserPoolId = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolClientId: 'fake-user-pool-client-id' + } + } + }) + }; + + expect( + () => new AgentBuilderUseCaseDeploymentAdapter(eventWithMissingUserPoolId as any as APIGatewayEvent) + ).toThrow('Required field existingUserPoolId not provided for the "Cognito" AuthenticationProvider.'); + }); +}); + +describe('AgentBuilderUseCaseInfoAdapter', () => { + let mockEvent: APIGatewayEvent; + + beforeEach(() => { + mockEvent = { + pathParameters: { + useCaseId: 'test-use-case-id' + }, + requestContext: { + authorizer: { + UserId: 'test-user-id' + } + } as any + } as any as APIGatewayEvent; + }); + + it('should create AgentBuilderUseCaseInfoAdapter with correct properties', () => { + const adapter = new AgentBuilderUseCaseInfoAdapter(mockEvent); + + expect(adapter.useCaseType).toBe(UseCaseTypes.AGENT_BUILDER); + expect(adapter.useCaseId).toBe('test-use-case-id'); + expect(adapter.userId).toBe('test-user-id'); + expect(adapter.name).toBe(''); + expect(adapter.description).toBeUndefined(); + expect(adapter.providerName).toBe(''); + expect(adapter.configuration).toEqual({}); + }); + + it('should handle multimodal parameters correctly', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithMultimodal = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }) + }; + + const useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithMultimodal as any as APIGatewayEvent); + const config = useCase.configuration as any; + + expect(config.LlmParams.MultimodalParams).toBeDefined(); + expect(config.LlmParams.MultimodalParams.MultimodalEnabled).toBe(true); + }); + + it('should handle multimodal parameters when disabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithMultimodal = { + ...createAgentBuilderUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createAgentBuilderUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }) + }; + + const useCase = new AgentBuilderUseCaseDeploymentAdapter(eventWithMultimodal as any as APIGatewayEvent); + const config = useCase.configuration as any; + + expect(config.LlmParams.MultimodalParams).toBeDefined(); + expect(config.LlmParams.MultimodalParams.MultimodalEnabled).toBe(false); + }); + + it('should handle missing multimodal parameters', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const useCase = new AgentBuilderUseCaseDeploymentAdapter( + createAgentBuilderUseCaseApiEvent as any as APIGatewayEvent + ); + const config = useCase.configuration as any; + + expect(config.LlmParams.MultimodalParams).toBeUndefined(); + }); +}); diff --git a/source/lambda/use-case-management/test/model/agent-use-case-adapter.test.ts b/source/lambda/use-case-management/test/model/adapters/agent-use-case-adapter.test.ts similarity index 59% rename from source/lambda/use-case-management/test/model/agent-use-case-adapter.test.ts rename to source/lambda/use-case-management/test/model/adapters/agent-use-case-adapter.test.ts index 716bbf66..962c5d94 100644 --- a/source/lambda/use-case-management/test/model/agent-use-case-adapter.test.ts +++ b/source/lambda/use-case-management/test/model/adapters/agent-use-case-adapter.test.ts @@ -2,15 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 import { APIGatewayEvent } from 'aws-lambda'; -import { AgentUseCaseDeploymentAdapter } from '../../model/agent-use-case-adapter'; -import { IS_INTERNAL_USER_ENV_VAR, STACK_DEPLOYMENT_SOURCE_USE_CASE } from '../../utils/constants'; +import { AgentUseCaseDeploymentAdapter, AgentUseCaseInfoAdapter } from '../../../model/adapters/agent-use-case-adapter'; +import { IS_INTERNAL_USER_ENV_VAR, STACK_DEPLOYMENT_SOURCE_USE_CASE } from '../../../utils/constants'; import { createAgentUseCaseApiEvent, createAgentUseCaseApiEventWithCognitoConfigEvent, createAgentUseCaseApiEventWithoutCognitoWithApiConfigEvent, createAgentUseCaseWithExistingVpcApiEvent, createAgentUseCaseWithVpcApiEvent -} from '../event-test-data'; +} from '../../event-test-data'; jest.mock('crypto', () => { return { @@ -22,6 +22,16 @@ jest.mock('crypto', () => { describe('Test AgentUseCaseDeploymentAdapter', () => { beforeEach(() => { process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + process.env.COGNITO_POLICY_TABLE_NAME = 'test-cognito-table'; + process.env.USER_POOL_ID = 'test-user-pool'; + }); + + afterEach(() => { + delete process.env[IS_INTERNAL_USER_ENV_VAR]; + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env.COGNITO_POLICY_TABLE_NAME; + delete process.env.USER_POOL_ID; }); it('should be able to be constructed with event body', () => { @@ -40,7 +50,8 @@ describe('Test AgentUseCaseDeploymentAdapter', () => { FeedbackEnabled: true, CustomMappings: {} }, - IsInternalUser: 'true' + IsInternalUser: 'true', + ProvisionedConcurrencyValue: 0 }); }); @@ -52,6 +63,7 @@ describe('Test AgentUseCaseDeploymentAdapter', () => { expect(useCase.cfnParameters!.get('BedrockAgentAliasId')).toBe('fake-alias-id'); expect(useCase.cfnParameters!.get('UseCaseUUID')).toBe('11111111-2222-2222-3333-333344444444'); expect(useCase.cfnParameters!.get('FeedbackEnabled')).toBe('Yes'); + expect(useCase.cfnParameters!.get('ProvisionedConcurrencyValue')).toBe('0'); expect(useCase.cfnParameters!.get('StackDeploymentSource')).toEqual(STACK_DEPLOYMENT_SOURCE_USE_CASE); }); @@ -91,11 +103,12 @@ describe('Test AgentUseCaseDeploymentAdapter', () => { 'AgentAliasId': 'fake-alias-id', 'EnableTrace': true } - }, + }, 'FeedbackParams': { 'FeedbackEnabled': true, 'CustomMappings': {} - } + }, + 'ProvisionedConcurrencyValue': 0 }); }); @@ -149,6 +162,16 @@ describe('Test AgentUseCaseDeploymentAdapter', () => { describe('Test AgentUseCaseWithCognitoUserPool', () => { beforeEach(() => { process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + process.env.COGNITO_POLICY_TABLE_NAME = 'test-cognito-table'; + process.env.USER_POOL_ID = 'test-user-pool'; + }); + + afterEach(() => { + delete process.env[IS_INTERNAL_USER_ENV_VAR]; + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env.COGNITO_POLICY_TABLE_NAME; + delete process.env.USER_POOL_ID; }); it('should set the cfn parameters for cognito config', () => { @@ -168,4 +191,106 @@ describe('Test AgentUseCaseWithCognitoUserPool', () => { expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBe('fake-api-id'); expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBe('fake-root-resource-id'); }); + + it('should throw error for unsupported authentication provider', () => { + const eventWithUnsupportedAuth = { + ...createAgentUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentUseCaseApiEvent.body), + AuthenticationParams: { + AuthenticationProvider: 'UnsupportedProvider', + CognitoParams: { + ExistingUserPoolId: 'fake-user-pool-id' + } + } + }) + }; + + expect(() => new AgentUseCaseDeploymentAdapter(eventWithUnsupportedAuth as any as APIGatewayEvent)).toThrow( + 'Error: unsupported AuthenticationProvider: UnsupportedProvider.' + ); + }); + + it('should throw error when ExistingUserPoolId is missing for Cognito provider', () => { + const eventWithMissingUserPoolId = { + ...createAgentUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentUseCaseApiEvent.body), + AuthenticationParams: { + AuthenticationProvider: 'Cognito', + CognitoParams: { + ExistingUserPoolClientId: 'fake-user-pool-client-id' + } + } + }) + }; + + expect(() => new AgentUseCaseDeploymentAdapter(eventWithMissingUserPoolId as any as APIGatewayEvent)).toThrow( + 'Required field existingUserPoolId not provided for the "Cognito" AuthenticationProvider.' + ); + }); + + it('should handle feedback disabled', () => { + const eventWithFeedbackDisabled = { + ...createAgentUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentUseCaseApiEvent.body), + FeedbackParams: { + FeedbackEnabled: false + } + }) + }; + + let useCase = new AgentUseCaseDeploymentAdapter(eventWithFeedbackDisabled as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('FeedbackEnabled')).toBe('No'); + expect(useCase.configuration.FeedbackParams?.FeedbackEnabled).toBe(false); + expect(useCase.configuration.FeedbackParams?.CustomMappings).toBeUndefined(); + }); + + it('should handle trace disabled', () => { + const eventWithTraceDisabled = { + ...createAgentUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createAgentUseCaseApiEvent.body), + AgentParams: { + BedrockAgentParams: { + AgentId: 'fake-agent-id', + AgentAliasId: 'fake-alias-id', + EnableTrace: false + } + } + }) + }; + + let useCase = new AgentUseCaseDeploymentAdapter(eventWithTraceDisabled as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').AgentUseCaseConfiguration; + expect(config.AgentParams?.BedrockAgentParams.EnableTrace).toBe(false); + }); +}); + +describe('Test AgentUseCaseInfoAdapter', () => { + it('should create AgentUseCaseInfoAdapter with correct properties', () => { + const mockEvent = { + pathParameters: { + useCaseId: 'test-use-case-id' + }, + requestContext: { + authorizer: { + UserId: 'test-user-id' + } + } as any + } as any as APIGatewayEvent; + + const adapter = new AgentUseCaseInfoAdapter(mockEvent); + + expect(adapter.useCaseType).toBe('Agent'); + expect(adapter.useCaseId).toBe('test-use-case-id'); + expect(adapter.userId).toBe('test-user-id'); + expect(adapter.name).toBe(''); + expect(adapter.description).toBeUndefined(); + expect(adapter.providerName).toBe(''); + expect(adapter.configuration).toEqual({}); + }); }); diff --git a/source/lambda/use-case-management/test/model/chat-use-case-adapter.test.ts b/source/lambda/use-case-management/test/model/adapters/chat-use-case-adapter.test.ts similarity index 96% rename from source/lambda/use-case-management/test/model/chat-use-case-adapter.test.ts rename to source/lambda/use-case-management/test/model/adapters/chat-use-case-adapter.test.ts index ec88367c..7a6d947e 100644 --- a/source/lambda/use-case-management/test/model/chat-use-case-adapter.test.ts +++ b/source/lambda/use-case-management/test/model/adapters/chat-use-case-adapter.test.ts @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 import { APIGatewayEvent } from 'aws-lambda'; -import { ChatUseCaseDeploymentAdapter } from '../../model/chat-use-case-adapter'; +import { ChatUseCaseDeploymentAdapter } from '../../../model/adapters/chat-use-case-adapter'; -import { createUseCaseApiEvent, createUseCaseApiEventBedrockKnowledgeBaseNoOverride } from '../event-test-data'; -import { CfnParameterKeys, STACK_DEPLOYMENT_SOURCE_USE_CASE } from '../../utils/constants'; +import { createUseCaseApiEvent, createUseCaseApiEventBedrockKnowledgeBaseNoOverride } from '../../event-test-data'; +import { CfnParameterKeys, STACK_DEPLOYMENT_SOURCE_USE_CASE } from '../../../utils/constants'; jest.mock('crypto', () => { return { @@ -43,7 +43,8 @@ describe('Test ChatUseCaseDeploymentAdapter', () => { 'Temperature': 0.1, 'RAGEnabled': true, 'Streaming': true - } + }, + 'ProvisionedConcurrencyValue': 0 }); }); @@ -86,6 +87,7 @@ describe('Test ChatUseCaseDeploymentAdapter', () => { const useCase = new ChatUseCaseDeploymentAdapter(createUseCaseApiEventClone as any as APIGatewayEvent); expect(useCase.getUseCaseConfigRecordKey()).toEqual(`${mockUUID}-11111111`); + expect(useCase.cfnParameters!.get('ProvisionedConcurrencyValue')).toBe('0'); expect(useCase.cfnParameters!.get('StackDeploymentSource')).toEqual(STACK_DEPLOYMENT_SOURCE_USE_CASE); }); @@ -179,7 +181,8 @@ describe('Test ChatUseCaseDeploymentAdapter', () => { 'Temperature': 0.1, 'RAGEnabled': true, 'Streaming': true - } + }, + 'ProvisionedConcurrencyValue': 0 }); }); diff --git a/source/lambda/use-case-management/test/model/adapters/mcp-adapter.test.ts b/source/lambda/use-case-management/test/model/adapters/mcp-adapter.test.ts new file mode 100644 index 00000000..c5b13b49 --- /dev/null +++ b/source/lambda/use-case-management/test/model/adapters/mcp-adapter.test.ts @@ -0,0 +1,343 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import { UploadMCPTargetSchemaAdapter, McpAdapterFactory } from '../../../model/adapters/mcp-adapter'; +import { GetUseCaseAdapter } from '../../../model/get-use-case'; +import RequestValidationError from '../../../utils/error'; +import { GATEWAY_TARGET_TYPES, McpOperationTypes } from '../../../utils/constants'; + +jest.mock('../../../power-tools-init', () => ({ + logger: { + error: jest.fn(), + debug: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id') + } +})); + +jest.mock('../../../utils/utils', () => ({ + parseEventBody: jest.fn(), + extractUserId: jest.fn(), + generateUUID: jest.fn().mockReturnValue('test-uuid-123') +})); + +describe('MCP Adapter', () => { + let mockParseEventBody: jest.Mock; + let mockExtractUserId: jest.Mock; + + const createMockEvent = (body: any): APIGatewayEvent => ({ + httpMethod: 'POST', + resource: '/upload-schemas', + body: JSON.stringify(body), + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/upload-schemas', + pathParameters: null, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + authorizer: { + UserId: 'test-user-123' + } + } as any + }); + + beforeEach(() => { + jest.clearAllMocks(); + mockParseEventBody = require('../../../utils/utils').parseEventBody; + mockExtractUserId = require('../../../utils/utils').extractUserId; + mockExtractUserId.mockReturnValue('test-user-123'); + }); + + describe('UploadMCPTargetSchemaAdapter', () => { + describe('Valid inputs', () => { + it('should create adapter with required fields for single file', () => { + mockParseEventBody.mockReturnValue({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'my-api-schema.json' + } + ] + }); + + const event = createMockEvent({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'my-api-schema.json' + } + ] + }); + + const adapter = new UploadMCPTargetSchemaAdapter(event); + expect(adapter.userId).toBe('test-user-123'); + expect(adapter.rawFiles).toHaveLength(1); + expect(adapter.rawFiles[0].schemaType).toBe(GATEWAY_TARGET_TYPES.OPEN_API); + expect(adapter.rawFiles[0].fileName).toBe('my-api-schema.json'); + expect(adapter.files).toEqual([]); // Empty until validator processes + }); + + it('should create adapter with required fields for multiple files', () => { + mockParseEventBody.mockReturnValue({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'api-spec.yaml' + }, + { + schemaType: GATEWAY_TARGET_TYPES.LAMBDA, + fileName: 'lambda-schema.json' + } + ] + }); + + const event = createMockEvent({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'api-spec.yaml' + }, + { + schemaType: GATEWAY_TARGET_TYPES.LAMBDA, + fileName: 'lambda-schema.json' + } + ] + }); + + const adapter = new UploadMCPTargetSchemaAdapter(event); + expect(adapter.userId).toBe('test-user-123'); + expect(adapter.rawFiles).toHaveLength(2); + expect(adapter.rawFiles[0].schemaType).toBe(GATEWAY_TARGET_TYPES.OPEN_API); + expect(adapter.rawFiles[0].fileName).toBe('api-spec.yaml'); + expect(adapter.rawFiles[1].schemaType).toBe(GATEWAY_TARGET_TYPES.LAMBDA); + expect(adapter.rawFiles[1].fileName).toBe('lambda-schema.json'); + expect(adapter.files).toEqual([]); // Empty until validator processes + }); + }); + + describe('Validation errors (adapter level - basic existence checks)', () => { + it('should throw error for missing files array', () => { + mockParseEventBody.mockReturnValue({}); + + const event = createMockEvent({}); + + expect(() => new UploadMCPTargetSchemaAdapter(event)).toThrow(RequestValidationError); + expect(() => new UploadMCPTargetSchemaAdapter(event)).toThrow( + "'files' is a required field and must be a non-empty array" + ); + }); + + it('should throw error for empty files array', () => { + mockParseEventBody.mockReturnValue({ + files: [] + }); + + const event = createMockEvent({ + files: [] + }); + + expect(() => new UploadMCPTargetSchemaAdapter(event)).toThrow(RequestValidationError); + expect(() => new UploadMCPTargetSchemaAdapter(event)).toThrow( + "'files' is a required field and must be a non-empty array" + ); + }); + + it('should throw error for invalid files array', () => { + const invalidFilesArrays = [null, 'not-an-array', 123]; + + invalidFilesArrays.forEach((files) => { + mockParseEventBody.mockReturnValue({ + files + }); + + const event = createMockEvent({ + files + }); + + expect(() => new UploadMCPTargetSchemaAdapter(event)).toThrow(RequestValidationError); + expect(() => new UploadMCPTargetSchemaAdapter(event)).toThrow( + "'files' is a required field and must be a non-empty array" + ); + }); + }); + }); + }); + + describe('DeployMCPAdapter', () => { + describe('Valid inputs', () => { + it('dummy test', () => { + expect(true).toEqual(true); + }); + }); + }); + + describe('McpAdapterFactory', () => { + it('should create UploadMCPTargetSchemaAdapter for UPLOAD_SCHEMA operation', () => { + mockParseEventBody.mockReturnValue({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'test.json' + } + ] + }); + + const event = createMockEvent({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'test.json' + } + ] + }); + + const adapter = McpAdapterFactory.createAdapter(event, McpOperationTypes.UPLOAD_SCHEMA); + expect(adapter).toBeInstanceOf(UploadMCPTargetSchemaAdapter); + }); + + it('should create MCPUseCaseAdapter for CREATE operation', () => { + mockParseEventBody.mockReturnValue({ + UseCaseName: 'test-mcp', + ConversationMemoryParams: {}, + LlmParams: {} + }); + + const event = createMockEvent({ + UseCaseName: 'test-mcp', + ConversationMemoryParams: {}, + LlmParams: {} + }); + + const adapter = McpAdapterFactory.createAdapter(event, McpOperationTypes.CREATE); + expect(adapter).toBeDefined(); + }); + + it('should create MCPUseCaseAdapter for UPDATE operation', () => { + mockParseEventBody.mockReturnValue({ + UseCaseName: 'test-mcp', + ConversationMemoryParams: {}, + LlmParams: {} + }); + + const event = createMockEvent({ + UseCaseName: 'test-mcp', + ConversationMemoryParams: {}, + LlmParams: {} + }); + + const adapter = McpAdapterFactory.createAdapter(event, McpOperationTypes.UPDATE); + expect(adapter).toBeDefined(); + }); + + it('should create MCPInfoAdapter for DELETE operation', () => { + const event: APIGatewayEvent = { + httpMethod: 'DELETE', + resource: '/deployments/mcp/{useCaseId}', + body: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/mcp/test-id', + pathParameters: { useCaseId: 'test-id' }, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + authorizer: { + UserId: 'test-user-123' + } + } as any + }; + + const adapter = McpAdapterFactory.createAdapter(event, McpOperationTypes.DELETE); + expect(adapter).toBeDefined(); + }); + + it('should create MCPInfoAdapter for PERMANENTLY_DELETE operation', () => { + const event: APIGatewayEvent = { + httpMethod: 'DELETE', + resource: '/deployments/mcp/{useCaseId}', + body: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/mcp/test-id', + pathParameters: { useCaseId: 'test-id' }, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + authorizer: { + UserId: 'test-user-123' + } + } as any + }; + + const adapter = McpAdapterFactory.createAdapter(event, McpOperationTypes.PERMANENTLY_DELETE); + expect(adapter).toBeDefined(); + }); + + it('should create GetUseCaseAdapter for GET operation', () => { + const event: APIGatewayEvent = { + httpMethod: 'GET', + resource: '/deployments/mcp/{useCaseId}', + body: null, + headers: { + Authorization: 'Bearer test-token' + }, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/mcp/test-id', + pathParameters: { useCaseId: 'test-id' }, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + authorizer: { + UserId: 'test-user-123' + } + } as any + }; + + const adapter = McpAdapterFactory.createAdapter(event, McpOperationTypes.GET); + expect(adapter).toBeInstanceOf(GetUseCaseAdapter); + }); + + it('should create ListMCPAdapter for LIST operation', () => { + const event: APIGatewayEvent = { + httpMethod: 'GET', + resource: '/deployments/mcp', + body: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/mcp', + pathParameters: null, + queryStringParameters: { pageNumber: '1' }, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + authorizer: { + UserId: 'test-user-123' + } + } as any + }; + + const adapter = McpAdapterFactory.createAdapter(event, McpOperationTypes.LIST); + expect(adapter).toBeDefined(); + expect((adapter as any).event).toEqual(event); + }); + + it('should throw error for unsupported operation', () => { + const event = createMockEvent({}); + expect(() => McpAdapterFactory.createAdapter(event, 'unsupported')).toThrow( + 'Unsupported MCP operation: unsupported' + ); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/adapters/workflow-use-case-adapter.test.ts b/source/lambda/use-case-management/test/model/adapters/workflow-use-case-adapter.test.ts new file mode 100644 index 00000000..33f6d65b --- /dev/null +++ b/source/lambda/use-case-management/test/model/adapters/workflow-use-case-adapter.test.ts @@ -0,0 +1,876 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import { + WorkflowUseCaseDeploymentAdapter, + WorkflowUseCaseInfoAdapter +} from '../../../model/adapters/workflow-use-case-adapter'; +import { + UseCaseTypes, + IS_INTERNAL_USER_ENV_VAR, + STACK_DEPLOYMENT_SOURCE_USE_CASE, + AUTHENTICATION_PROVIDERS +} from '../../../utils/constants'; + +jest.mock('crypto', () => { + return { + ...jest.requireActual('crypto'), + randomUUID: jest.fn().mockReturnValue('11111111-2222-2222-3333-333344444444') + }; +}); + +const createWorkflowUseCaseApiEvent = { + body: JSON.stringify({ + UseCaseType: 'Workflow', + UseCaseName: 'fake-workflow', + UseCaseDescription: 'fake-workflow-description', + DefaultUserEmail: 'fake-email@example.com', + DeployUI: true, + FeedbackParams: { + FeedbackEnabled: true + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'fake-model', + BedrockInferenceType: 'ON_DEMAND' + }, + Temperature: 0.1, + RAGEnabled: false, + Streaming: true + }, + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + SystemPrompt: 'You are a helpful workflow assistant', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-1', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent One', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are Agent One - First agent' + } + }, + { + UseCaseId: 'agent-2', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent Two', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are Agent Two - Second agent' + } + } + ] + } + } + }), + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +const createWorkflowUseCaseApiEventWithCognito = { + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolId: 'fake-user-pool-id', + ExistingUserPoolClientId: 'fake-user-pool-client-id' + } + } + }), + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +const createWorkflowUseCaseApiEventWithApi = { + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + ExistingRestApiId: 'fake-api-id', + ExistingApiRootResourceId: 'fake-root-resource-id' + }), + requestContext: { + authorizer: { + UserId: 'fake-user-id' + } + } +}; + +describe('WorkflowUseCaseDeploymentAdapter', () => { + beforeEach(() => { + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + process.env.COGNITO_POLICY_TABLE_NAME = 'test-cognito-table'; + process.env.USER_POOL_ID = 'test-user-pool'; + process.env.IS_INTERNAL_USER = 'false'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + }); + + afterEach(() => { + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env.COGNITO_POLICY_TABLE_NAME; + delete process.env.USER_POOL_ID; + delete process.env.IS_INTERNAL_USER; + delete process.env.MULTIMODAL_METADATA_TABLE_NAME; + delete process.env.MULTIMODAL_DATA_BUCKET; + }); + + it('should be able to be constructed with event body', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new WorkflowUseCaseDeploymentAdapter(createWorkflowUseCaseApiEvent as any as APIGatewayEvent); + + expect(useCase.configuration).toEqual({ + UseCaseType: 'Workflow', + UseCaseName: 'fake-workflow', + UseCaseDescription: 'fake-workflow-description', + AuthenticationParams: undefined, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'fake-model', + BedrockInferenceType: 'ON_DEMAND' + }, + Temperature: 0.1, + RAGEnabled: false, + Streaming: true, + Verbose: undefined, + ModelParams: undefined, + PromptParams: undefined, + MultimodalParams: undefined, + SageMakerLlmParams: undefined + }, + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + SystemPrompt: 'You are a helpful workflow assistant', + MemoryConfig: undefined, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-1', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent One', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are Agent One - First agent' + } + }, + { + UseCaseId: 'agent-2', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent Two', + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + BedrockInferenceType: 'QUICK_START' + }, + Temperature: 0.7, + Streaming: true, + Verbose: false, + RAGEnabled: false + }, + AgentBuilderParams: { + SystemPrompt: 'You are Agent Two - Second agent' + } + } + ] + } + }, + FeedbackParams: { + FeedbackEnabled: true, + CustomMappings: {} + }, + IsInternalUser: 'true' + }); + }); + + it('should have the correct cfnParameters', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new WorkflowUseCaseDeploymentAdapter(createWorkflowUseCaseApiEvent as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('DefaultUserEmail')).toBe('fake-email@example.com'); + expect(useCase.cfnParameters!.get('DeployUI')).toBe('Yes'); + expect(useCase.cfnParameters!.get('UseCaseUUID')).toBe('11111111-2222-2222-3333-333344444444'); + expect(useCase.cfnParameters!.get('FeedbackEnabled')).toBe('Yes'); + expect(useCase.cfnParameters!.get('StackDeploymentSource')).toEqual(STACK_DEPLOYMENT_SOURCE_USE_CASE); + expect(useCase.cfnParameters!.get('UseCaseConfigTableName')).toBe('test-config-table'); + expect(useCase.cfnParameters!.get('ExistingCognitoUserPoolId')).toBe('test-user-pool'); + expect(useCase.cfnParameters!.get('ComponentCognitoUserPoolId')).toBe('test-user-pool'); + expect(useCase.cfnParameters!.get('ExistingCognitoGroupPolicyTableName')).toBe('test-cognito-table'); + expect(useCase.cfnParameters!.get('UseInferenceProfile')).toBe('No'); + }); + + it('should generate correct template name', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new WorkflowUseCaseDeploymentAdapter(createWorkflowUseCaseApiEvent as any as APIGatewayEvent); + + expect(useCase.templateName).toBe('WorkflowStack'); + }); + + it('should return empty array for retained parameter keys', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new WorkflowUseCaseDeploymentAdapter(createWorkflowUseCaseApiEvent as any as APIGatewayEvent); + + expect(useCase.getRetainedParameterKeys()).toEqual([]); + }); + + it('should handle feedback disabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithFeedbackDisabled = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + FeedbackParams: { + FeedbackEnabled: false + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithFeedbackDisabled as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('FeedbackEnabled')).toBe('No'); + expect(useCase.configuration.FeedbackParams?.FeedbackEnabled).toBe(false); + expect(useCase.configuration.FeedbackParams?.CustomMappings).toBeUndefined(); + }); + + it('should handle apiRootResourceId with existing API configuration', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const apiRootResourceId = 'test-root-resource-id'; + const eventWithExistingApi = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + ExistingRestApiId: 'test-api-id' + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter( + eventWithExistingApi as any as APIGatewayEvent, + apiRootResourceId + ); + + expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBe('test-api-id'); + expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBe(apiRootResourceId); + }); + + it('should set API-related CFN parameters correctly when provided', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const apiRootResourceId = 'test-root-resource-id'; + const eventWithExistingApi = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + ExistingRestApiId: 'test-api-id', + AuthenticationParams: undefined // ensure no Cognito user pool to allow API params + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter( + eventWithExistingApi as any as APIGatewayEvent, + apiRootResourceId + ); + + expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBe('test-api-id'); + expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBe(apiRootResourceId); + }); + + it('should throw error when ModelProvider is missing', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithoutProvider = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + BedrockLlmParams: { ModelId: 'fake-model' }, + Temperature: 0.1 + } + }) + }; + + expect(() => new WorkflowUseCaseDeploymentAdapter(eventWithoutProvider as any as APIGatewayEvent)).toThrow( + 'Model Provider name not found in event body' + ); + }); + + it('should handle prompt parameters', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithPromptParams = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + PromptParams: { + PromptTemplate: 'You are a helpful workflow assistant. {input}', + DisambiguationPromptTemplate: 'Please clarify: {input}' + } + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithPromptParams as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').WorkflowUseCaseConfiguration; + expect(config.LlmParams?.PromptParams).toEqual({ + PromptTemplate: 'You are a helpful workflow assistant. {input}', + DisambiguationPromptTemplate: 'Please clarify: {input}' + }); + }); + + it('should handle model parameters', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithModelParams = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + ModelParams: { + max_tokens: 1000, + top_p: 0.9 + } + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithModelParams as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').WorkflowUseCaseConfiguration; + expect(config.LlmParams?.ModelParams).toEqual({ + max_tokens: 1000, + top_p: 0.9 + }); + }); + + it('should handle verbose mode', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithVerbose = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + Verbose: true + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithVerbose as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').WorkflowUseCaseConfiguration; + expect(config.LlmParams?.Verbose).toBe(true); + }); + + it('should set UseInferenceProfile to Yes when BedrockInferenceType is INFERENCE_PROFILE', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithInferenceProfile = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + BedrockLlmParams: { + InferenceProfileId: 'fake-profile-id', + BedrockInferenceType: 'INFERENCE_PROFILE' + } + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithInferenceProfile as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('UseInferenceProfile')).toBe('Yes'); + }); + + it('should set UseInferenceProfile to No when BedrockInferenceType is not INFERENCE_PROFILE', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithModelId = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + BedrockLlmParams: { + ModelId: 'fake-model-id', + BedrockInferenceType: 'ON_DEMAND' + } + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithModelId as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('UseInferenceProfile')).toBe('No'); + }); + + it('should set UseInferenceProfile to No when BedrockInferenceType is not provided', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new WorkflowUseCaseDeploymentAdapter(createWorkflowUseCaseApiEvent as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('UseInferenceProfile')).toBe('No'); + }); + + it('should set ComponentCognitoUserPoolId parameter', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new WorkflowUseCaseDeploymentAdapter(createWorkflowUseCaseApiEvent as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('ComponentCognitoUserPoolId')).toBe('test-user-pool'); + }); + + describe('multimodal environment variables', () => { + it('should set multimodal CFN parameters when multimodal is enabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // Create event with multimodal enabled + const eventWithMultimodal = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithMultimodal as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('MultimodalEnabled')).toBe('Yes'); + expect(useCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe('test-multimodal-table'); + expect(useCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe('test-multimodal-bucket'); + }); + + it('should set multimodal CFN parameters to disabled state when multimodal is disabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // First create event with multimodal disabled + const eventWithMultimodalDisabled = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithMultimodalDisabled as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.get('MultimodalEnabled')).toBe('No'); + expect(useCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe(''); + expect(useCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe(''); + }); + + it('should not set multimodal CFN parameters when multimodal params are not provided', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // First create event without multimodal params + const eventWithoutMultimodal = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: undefined + } + }) + }; + + let useCase = new WorkflowUseCaseDeploymentAdapter(eventWithoutMultimodal as any as APIGatewayEvent); + + expect(useCase.cfnParameters!.has('MultimodalEnabled')).toBe(false); + expect(useCase.cfnParameters!.has('ExistingMultimodalDataMetadataTable')).toBe(false); + expect(useCase.cfnParameters!.has('ExistingMultimodalDataBucket')).toBe(false); + }); + + it('should support updating from multimodal enabled to disabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // Create with multimodal enabled + const eventWithMultimodalEnabled = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }) + }; + + let enabledUseCase = new WorkflowUseCaseDeploymentAdapter( + eventWithMultimodalEnabled as any as APIGatewayEvent + ); + + expect(enabledUseCase.cfnParameters!.get('MultimodalEnabled')).toBe('Yes'); + expect(enabledUseCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe( + 'test-multimodal-table' + ); + expect(enabledUseCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe('test-multimodal-bucket'); + + // Update to disabled + const eventWithMultimodalDisabled = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }) + }; + + let disabledUseCase = new WorkflowUseCaseDeploymentAdapter( + eventWithMultimodalDisabled as any as APIGatewayEvent + ); + + expect(disabledUseCase.cfnParameters!.get('MultimodalEnabled')).toBe('No'); + expect(disabledUseCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe(''); + expect(disabledUseCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe(''); + }); + + it('should support updating from multimodal disabled to enabled', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.MULTIMODAL_METADATA_TABLE_NAME = 'test-multimodal-table'; + process.env.MULTIMODAL_DATA_BUCKET = 'test-multimodal-bucket'; + + // Create with multimodal disabled + const eventWithMultimodalDisabled = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }) + }; + + let disabledUseCase = new WorkflowUseCaseDeploymentAdapter( + eventWithMultimodalDisabled as any as APIGatewayEvent + ); + + expect(disabledUseCase.cfnParameters!.get('MultimodalEnabled')).toBe('No'); + expect(disabledUseCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe(''); + expect(disabledUseCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe(''); + + // Update to enabled + const eventWithMultimodalEnabled = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }) + }; + + let enabledUseCase = new WorkflowUseCaseDeploymentAdapter( + eventWithMultimodalEnabled as any as APIGatewayEvent + ); + + expect(enabledUseCase.cfnParameters!.get('MultimodalEnabled')).toBe('Yes'); + expect(enabledUseCase.cfnParameters!.get('ExistingMultimodalDataMetadataTable')).toBe( + 'test-multimodal-table' + ); + expect(enabledUseCase.cfnParameters!.get('ExistingMultimodalDataBucket')).toBe('test-multimodal-bucket'); + }); + + it('should handle multimodal parameters correctly in LlmParams', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithMultimodal = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: true + } + } + }) + }; + + const useCase = new WorkflowUseCaseDeploymentAdapter(eventWithMultimodal as any as APIGatewayEvent); + const config = useCase.configuration as any; + + expect(config.LlmParams.MultimodalParams).toBeDefined(); + expect(config.LlmParams.MultimodalParams.MultimodalEnabled).toBe(true); + }); + + it('should handle multimodal parameters when disabled in LlmParams', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const eventWithMultimodal = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + LlmParams: { + ...JSON.parse(createWorkflowUseCaseApiEvent.body).LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + }) + }; + + const useCase = new WorkflowUseCaseDeploymentAdapter(eventWithMultimodal as any as APIGatewayEvent); + const config = useCase.configuration as any; + + expect(config.LlmParams.MultimodalParams).toBeDefined(); + expect(config.LlmParams.MultimodalParams.MultimodalEnabled).toBe(false); + }); + + it('should handle missing multimodal parameters in LlmParams', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + const useCase = new WorkflowUseCaseDeploymentAdapter( + createWorkflowUseCaseApiEvent as any as APIGatewayEvent + ); + const config = useCase.configuration as any; + + expect(config.LlmParams.MultimodalParams).toBeUndefined(); + }); + }); + + describe('workflow-specific parameters', () => { + it('should handle workflow parameters correctly', () => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + + let useCase = new WorkflowUseCaseDeploymentAdapter(createWorkflowUseCaseApiEvent as any as APIGatewayEvent); + + const config = useCase.configuration as import('../../../model/types').WorkflowUseCaseConfiguration; + expect(config.WorkflowParams?.OrchestrationPattern).toBe('agents-as-tools'); + expect(config.WorkflowParams?.SystemPrompt).toBe('You are a helpful workflow assistant'); + expect(config.WorkflowParams?.AgentsAsToolsParams?.Agents).toEqual([ + { + UseCaseId: 'agent-1', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent One', + LlmParams: expect.objectContaining({ + ModelProvider: expect.any(String) + }), + AgentBuilderParams: expect.objectContaining({ + SystemPrompt: expect.any(String) + }) + }, + { + UseCaseId: 'agent-2', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Agent Two', + LlmParams: expect.objectContaining({ + ModelProvider: expect.any(String) + }), + AgentBuilderParams: expect.objectContaining({ + SystemPrompt: expect.any(String) + }) + } + ]); + }); + }); +}); + +describe('Test WorkflowUseCaseWithCognitoUserPool', () => { + beforeEach(() => { + process.env[IS_INTERNAL_USER_ENV_VAR] = 'true'; + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + process.env.COGNITO_POLICY_TABLE_NAME = 'test-cognito-table'; + process.env.USER_POOL_ID = 'test-user-pool'; + }); + + afterEach(() => { + delete process.env[IS_INTERNAL_USER_ENV_VAR]; + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + delete process.env.COGNITO_POLICY_TABLE_NAME; + delete process.env.USER_POOL_ID; + }); + + it('should set the cfn parameters for cognito config', () => { + let useCase = new WorkflowUseCaseDeploymentAdapter( + createWorkflowUseCaseApiEventWithCognito as any as APIGatewayEvent + ); + expect(useCase.cfnParameters!.get('ExistingCognitoUserPoolId')).toBe('fake-user-pool-id'); + expect(useCase.cfnParameters!.get('ComponentCognitoUserPoolId')).toBe('fake-user-pool-id'); + expect(useCase.cfnParameters!.get('ExistingCognitoUserPoolClient')).toBe('fake-user-pool-client-id'); + expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBeUndefined(); + expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBeUndefined(); + }); + + it('should set the cfn parameters for api config', () => { + let useCase = new WorkflowUseCaseDeploymentAdapter( + createWorkflowUseCaseApiEventWithApi as any as APIGatewayEvent + ); + expect(useCase.cfnParameters!.get('ExistingRestApiId')).toBe('fake-api-id'); + expect(useCase.cfnParameters!.get('ExistingApiRootResourceId')).toBe('fake-root-resource-id'); + }); + + it('should throw error for unsupported authentication provider', () => { + const eventWithUnsupportedAuth = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + AuthenticationParams: { + AuthenticationProvider: 'UnsupportedProvider', + CognitoParams: { + ExistingUserPoolId: 'fake-user-pool-id' + } + } + }) + }; + + expect(() => new WorkflowUseCaseDeploymentAdapter(eventWithUnsupportedAuth as any as APIGatewayEvent)).toThrow( + 'Error: unsupported AuthenticationProvider: UnsupportedProvider.' + ); + }); + + it('should throw error when ExistingUserPoolId is missing for Cognito provider', () => { + const eventWithMissingUserPoolId = { + ...createWorkflowUseCaseApiEvent, + body: JSON.stringify({ + ...JSON.parse(createWorkflowUseCaseApiEvent.body), + AuthenticationParams: { + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO, + CognitoParams: { + ExistingUserPoolClientId: 'fake-user-pool-client-id' + } + } + }) + }; + + expect( + () => new WorkflowUseCaseDeploymentAdapter(eventWithMissingUserPoolId as any as APIGatewayEvent) + ).toThrow('Required field existingUserPoolId not provided for the "Cognito" AuthenticationProvider.'); + }); +}); + +describe('WorkflowUseCaseInfoAdapter', () => { + let mockEvent: APIGatewayEvent; + + beforeEach(() => { + mockEvent = { + pathParameters: { + useCaseId: 'test-use-case-id' + }, + requestContext: { + authorizer: { + UserId: 'test-user-id' + } + } as any + } as any as APIGatewayEvent; + }); + + it('should create WorkflowUseCaseInfoAdapter with correct properties', () => { + const adapter = new WorkflowUseCaseInfoAdapter(mockEvent); + + expect(adapter.useCaseType).toBe(UseCaseTypes.WORKFLOW); + expect(adapter.useCaseId).toBe('test-use-case-id'); + expect(adapter.userId).toBe('test-user-id'); + expect(adapter.name).toBe(''); + expect(adapter.description).toBeUndefined(); + expect(adapter.providerName).toBe(''); + expect(adapter.configuration).toEqual({}); + }); + + it('should handle missing pathParameters gracefully', () => { + const eventWithoutPathParams = { + pathParameters: null, + requestContext: { + authorizer: { + UserId: 'test-user-id' + } + } as any + } as any as APIGatewayEvent; + + expect(() => new WorkflowUseCaseInfoAdapter(eventWithoutPathParams)).toThrow(); + }); + + it('should handle missing useCaseId in pathParameters', () => { + const eventWithoutUseCaseId = { + pathParameters: {}, + requestContext: { + authorizer: { + UserId: 'test-user-id' + } + } as any + } as any as APIGatewayEvent; + + expect(() => new WorkflowUseCaseInfoAdapter(eventWithoutUseCaseId)).toThrow(); + }); +}); diff --git a/source/lambda/use-case-management/test/model/command/agent-builder-command.test.ts b/source/lambda/use-case-management/test/model/command/agent-builder-command.test.ts new file mode 100644 index 00000000..00565480 --- /dev/null +++ b/source/lambda/use-case-management/test/model/command/agent-builder-command.test.ts @@ -0,0 +1,149 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ListAgentBuilderCommand } from '../../../model/commands/agent-builder-command'; +import { ListUseCasesAdapter, UseCaseRecord } from '../../../model/list-use-cases'; +import { UseCaseTypes } from '../../../utils/constants'; + +jest.mock('../../../power-tools-init', () => ({ + tracer: { + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn() + } +})); + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn().mockReturnValue({}) +})); + +describe('ListAgentBuilderCommand', () => { + let command: ListAgentBuilderCommand; + let mockAdapter: jest.Mocked; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = JSON.stringify({ customUserAgent: 'test-agent' }); + process.env.USE_CASES_TABLE_NAME = 'test-table'; + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + }); + + beforeEach(() => { + command = new ListAgentBuilderCommand(); + mockAdapter = {} as jest.Mocked; + }); + + afterAll(() => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env.USE_CASES_TABLE_NAME; + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + }); + + describe('filterUseCasesByType', () => { + it('should filter use cases to only include Agent Builder type', () => { + const useCaseRecords: UseCaseRecord[] = [ + { + UseCaseId: 'agent-1', + Name: 'Agent Builder 1', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + Description: 'Test agent builder', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-1', + UseCaseConfigRecordKey: 'key-1', + UseCaseConfigTableName: 'test-config-table' + }, + { + UseCaseId: 'chat-1', + Name: 'Chat 1', + UseCaseType: UseCaseTypes.CHAT, + Description: 'Test chat', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-2', + UseCaseConfigRecordKey: 'key-2', + UseCaseConfigTableName: 'test-config-table' + }, + { + UseCaseId: 'agent-2', + Name: 'Agent Builder 2', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + Description: 'Test agent builder 2', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-3', + UseCaseConfigRecordKey: 'key-3', + UseCaseConfigTableName: 'test-config-table' + }, + { + UseCaseId: 'workflow-1', + Name: 'Workflow 1', + UseCaseType: UseCaseTypes.WORKFLOW, + Description: 'Test workflow', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-4', + UseCaseConfigRecordKey: 'key-4', + UseCaseConfigTableName: 'test-config-table' + } + ]; + + const result = (command as any).filterUseCasesByType(useCaseRecords); + + expect(result).toHaveLength(2); + expect(result[0].UseCaseType).toBe(UseCaseTypes.AGENT_BUILDER); + expect(result[1].UseCaseType).toBe(UseCaseTypes.AGENT_BUILDER); + expect(result[0].UseCaseId).toBe('agent-1'); + expect(result[1].UseCaseId).toBe('agent-2'); + }); + + it('should return empty array when no Agent Builder use cases exist', () => { + const useCaseRecords: UseCaseRecord[] = [ + { + UseCaseId: 'chat-1', + Name: 'Chat 1', + UseCaseType: UseCaseTypes.CHAT, + Description: 'Test chat', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-1', + UseCaseConfigRecordKey: 'key-1', + UseCaseConfigTableName: 'test-config-table' + }, + { + UseCaseId: 'workflow-1', + Name: 'Workflow 1', + UseCaseType: UseCaseTypes.WORKFLOW, + Description: 'Test workflow', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-2', + UseCaseConfigRecordKey: 'key-2', + UseCaseConfigTableName: 'test-config-table' + } + ]; + + const result = (command as any).filterUseCasesByType(useCaseRecords); + + expect(result).toHaveLength(0); + }); + + it('should return empty array when input is empty', () => { + const useCaseRecords: UseCaseRecord[] = []; + + const result = (command as any).filterUseCasesByType(useCaseRecords); + + expect(result).toHaveLength(0); + }); + }); + + describe('execute', () => { + it('should call parent execute method', async () => { + const mockParentExecute = jest.fn().mockResolvedValue({ success: true }); + (command as any).constructor.prototype.__proto__.execute = mockParentExecute; + + const result = await command.execute(mockAdapter); + + expect(mockParentExecute).toHaveBeenCalledWith(mockAdapter); + expect(result).toEqual({ success: true }); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/command/workflow-command.test.ts b/source/lambda/use-case-management/test/model/command/workflow-command.test.ts new file mode 100644 index 00000000..9a1de95a --- /dev/null +++ b/source/lambda/use-case-management/test/model/command/workflow-command.test.ts @@ -0,0 +1,149 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ListWorkflowCommand } from '../../../model/commands/workflow-command'; +import { ListUseCasesAdapter, UseCaseRecord } from '../../../model/list-use-cases'; +import { UseCaseTypes } from '../../../utils/constants'; + +jest.mock('../../../power-tools-init', () => ({ + tracer: { + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn() + } +})); + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn().mockReturnValue({}) +})); + +describe('ListWorkflowCommand', () => { + let command: ListWorkflowCommand; + let mockAdapter: jest.Mocked; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = JSON.stringify({ customUserAgent: 'test-agent' }); + process.env.USE_CASES_TABLE_NAME = 'test-table'; + process.env.USE_CASE_CONFIG_TABLE_NAME = 'test-config-table'; + }); + + beforeEach(() => { + command = new ListWorkflowCommand(); + mockAdapter = {} as jest.Mocked; + }); + + afterAll(() => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env.USE_CASES_TABLE_NAME; + delete process.env.USE_CASE_CONFIG_TABLE_NAME; + }); + + describe('filterUseCasesByType', () => { + it('should filter use cases to only include Workflow type', () => { + const useCaseRecords: UseCaseRecord[] = [ + { + UseCaseId: 'workflow-1', + Name: 'Workflow 1', + UseCaseType: UseCaseTypes.WORKFLOW, + Description: 'Test workflow', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-1', + UseCaseConfigRecordKey: 'key-1', + UseCaseConfigTableName: 'test-config-table' + }, + { + UseCaseId: 'chat-1', + Name: 'Chat 1', + UseCaseType: UseCaseTypes.CHAT, + Description: 'Test chat', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-2', + UseCaseConfigRecordKey: 'key-2', + UseCaseConfigTableName: 'test-config-table' + }, + { + UseCaseId: 'workflow-2', + Name: 'Workflow 2', + UseCaseType: UseCaseTypes.WORKFLOW, + Description: 'Test workflow 2', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-3', + UseCaseConfigRecordKey: 'key-3', + UseCaseConfigTableName: 'test-config-table' + }, + { + UseCaseId: 'agent-1', + Name: 'Agent Builder 1', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + Description: 'Test agent builder', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-4', + UseCaseConfigRecordKey: 'key-4', + UseCaseConfigTableName: 'test-config-table' + } + ]; + + const result = (command as any).filterUseCasesByType(useCaseRecords); + + expect(result).toHaveLength(2); + expect(result[0].UseCaseType).toBe(UseCaseTypes.WORKFLOW); + expect(result[1].UseCaseType).toBe(UseCaseTypes.WORKFLOW); + expect(result[0].UseCaseId).toBe('workflow-1'); + expect(result[1].UseCaseId).toBe('workflow-2'); + }); + + it('should return empty array when no Workflow use cases exist', () => { + const useCaseRecords: UseCaseRecord[] = [ + { + UseCaseId: 'chat-1', + Name: 'Chat 1', + UseCaseType: UseCaseTypes.CHAT, + Description: 'Test chat', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-1', + UseCaseConfigRecordKey: 'key-1', + UseCaseConfigTableName: 'test-config-table' + }, + { + UseCaseId: 'agent-1', + Name: 'Agent Builder 1', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + Description: 'Test agent builder', + CreatedBy: 'user1', + CreatedDate: '2024-01-01', + StackId: 'stack-2', + UseCaseConfigRecordKey: 'key-2', + UseCaseConfigTableName: 'test-config-table' + } + ]; + + const result = (command as any).filterUseCasesByType(useCaseRecords); + + expect(result).toHaveLength(0); + }); + + it('should return empty array when input is empty', () => { + const useCaseRecords: UseCaseRecord[] = []; + + const result = (command as any).filterUseCasesByType(useCaseRecords); + + expect(result).toHaveLength(0); + }); + }); + + describe('execute', () => { + it('should call parent execute method', async () => { + const mockParentExecute = jest.fn().mockResolvedValue({ success: true }); + (command as any).constructor.prototype.__proto__.execute = mockParentExecute; + + const result = await command.execute(mockAdapter); + + expect(mockParentExecute).toHaveBeenCalledWith(mockAdapter); + expect(result).toEqual({ success: true }); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/commands/mcp-command.test.ts b/source/lambda/use-case-management/test/model/commands/mcp-command.test.ts new file mode 100644 index 00000000..d6682365 --- /dev/null +++ b/source/lambda/use-case-management/test/model/commands/mcp-command.test.ts @@ -0,0 +1,1929 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { S3Client } from '@aws-sdk/client-s3'; +import { SSMClient, GetParameterCommand } from '@aws-sdk/client-ssm'; +import { createPresignedPost } from '@aws-sdk/s3-presigned-post'; +import { APIGatewayEvent } from 'aws-lambda'; +import { UploadSchemasCommand, ListMCPServersCommand } from '../../../model/commands/mcp-command'; +import { UploadMCPTargetSchemaAdapter } from '../../../model/adapters/mcp-adapter'; +import { ListUseCasesAdapter } from '../../../model/list-use-cases'; +import { StorageManagement } from '../../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../../ddb/use-case-config-management'; +import { StackManagement } from '../../../cfn/stack-management'; +import { + GATEWAY_TARGET_TYPES, + MCP_SCHEMA_UPLOAD_CONSTRAINTS, + MCP_CONTENT_TYPES, + UseCaseTypes, + STRANDS_TOOLS_SSM_PARAM_ENV_VAR +} from '../../../utils/constants'; + +const { logger } = require('../../../power-tools-init'); + +jest.mock('@aws-sdk/client-s3'); +jest.mock('@aws-sdk/s3-presigned-post'); +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn().mockReturnValue({}) +})); + +jest.mock('../../../utils/utils', () => ({ + parseEventBody: jest.fn(), + extractUserId: jest.fn(), + generateUUID: jest.fn() +})); + +jest.mock('../../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureAWSv3Client: jest.fn(), + captureMethod: () => () => {} + } +})); + +describe('UploadSchemasCommand', () => { + let command: UploadSchemasCommand; + let mockS3Client: jest.Mocked; + let mockCreatePresignedPost: jest.MockedFunction; + let mockParseEventBody: jest.Mock; + let mockExtractUserId: jest.Mock; + let mockGenerateUUID: jest.Mock; + + const mockEvent: APIGatewayEvent = { + httpMethod: 'POST', + resource: '/upload-schema', + body: JSON.stringify({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'my-api-schema.json' + } + ] + }), + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/upload-schema', + pathParameters: null, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + authorizer: { + UserId: 'test-user-123' + } + } as any + }; + + let mockUploadMCPTargetSchemaAdapter: UploadMCPTargetSchemaAdapter; + + beforeEach(() => { + jest.clearAllMocks(); + + process.env.GAAB_DEPLOYMENTS_BUCKET = 'test-mcp-schemas-bucket'; + process.env.AWS_SDK_USER_AGENT = JSON.stringify({ customUserAgent: 'test-agent' }); + process.env._X_AMZN_TRACE_ID = 'test-trace-id'; + + mockParseEventBody = require('../../../utils/utils').parseEventBody; + mockParseEventBody.mockReturnValue({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'my-api-schema.json' + } + ] + }); + + mockExtractUserId = jest.requireMock('../../../utils/utils').extractUserId; + mockExtractUserId.mockReturnValue('test-user-123'); + + mockGenerateUUID = jest.requireMock('../../../utils/utils').generateUUID; + mockGenerateUUID.mockReturnValue('test-uuid-123'); + + mockS3Client = new S3Client({}) as jest.Mocked; + (S3Client as jest.Mock).mockImplementation(() => mockS3Client); + + // Mock createPresignedPost + mockCreatePresignedPost = createPresignedPost as jest.MockedFunction; + mockCreatePresignedPost.mockResolvedValue({ + url: 'https://test-bucket.s3.amazonaws.com/', + fields: { + key: 'mcp/openApi_schema_2024-01-15T10-30-45-123Z.json', + 'x-amz-meta-userid': 'test-user-123', + 'x-amz-meta-filename': 'my-api-schema.json', + 'x-amz-meta-fileextension': '.json', + 'Content-Type': MCP_CONTENT_TYPES.JSON, + 'tagging': + 'schemaTypeopenApiSchemauploadedBytest-user-123sourcemcp-apistatusinactive' + } + }); + + command = new UploadSchemasCommand(); + mockUploadMCPTargetSchemaAdapter = new UploadMCPTargetSchemaAdapter(mockEvent); + }); + + afterEach(() => { + delete process.env.GAAB_DEPLOYMENTS_BUCKET; + delete process.env.AWS_SDK_USER_AGENT; + delete process.env._X_AMZN_TRACE_ID; + }); + + describe('UploadSchemasCommand backing the upload-schema endpoint', () => { + it('should successfully generate presigned POST for valid upload schema operation', async () => { + const result = await command.execute(mockUploadMCPTargetSchemaAdapter); + + expect(result).toEqual({ + uploads: [ + { + uploadUrl: 'https://test-bucket.s3.amazonaws.com/', + formFields: { + key: 'mcp/openApi_schema_2024-01-15T10-30-45-123Z.json', + 'x-amz-meta-userid': 'test-user-123', + 'x-amz-meta-filename': 'my-api-schema.json', + 'x-amz-meta-fileextension': '.json', + 'Content-Type': MCP_CONTENT_TYPES.JSON, + 'tagging': + 'schemaTypeopenApiSchemauploadedBytest-user-123sourcemcp-apistatusinactive' + }, + fileName: 'my-api-schema.json', + expiresIn: MCP_SCHEMA_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS, + createdAt: expect.any(String) + } + ] + }); + }); + + it('should throw error for invalid operation type', async () => { + const invalidOperation = {} as any; + + await expect(command.execute(invalidOperation)).rejects.toThrow( + 'UploadSchemasCommand only supports UploadMCPTargetSchemaAdapter operations' + ); + }); + + it('should handle S3 errors gracefully', async () => { + mockCreatePresignedPost.mockRejectedValue(new Error('S3 service error')); + + await expect(command.execute(mockUploadMCPTargetSchemaAdapter)).rejects.toThrow('S3 service error'); + }); + + it('should throw error for invalid file extension for schema type', async () => { + // Mock an adapter with invalid extension for lambda schema type + mockParseEventBody.mockReturnValue({ + files: [ + { + schemaType: GATEWAY_TARGET_TYPES.LAMBDA, + fileName: 'schema.yaml' // Invalid for lambda + } + ] + }); + + const invalidAdapter = new UploadMCPTargetSchemaAdapter(mockEvent); + + await expect(command.execute(invalidAdapter)).rejects.toThrow( + "Invalid files[0] file extension '.yaml' for file 'schema.yaml' with schema type 'lambda'" + ); + }); + }); + + describe('S3 integration', () => { + it('should generate presigned POST with proper S3 configuration and advanced validation', async () => { + await command.execute(mockUploadMCPTargetSchemaAdapter); + + expect(mockCreatePresignedPost).toHaveBeenCalledWith(mockS3Client, { + Bucket: 'test-mcp-schemas-bucket', + Key: 'mcp/schemas/openApiSchema/test-uuid-123.json', + Conditions: [ + ['starts-with', '$key', 'mcp/schemas/openApiSchema/'], + [ + 'content-length-range', + MCP_SCHEMA_UPLOAD_CONSTRAINTS.MIN_FILE_SIZE_BYTES, + MCP_SCHEMA_UPLOAD_CONSTRAINTS.MAX_FILE_SIZE_BYTES + ], + ['eq', '$x-amz-meta-userid', 'test-user-123'], + ['eq', '$x-amz-meta-filename', 'my-api-schema.json'], + ['eq', '$x-amz-meta-fileextension', '.json'], + ['eq', '$Content-Type', MCP_CONTENT_TYPES.JSON], + [ + 'eq', + '$tagging', + 'schemaTypeopenApiSchemauploadedBytest-user-123sourcemcp-apistatusinactive' + ] + ], + Fields: { + key: 'mcp/schemas/openApiSchema/test-uuid-123.json', + 'x-amz-meta-userid': 'test-user-123', + 'x-amz-meta-filename': 'my-api-schema.json', + 'x-amz-meta-fileextension': '.json', + 'Content-Type': MCP_CONTENT_TYPES.JSON, + 'tagging': + 'schemaTypeopenApiSchemauploadedBytest-user-123sourcemcp-apistatusinactive' + }, + Expires: MCP_SCHEMA_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS + }); + }); + }); + + describe('response format', () => { + it('should include upload constraints and enhanced metadata in response', async () => { + const result = await command.execute(mockUploadMCPTargetSchemaAdapter); + + expect(result).toEqual( + expect.objectContaining({ + uploads: expect.arrayContaining([ + expect.objectContaining({ + uploadUrl: expect.any(String), + formFields: expect.any(Object), + fileName: expect.any(String), + expiresIn: MCP_SCHEMA_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS, + createdAt: expect.any(String) + }) + ]) + }) + ); + }); + + it('should include proper form fields for POST upload', async () => { + const result = await command.execute(mockUploadMCPTargetSchemaAdapter); + + expect(result.uploads[0].formFields).toEqual( + expect.objectContaining({ + key: expect.any(String), + 'x-amz-meta-userid': 'test-user-123', + 'x-amz-meta-filename': 'my-api-schema.json', + 'x-amz-meta-fileextension': '.json', + 'Content-Type': MCP_CONTENT_TYPES.JSON, + 'tagging': expect.stringContaining('schemaTypeopenApiSchema') + }) + ); + }); + }); +}); + +describe('ListMCPServersCommand', () => { + let command: ListMCPServersCommand; + let mockStorageMgmt: any; + let mockUseCaseConfigMgmt: any; + let mockStackMgmt: any; + let mockSSMClient: any; + + beforeEach(() => { + jest.clearAllMocks(); + + mockStorageMgmt = { + getAllCaseRecords: jest.fn() + }; + + mockUseCaseConfigMgmt = { + getUseCaseConfigFromRecord: jest.fn() + }; + + mockStackMgmt = { + getStackDetailsFromUseCaseRecord: jest.fn() + }; + + mockSSMClient = { + send: jest.fn() + }; + + command = new ListMCPServersCommand(); + (command as any).storageMgmt = mockStorageMgmt; + (command as any).useCaseConfigMgmt = mockUseCaseConfigMgmt; + (command as any).stackMgmt = mockStackMgmt; + (command as any).ssmClient = mockSSMClient; + }); + + describe('Empty results handling', () => { + it('should return empty array when no use case records exist', async () => { + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: [] + }); + + const result = await command.execute({} as any); + + expect(result).toEqual({ mcpServers: [], strandsTools: [] }); + expect(mockStorageMgmt.getAllCaseRecords).toHaveBeenCalledTimes(1); + }); + + it('should return empty array when no MCP servers exist (only other use case types)', async () => { + const mockRecords = [ + { + UseCaseId: 'use-case-1', + Name: 'Chat Use Case', + UseCaseConfigRecordKey: 'config-key-1' + }, + { + UseCaseId: 'use-case-2', + Name: 'Agent Use Case', + UseCaseConfigRecordKey: 'config-key-2' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ UseCaseType: 'Chat' }) + .mockResolvedValueOnce({ UseCaseType: 'AgentBuilder' }); + + const result = await command.execute({} as any); + + expect(result).toEqual({ mcpServers: [], strandsTools: [] }); + expect(mockUseCaseConfigMgmt.getUseCaseConfigFromRecord).toHaveBeenCalledTimes(2); + }); + + it('should log debug message when no MCP servers are found', async () => { + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: [] + }); + + await command.execute({} as any); + + expect(logger.debug).toHaveBeenCalledWith('No MCP servers found, returning empty array'); + }); + }); + + describe('Successful MCP server listing', () => { + it('should return formatted list of MCP servers with ACTIVE status', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'WeatherAPI-MCP', + Description: 'MCP server for weather data', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + }, + { + UseCaseId: 'mcp-2', + Name: 'DatabaseTools-MCP', + UseCaseConfigRecordKey: 'config-key-2', + StackId: 'stack-2' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'WeatherAPI-MCP', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway-1.example.com' + } + } + }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'DatabaseTools-MCP', + MCPParams: { + RuntimeParams: { + RuntimeUrl: 'https://runtime-2.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }) + .mockResolvedValueOnce({ status: 'UPDATE_COMPLETE' }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(2); + expect(result.mcpServers[0]).toEqual({ + useCaseId: 'mcp-1', + useCaseName: 'WeatherAPI-MCP', + description: '', + status: 'ACTIVE', + url: 'https://gateway-1.example.com', + type: 'gateway' + }); + expect(result.mcpServers[1]).toEqual({ + useCaseId: 'mcp-2', + useCaseName: 'DatabaseTools-MCP', + description: '', + status: 'ACTIVE', + url: 'https://runtime-2.example.com', + type: 'runtime' + }); + expect(result.strandsTools).toEqual([]); + }); + + it('should filter out non-MCP server use cases', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'WeatherAPI-MCP', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + }, + { + UseCaseId: 'chat-1', + Name: 'Chat Use Case', + UseCaseConfigRecordKey: 'config-key-2', + StackId: 'stack-2' + }, + { + UseCaseId: 'mcp-2', + Name: 'DatabaseTools-MCP', + UseCaseConfigRecordKey: 'config-key-3', + StackId: 'stack-3' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'WeatherAPI-MCP', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway-1.example.com' + } + } + }) + .mockResolvedValueOnce({ UseCaseType: 'Chat' }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'DatabaseTools-MCP', + MCPParams: { + RuntimeParams: { + RuntimeUrl: 'https://runtime-2.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }) + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(2); + expect(result.mcpServers[0].useCaseId).toBe('mcp-1'); + expect(result.mcpServers[0].useCaseName).toBe('WeatherAPI-MCP'); + expect(result.mcpServers[1].useCaseId).toBe('mcp-2'); + expect(result.mcpServers[1].useCaseName).toBe('DatabaseTools-MCP'); + }); + + it('should return INACTIVE status for servers in CREATE_IN_PROGRESS state', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'Creating-MCP', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'Creating-MCP', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_IN_PROGRESS' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('INACTIVE'); + expect(result.mcpServers[0].useCaseName).toBe('Creating-MCP'); + }); + + it('should return INACTIVE status for servers in ROLLBACK state', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'Failed-MCP', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'Failed-MCP', + MCPParams: { + RuntimeParams: { + RuntimeUrl: 'https://runtime.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'ROLLBACK_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('INACTIVE'); + expect(result.mcpServers[0].useCaseName).toBe('Failed-MCP'); + }); + + it('should handle stack status retrieval failure gracefully', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP-With-Stack-Error', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'MCP-With-Stack-Error', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockRejectedValueOnce(new Error('Stack not found')); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('INACTIVE'); + expect(logger.warn).toHaveBeenCalledWith( + expect.stringContaining('Could not retrieve stack status for mcp-1') + ); + }); + + it('should use empty string as default when UseCaseName is missing', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP-Without-Name', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + // UseCaseName is intentionally missing + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0]).toEqual({ + useCaseId: 'mcp-1', + useCaseName: '', // Should default to empty string + description: '', // Should default to empty string when UseCaseName is missing + status: 'ACTIVE', + url: 'https://gateway.example.com', + type: 'gateway' + }); + }); + }); + + describe('Error handling', () => { + it('should exclude records with missing UseCaseConfigRecordKey', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'Valid MCP', + UseCaseConfigRecordKey: 'config-key-1' + }, + { + UseCaseId: 'mcp-2', + Name: 'Invalid MCP', + UseCaseConfigRecordKey: undefined + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(logger.error).toHaveBeenCalledWith('UseCaseConfigRecordKey missing for record: mcp-2'); + }); + + it('should exclude records where config retrieval fails', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'Valid MCP', + UseCaseConfigRecordKey: 'config-key-1' + }, + { + UseCaseId: 'mcp-2', + Name: 'Failing MCP', + UseCaseConfigRecordKey: 'config-key-2' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'Valid MCP', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }) + .mockRejectedValueOnce(new Error('Config not found')); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0].useCaseId).toBe('mcp-1'); + expect(result.mcpServers[0].useCaseName).toBe('Valid MCP'); + expect(logger.warn).toHaveBeenCalledWith( + expect.stringContaining('Error retrieving config for record mcp-2') + ); + }); + + it('should throw error when database read fails', async () => { + mockStorageMgmt.getAllCaseRecords.mockRejectedValue(new Error('Database error')); + + await expect(command.execute({} as any)).rejects.toThrow('Database error'); + }); + }); + + describe('SSM Parameter Store integration for Strands tools', () => { + beforeEach(() => { + // Setup default successful MCP server response + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: [] + }); + }); + + it('should successfully read and parse Strands tools from SSM parameter', async () => { + const mockTools = [ + { + name: 'Calculator', + description: 'Perform mathematical calculations', + value: 'calculator', + category: 'Math', + isDefault: true + }, + { + name: 'Current Time', + description: 'Get current date and time', + value: 'current_time', + category: 'Utilities', + isDefault: true + } + ]; + + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + mockSSMClient.send.mockResolvedValue({ + Parameter: { + Value: JSON.stringify(mockTools) + } + }); + + const result = await command.execute({} as any); + + expect(result).toEqual({ + mcpServers: [], + strandsTools: mockTools + }); + expect(mockSSMClient.send).toHaveBeenCalledTimes(1); + }); + + it('should return empty tools array when STRANDS_TOOLS_SSM_PARAM environment variable is not set', async () => { + delete process.env.STRANDS_TOOLS_SSM_PARAM; + + const result = await command.execute({} as any); + + expect(result.strandsTools).toEqual([]); + expect(logger.warn).toHaveBeenCalledWith( + expect.stringContaining('STRANDS_TOOLS_SSM_PARAM environment variable not set') + ); + expect(mockSSMClient.send).not.toHaveBeenCalled(); + }); + + it('should handle ParameterNotFound error and return empty tools array', async () => { + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + const parameterNotFoundError = new Error('Parameter not found'); + parameterNotFoundError.name = 'ParameterNotFound'; + mockSSMClient.send.mockRejectedValue(parameterNotFoundError); + + const result = await command.execute({} as any); + + expect(result.strandsTools).toEqual([]); + expect(logger.warn).toHaveBeenCalledWith( + expect.stringContaining('SSM parameter /gaab/test-stack/strands-tools not found') + ); + }); + + it('should handle AccessDeniedException error and return empty tools array', async () => { + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + const accessDeniedError = new Error('Access denied'); + accessDeniedError.name = 'AccessDeniedException'; + mockSSMClient.send.mockRejectedValue(accessDeniedError); + + const result = await command.execute({} as any); + + expect(result.strandsTools).toEqual([]); + expect(logger.error).toHaveBeenCalledWith( + expect.stringContaining('Insufficient IAM permissions to read SSM parameter') + ); + }); + + it('should handle invalid JSON error and return empty tools array', async () => { + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + mockSSMClient.send.mockResolvedValue({ + Parameter: { + Value: 'invalid json {' + } + }); + + const result = await command.execute({} as any); + + expect(result.strandsTools).toEqual([]); + expect(logger.error).toHaveBeenCalledWith(expect.stringContaining('Invalid JSON in SSM parameter')); + }); + + it('should return empty tools array when SSM parameter has no value', async () => { + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + mockSSMClient.send.mockResolvedValue({ + Parameter: { + Value: undefined + } + }); + + const result = await command.execute({} as any); + + expect(result.strandsTools).toEqual([]); + expect(logger.warn).toHaveBeenCalledWith( + expect.stringContaining('SSM parameter /gaab/test-stack/strands-tools has no value') + ); + }); + + it('should return empty tools array when SSM parameter value is not an array', async () => { + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + mockSSMClient.send.mockResolvedValue({ + Parameter: { + Value: JSON.stringify({ notAnArray: true }) + } + }); + + const result = await command.execute({} as any); + + expect(result.strandsTools).toEqual([]); + expect(logger.error).toHaveBeenCalledWith( + expect.stringContaining('SSM parameter /gaab/test-stack/strands-tools value is not an array') + ); + }); + + it('should handle unexpected errors and return empty tools array', async () => { + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + const unexpectedError = new Error('Network timeout'); + mockSSMClient.send.mockRejectedValue(unexpectedError); + + const result = await command.execute({} as any); + + expect(result.strandsTools).toEqual([]); + expect(logger.error).toHaveBeenCalledWith( + expect.stringContaining('Unexpected error reading SSM parameter') + ); + }); + + it('should include both mcpServers and strandsTools in response format', async () => { + const mockTools = [ + { + name: 'Calculator', + description: 'Perform calculations', + value: 'calculator', + isDefault: true + } + ]; + + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'Test MCP', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValue({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'test-mcp', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValue({ + status: 'CREATE_COMPLETE' + }); + + mockSSMClient.send.mockResolvedValue({ + Parameter: { + Value: JSON.stringify(mockTools) + } + }); + + const result = await command.execute({} as any); + + expect(result).toEqual({ + mcpServers: [ + { + useCaseId: 'mcp-1', + useCaseName: 'test-mcp', + description: '', + status: 'ACTIVE', + url: 'https://gateway.example.com', + type: 'gateway' + } + ], + strandsTools: mockTools + }); + }); + + it('should log successful tool retrieval with count', async () => { + const mockTools = [ + { name: 'Tool1', description: 'Desc1', value: 'tool1', isDefault: true }, + { name: 'Tool2', description: 'Desc2', value: 'tool2', isDefault: false }, + { name: 'Tool3', description: 'Desc3', value: 'tool3', isDefault: true } + ]; + + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + mockSSMClient.send.mockResolvedValue({ + Parameter: { + Value: JSON.stringify(mockTools) + } + }); + + await command.execute({} as any); + + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('Successfully loaded 3 Strands tools from SSM parameter') + ); + }); + }); + + describe('Enhanced MCP server details with URL and type', () => { + it('should return Gateway MCP server with url and type', async () => { + const mockRecords = [ + { + UseCaseId: 'gateway-mcp-1', + UseCaseName: 'mock-mcp-gateway', + Name: 'Gateway MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'gateway-mcp-server', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway-123.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0]).toEqual({ + useCaseId: 'gateway-mcp-1', + useCaseName: 'gateway-mcp-server', + description: '', + status: 'ACTIVE', + url: 'https://gateway-123.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp', + type: 'gateway' + }); + }); + + it('should return Runtime MCP server with url and type', async () => { + const mockRecords = [ + { + UseCaseId: 'runtime-mcp-1', + Name: 'Runtime MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'runtime-mcp-server', + MCPParams: { + RuntimeParams: { + RuntimeUrl: + 'https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Fruntime-123/invocations?qualifier=DEFAULT' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'UPDATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0]).toEqual({ + useCaseId: 'runtime-mcp-1', + useCaseName: 'runtime-mcp-server', + description: '', + status: 'ACTIVE', + url: 'https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Fruntime-123/invocations?qualifier=DEFAULT', + type: 'runtime' + }); + }); + + it('should handle missing Gateway URL with empty string default', async () => { + const mockRecords = [ + { + UseCaseId: 'gateway-mcp-1', + Name: 'Gateway MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'gateway-mcp-no-url', + MCPParams: { + GatewayParams: {} + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0].url).toBe(''); + expect(result.mcpServers[0].type).toBe('gateway'); + expect(logger.warn).toHaveBeenCalledWith('GatewayUrl missing for use case gateway-mcp-1'); + }); + + it('should handle missing Runtime URL with empty string default', async () => { + const mockRecords = [ + { + UseCaseId: 'runtime-mcp-1', + Name: 'Runtime MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'runtime-mcp-no-url', + MCPParams: { + RuntimeParams: {} + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0].url).toBe(''); + expect(result.mcpServers[0].type).toBe('runtime'); + expect(logger.warn).toHaveBeenCalledWith('RuntimeUrl missing for use case runtime-mcp-1'); + }); + + it('should construct Runtime URL from RuntimeArn when RuntimeUrl is missing', async () => { + process.env.AWS_REGION = 'us-west-2'; + const mockRecords = [ + { + UseCaseId: 'runtime-mcp-1', + Name: 'Runtime MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'runtime-mcp-with-arn', + MCPParams: { + RuntimeParams: { + RuntimeArn: 'arn:aws:bedrock-agentcore:us-west-2:123456789012:runtime/runtime-123' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0].url).toBe( + 'https://bedrock-agentcore.us-west-2.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-west-2%3A123456789012%3Aruntime%2Fruntime-123/invocations?qualifier=DEFAULT' + ); + expect(result.mcpServers[0].type).toBe('runtime'); + }); + + it('should exclude servers with both Gateway and Runtime params', async () => { + const mockRecords = [ + { + UseCaseId: 'invalid-mcp-1', + Name: 'Invalid MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'invalid-mcp-both-params', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + }, + RuntimeParams: { + RuntimeUrl: 'https://runtime.example.com' + } + } + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(0); + expect(logger.error).toHaveBeenCalledWith( + 'Invalid MCP configuration: both GatewayParams and RuntimeParams present for use case invalid-mcp-1' + ); + }); + + it('should exclude servers with neither Gateway nor Runtime params', async () => { + const mockRecords = [ + { + UseCaseId: 'invalid-mcp-1', + Name: 'Invalid MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'invalid-mcp-no-params', + MCPParams: {} + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(0); + expect(logger.error).toHaveBeenCalledWith( + 'Invalid MCP configuration: neither GatewayParams nor RuntimeParams present for use case invalid-mcp-1' + ); + }); + + it('should determine ACTIVE status for CREATE_COMPLETE', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'mcp-create-complete', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('ACTIVE'); + }); + + it('should determine ACTIVE status for UPDATE_COMPLETE', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'mcp-update-complete', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'UPDATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('ACTIVE'); + }); + + it('should determine INACTIVE status for other stack statuses', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'mcp-delete-in-progress', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'DELETE_IN_PROGRESS' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('INACTIVE'); + }); + + it('should continue processing when one server fails', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'Valid MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + }, + { + UseCaseId: 'mcp-2', + Name: 'Failing MCP Server', + UseCaseConfigRecordKey: 'config-key-2', + StackId: 'stack-2' + }, + { + UseCaseId: 'mcp-3', + Name: 'Another Valid MCP Server', + UseCaseConfigRecordKey: 'config-key-3', + StackId: 'stack-3' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway-1.example.com' + } + } + }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: {} + }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + RuntimeParams: { + RuntimeUrl: 'https://runtime-3.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }) + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(2); + expect(result.mcpServers[0].useCaseId).toBe('mcp-1'); + expect(result.mcpServers[1].useCaseId).toBe('mcp-3'); + expect(logger.error).toHaveBeenCalledWith( + expect.stringContaining('Error extracting MCP server details for mcp-2') + ); + }); + + it('should include Strands tools in response alongside MCP servers', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + const mockTools = [ + { + name: 'Calculator', + description: 'Perform calculations', + value: 'calculator', + isDefault: true + } + ]; + + process.env.STRANDS_TOOLS_SSM_PARAM = '/gaab/test-stack/strands-tools'; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'mcp-server', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + mockSSMClient.send.mockResolvedValue({ + Parameter: { + Value: JSON.stringify(mockTools) + } + }); + + const result = await command.execute({} as any); + + expect(result).toEqual({ + mcpServers: [ + { + useCaseId: 'mcp-1', + useCaseName: 'mcp-server', + description: '', + status: 'ACTIVE', + url: 'https://gateway.example.com', + type: 'gateway' + } + ], + strandsTools: mockTools + }); + }); + + it('should log info message with total server count', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server 1', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + }, + { + UseCaseId: 'mcp-2', + Name: 'MCP Server 2', + UseCaseConfigRecordKey: 'config-key-2', + StackId: 'stack-2' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway-1.example.com' + } + } + }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + RuntimeParams: { + RuntimeUrl: 'https://runtime-2.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }) + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }); + + await command.execute({} as any); + + expect(logger.info).toHaveBeenCalledWith('Found 2 MCP servers'); + }); + }); + + describe('URL extraction and validation', () => { + it('should extract Gateway URL correctly from config', async () => { + const mockRecords = [ + { + UseCaseId: 'gateway-mcp', + UseCaseName: 'Gateway', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'gateway-mcp-server', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway-abc123.example.com/mcp' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0]).toEqual({ + useCaseId: 'gateway-mcp', + useCaseName: 'gateway-mcp-server', + description: '', + status: 'ACTIVE', + url: 'https://gateway-abc123.example.com/mcp', + type: 'gateway' + }); + }); + + it('should extract Runtime URL correctly from config', async () => { + const mockRecords = [ + { + UseCaseId: 'runtime-mcp', + Name: 'Runtime MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'runtime-mcp-server', + MCPParams: { + RuntimeParams: { + RuntimeUrl: + 'https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Fruntime-id/invocations?qualifier=DEFAULT' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'UPDATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0]).toEqual({ + useCaseId: 'runtime-mcp', + useCaseName: 'runtime-mcp-server', + description: '', + status: 'ACTIVE', + url: 'https://bedrock-agentcore.us-east-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aus-east-1%3A123456789012%3Aruntime%2Fruntime-id/invocations?qualifier=DEFAULT', + type: 'runtime' + }); + }); + + it('should use empty string default for missing Gateway URL', async () => { + const mockRecords = [ + { + UseCaseId: 'gateway-no-url', + UseCaseName: 'gateway', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'gateway-no-url', + MCPParams: { + GatewayParams: { + // GatewayUrl is missing + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0]).toEqual({ + useCaseId: 'gateway-no-url', + useCaseName: 'gateway-no-url', + description: '', + status: 'ACTIVE', + url: '', + type: 'gateway' + }); + expect(logger.warn).toHaveBeenCalledWith('GatewayUrl missing for use case gateway-no-url'); + }); + + it('should use empty string default for missing Runtime URL', async () => { + const mockRecords = [ + { + UseCaseId: 'runtime-no-url', + UseCaseName: 'Runtime MCP Without URL', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'runtime-no-url', + MCPParams: { + RuntimeParams: { + // RuntimeUrl is missing + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0]).toEqual({ + useCaseId: 'runtime-no-url', + useCaseName: 'runtime-no-url', + description: '', + status: 'ACTIVE', + url: '', + type: 'runtime' + }); + expect(logger.warn).toHaveBeenCalledWith('RuntimeUrl missing for use case runtime-no-url'); + }); + }); + + describe('Invalid configuration handling', () => { + it('should exclude servers with both Gateway and Runtime params', async () => { + const mockRecords = [ + { + UseCaseId: 'valid-mcp', + Name: 'Valid MCP', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + }, + { + UseCaseId: 'invalid-both', + Name: 'Invalid MCP with both params', + UseCaseConfigRecordKey: 'config-key-2', + StackId: 'stack-2' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + }, + RuntimeParams: { + RuntimeUrl: 'https://runtime.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0].useCaseId).toBe('valid-mcp'); + expect(logger.error).toHaveBeenCalledWith( + 'Invalid MCP configuration: both GatewayParams and RuntimeParams present for use case invalid-both' + ); + }); + + it('should exclude servers with neither Gateway nor Runtime params', async () => { + const mockRecords = [ + { + UseCaseId: 'valid-mcp', + Name: 'Valid MCP', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + }, + { + UseCaseId: 'invalid-neither', + Name: 'Invalid MCP with no params', + UseCaseConfigRecordKey: 'config-key-2', + StackId: 'stack-2' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + // Neither GatewayParams nor RuntimeParams + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(1); + expect(result.mcpServers[0].useCaseId).toBe('valid-mcp'); + expect(logger.error).toHaveBeenCalledWith( + 'Invalid MCP configuration: neither GatewayParams nor RuntimeParams present for use case invalid-neither' + ); + }); + + it('should continue processing other servers when one server has invalid config', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'Valid MCP 1', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + }, + { + UseCaseId: 'mcp-invalid', + Name: 'Invalid MCP', + UseCaseConfigRecordKey: 'config-key-2', + StackId: 'stack-2' + }, + { + UseCaseId: 'mcp-2', + Name: 'Valid MCP 2', + UseCaseConfigRecordKey: 'config-key-3', + StackId: 'stack-3' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway-1.example.com' + } + } + }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + // Invalid: neither params + } + }) + .mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + MCPParams: { + RuntimeParams: { + RuntimeUrl: 'https://runtime-2.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }) + .mockResolvedValueOnce({ status: 'CREATE_COMPLETE' }); + + const result = await command.execute({} as any); + + expect(result.mcpServers).toHaveLength(2); + expect(result.mcpServers[0].useCaseId).toBe('mcp-1'); + expect(result.mcpServers[1].useCaseId).toBe('mcp-2'); + expect(logger.error).toHaveBeenCalledWith(expect.stringContaining('Invalid MCP configuration')); + }); + }); + + describe('Status determination', () => { + it('should return ACTIVE status for CREATE_COMPLETE', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'mcp-status-create-complete', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'CREATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('ACTIVE'); + }); + + it('should return ACTIVE status for UPDATE_COMPLETE', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'mcp-status-update-complete', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'UPDATE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('ACTIVE'); + }); + + it('should return INACTIVE status for DELETE_COMPLETE', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'mcp-status-delete-complete', + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://gateway.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'DELETE_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('INACTIVE'); + }); + + it('should return INACTIVE status for UPDATE_ROLLBACK_COMPLETE', async () => { + const mockRecords = [ + { + UseCaseId: 'mcp-1', + Name: 'MCP Server', + UseCaseConfigRecordKey: 'config-key-1', + StackId: 'stack-1' + } + ]; + + mockStorageMgmt.getAllCaseRecords.mockResolvedValue({ + useCaseRecords: mockRecords + }); + + mockUseCaseConfigMgmt.getUseCaseConfigFromRecord.mockResolvedValueOnce({ + UseCaseType: UseCaseTypes.MCP_SERVER, + UseCaseName: 'mcp-status-rollback-complete', + MCPParams: { + RuntimeParams: { + RuntimeUrl: 'https://runtime.example.com' + } + } + }); + + mockStackMgmt.getStackDetailsFromUseCaseRecord.mockResolvedValueOnce({ + status: 'UPDATE_ROLLBACK_COMPLETE' + }); + + const result = await command.execute({} as any); + + expect(result.mcpServers[0].status).toBe('INACTIVE'); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/get-use-case.test.ts b/source/lambda/use-case-management/test/model/get-use-case.test.ts index 271dacbf..9222e2e9 100644 --- a/source/lambda/use-case-management/test/model/get-use-case.test.ts +++ b/source/lambda/use-case-management/test/model/get-use-case.test.ts @@ -80,6 +80,7 @@ function createTextUseCaseParams(promptEditingEnabled: boolean): CombinedUseCase 'FeedbackParams': { 'FeedbackEnabled': true }, + 'ProvisionedConcurrencyValue': 0, 'UseCaseType': 'Text' }; return { ...mockUseCaseRecord, ...mockStackDetails, ...mockUseCaseConfig }; @@ -115,6 +116,37 @@ function createAgentUseCaseParams() { return { ...mockUseCaseRecord, ...mockStackDetails, ...mockUseCaseConfig }; } +function createAgentBuilderUseCaseParams(): CombinedUseCaseParams { + const mockUseCaseConfig = { + 'IsInternalUser': 'true', + 'UseCaseName': 'test-agent-builder', + 'UseCaseType': 'AgentBuilder', + 'LlmParams': { + 'Streaming': true, + 'Temperature': 0.5, + 'Verbose': false, + 'BedrockLlmParams': { + 'BedrockInferenceType': 'INFERENCE_PROFILE', + 'InferenceProfileId': 'us.anthropic.claude-3-7-sonnet-20250219-v1:0' + }, + 'ModelProvider': 'Bedrock', + 'ModelParams': {}, + 'RAGEnabled': false + }, + 'AgentBuilderParams': { + 'SystemPrompt': + 'You are a helpful AI assistant. Your role is to:\n\n- Provide accurate and helpful responses to user questions\n- Be concise and clear in your communication\n- Ask for clarification when needed\n- Maintain a professional and friendly tone\n- Use the tools and MCP servers available to you when appropriate.', + 'MemoryConfig': { + 'LongTermEnabled': false + } + }, + 'FeedbackParams': { + 'FeedbackEnabled': false + } + }; + return { ...mockUseCaseRecord, ...mockStackDetails, ...mockUseCaseConfig }; +} + describe('When creating a get use case adapter', () => { it('Should create a GetUseCaseAdpater instance correctly', () => { const event = { @@ -222,7 +254,8 @@ describe('When using get use case adapter to cast to different types', () => { }, 'FeedbackParams': { 'FeedbackEnabled': true - } + }, + 'ProvisionedConcurrencyValue': 0 }); }); @@ -275,4 +308,51 @@ describe('When using get use case adapter to cast to different types', () => { 'ModelProviderName': 'BedrockAgent' }); }); + + it('Should cast AgentBuilder use case to admin type with AgentBuilderParams', () => { + const useCaseDetails = createAgentBuilderUseCaseParams(); + const useCaseInfo = castToAdminType(useCaseDetails); + + expect(useCaseInfo).toEqual({ + 'UseCaseName': 'test-agent-builder', + 'UseCaseType': 'AgentBuilder', + 'UseCaseId': 'a1b2c3d4-5e6f-7g8h-9i10-j11k12l13m14', + 'Description': 'Customer sentiment analysis use case for retail division', + 'CreatedDate': '2025-07-15T09:45:33.124Z', + 'StackId': + 'arn:aws:cloudformation:us-west-2:123456789012:stack/prod-a1b2c3d4/45678901-abcd-12ef-3456-789012ghijkl', + 'Status': 'UPDATE_COMPLETE', + 'ragEnabled': 'false', + 'deployUI': 'Yes', + 'createNewVpc': undefined, + 'vpcEnabled': 'Yes', + 'vpcId': 'mock-vpc-id', + 'knowledgeBaseType': 'Kendra', + 'cloudFrontWebUrl': 'mock-cloudfront-url', + 'defaultUserEmail': 'john_doe@example.com', + 'LlmParams': { + 'Streaming': true, + 'Temperature': 0.5, + 'Verbose': false, + 'BedrockLlmParams': { + 'BedrockInferenceType': 'INFERENCE_PROFILE', + 'InferenceProfileId': 'us.anthropic.claude-3-7-sonnet-20250219-v1:0' + }, + 'ModelProvider': 'Bedrock', + 'ModelParams': {}, + 'RAGEnabled': false + }, + 'AgentBuilderParams': { + 'SystemPrompt': + 'You are a helpful AI assistant. Your role is to:\n\n- Provide accurate and helpful responses to user questions\n- Be concise and clear in your communication\n- Ask for clarification when needed\n- Maintain a professional and friendly tone\n- Use the tools and MCP servers available to you when appropriate.', + 'MemoryConfig': { + 'LongTermEnabled': false + } + }, + 'FeedbackParams': { + 'FeedbackEnabled': false + }, + 'ProvisionedConcurrencyValue': undefined + }); + }); }); diff --git a/source/lambda/use-case-management/test/model/use-case-validator.test.ts b/source/lambda/use-case-management/test/model/use-case-validator.test.ts deleted file mode 100644 index e98f9286..00000000 --- a/source/lambda/use-case-management/test/model/use-case-validator.test.ts +++ /dev/null @@ -1,2153 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import { CognitoIdentityProviderClient, DescribeUserPoolCommand } from '@aws-sdk/client-cognito-identity-provider'; -import { DynamoDBClient, GetItemCommand, InternalServerError } from '@aws-sdk/client-dynamodb'; -import { marshall } from '@aws-sdk/util-dynamodb'; -import { mockClient } from 'aws-sdk-client-mock'; -import { StorageManagement } from '../../ddb/storage-management'; -import { UseCaseConfigManagement } from '../../ddb/use-case-config-management'; -import { AgentUseCaseDeploymentAdapter } from '../../model/agent-use-case-adapter'; -import { AgentUseCaseConfiguration } from '../../model/types'; -import { UseCase } from '../../model/use-case'; -import { AgentUseCaseValidator, TextUseCaseValidator, UseCaseValidator } from '../../model/use-case-validator'; -import { - AUTHENTICATION_PROVIDERS, - CHAT_PROVIDERS, - CfnParameterKeys, - KnowledgeBaseTypes, - MODEL_INFO_TABLE_NAME_ENV_VAR, - ModelInfoTableKeys, - USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, - UseCaseTypes -} from '../../utils/constants'; - -describe('Testing use case validation for Text UseCases', () => { - let config: any; - let cfnParameters: Map; - let ddbMockedClient: any; - let validator: UseCaseValidator; - let modelInfoTableName = 'model-info-table'; - - beforeAll(() => { - config = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - cfnParameters = new Map(); - cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'fake-id'); - - process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; - process.env[MODEL_INFO_TABLE_NAME_ENV_VAR] = modelInfoTableName; - process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'UseCaseConfigTable'; - - cfnParameters = new Map(); - cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'fake-id'); - const storageMgmt = new StorageManagement(); - const useCaseConfigManagement = new UseCaseConfigManagement(); - - validator = UseCaseValidator.createValidator(UseCaseTypes.CHAT, storageMgmt, useCaseConfigManagement); - ddbMockedClient = mockClient(DynamoDBClient); - }); - - afterEach(() => { - ddbMockedClient.reset(); - }); - - afterAll(() => { - delete process.env.AWS_SDK_USER_AGENT; - delete process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]; - delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; - - ddbMockedClient.restore(); - }); - - describe('When successfully invoking Create/Update Commands', () => { - beforeEach(() => { - config = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - ddbMockedClient - .on(GetItemCommand, { - 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] - }) - .resolves({ - Item: marshall({ config: config }) - }) - .on(GetItemCommand, { - 'TableName': `${process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]}`, - 'Key': { - [ModelInfoTableKeys.MODEL_INFO_TABLE_PARTITION_KEY]: { 'S': 'Chat' } - } - }) - .resolves({ - Item: marshall({ - 'UseCase': 'Chat', - 'SortKey': `${CHAT_PROVIDERS.BEDROCK}#fake-model`, - 'ModelProviderName': CHAT_PROVIDERS.BEDROCK, - 'ModelName': 'fake-model', - 'AllowsStreaming': false, - 'Prompt': 'Prompt2', - 'MaxTemperature': '100', - 'DefaultTemperature': '0.1', - 'MinTemperature': '0', - 'DefaultStopSequences': [], - 'MemoryConfig': { - 'history': 'chat_history', - 'input': 'question', - 'context': null, - 'ai_prefix': 'AI', - 'human_prefix': 'Human', - 'output': 'answer' - }, - 'MaxPromptSize': 2000, - 'MaxChatMessageSize': 2500 - }) - }) - .on(GetItemCommand, { - 'TableName': `${process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]}`, - 'Key': { - [ModelInfoTableKeys.MODEL_INFO_TABLE_PARTITION_KEY]: { 'S': 'RAGChat' } - } - }) - .resolves({ - Item: marshall({ - 'UseCase': 'RAGChat', - 'SortKey': `${CHAT_PROVIDERS.BEDROCK}#fake-model`, - 'ModelProviderName': CHAT_PROVIDERS.BEDROCK, - 'ModelName': 'fake-model', - 'AllowsStreaming': false, - 'Prompt': 'Prompt2 {context}', - 'MaxTemperature': '100', - 'DefaultTemperature': '0.1', - 'MinTemperature': '0', - 'DefaultStopSequences': [], - 'MemoryConfig': { - 'history': 'chat_history', - 'input': 'question', - 'context': 'context', - 'ai_prefix': 'AI', - 'human_prefix': 'Human', - 'output': 'answer' - }, - 'MaxPromptSize': 2000, - 'MaxChatMessageSize': 2500 - }) - }); - }); - - describe('When successfully invoking Create Commands', () => { - it('should validate a new use case', async () => { - config = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - const useCase = new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - config, - 'test-user', - 'FakeProviderName', - 'Chat' - ); - const result = await validator.validateNewUseCase(useCase.clone()); - - let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); - expect(getItemCalls.length).toEqual(1); - expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); - expect(result).toEqual(useCase); - }); - - it('should validate a new use case with bedrock RAG', async () => { - let ragConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.BEDROCK, - NumberOfDocs: 5, - ReturnSourceDocs: true, - BedrockKnowledgeBaseParams: { BedrockKnowledgeBaseId: 'fakeid', RetrievalFilter: {} } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - const useCase = new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - ragConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ); - const result = await validator.validateNewUseCase(useCase.clone()); - - let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); - expect(getItemCalls.length).toEqual(1); - expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); - expect(result).toEqual(useCase); - }); - - it('should validate a new use case with a custom model ARN in bedrock', async () => { - let config = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.BEDROCK, - NumberOfDocs: 5, - ReturnSourceDocs: true, - BedrockKnowledgeBaseParams: { BedrockKnowledgeBaseId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model', - ModelArn: 'fake-arn' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - const useCase = new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - config, - 'test-user', - 'FakeProviderName', - 'Chat' - ); - const result = await validator.validateNewUseCase(useCase.clone()); - - let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); - expect(getItemCalls.length).toEqual(1); - expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); - expect(result).toEqual(useCase); - }); - - it('should validate a new use case with no prompt provided', async () => { - let newConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - Streaming: true, - Temperature: 0.1 - } - }; - const result = await validator.validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - newConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ); - - let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); - expect(getItemCalls.length).toEqual(1); - expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); - expect(result.configuration.LlmParams?.PromptParams?.PromptTemplate).toEqual('Prompt2'); - }); - - it('should validate a new RAG use case with no prompt provided', async () => { - let newConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.BEDROCK, - NumberOfDocs: 5, - ReturnSourceDocs: true, - BedrockKnowledgeBaseParams: { BedrockKnowledgeBaseId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - const result = await validator.validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - newConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ); - - let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); - expect(getItemCalls.length).toEqual(1); - expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); - expect(result.configuration.LlmParams?.PromptParams?.PromptTemplate).toEqual('Prompt2 {context}'); - }); - - it('should validate a new use case with a model input payload schema - sagemaker with rag', async () => { - const modelParamConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - ModelInputPayloadSchema: { - 'temperature': '<>', - 'prompt': '<>', - 'max_tokens': 10, - 'other_settings': [ - { 'setting1': '<>' }, - { 'setting2': '<>' }, - { 'setting3': 1 } - ] - } - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - PromptParams: { PromptTemplate: 'Prompt2 {input}{history}{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - const useCase = new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ); - const result = await validator.validateNewUseCase(useCase.clone()); - - let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); - expect(getItemCalls.length).toEqual(1); - expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); - expect(result).toEqual(useCase); - expect(result.configuration.LlmParams?.PromptParams?.PromptTemplate).toEqual( - 'Prompt2 {input}{history}{context}' - ); - }); - - it('should validate a new use case with a model input payload schema - sagemaker without rag', async () => { - const modelParamConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - ModelInputPayloadSchema: { - 'temperature': '<>', - 'prompt': '<>', - 'max_tokens': 10, - 'other_settings': [ - { 'setting1': '<>' }, - { 'setting2': '<>' }, - { 'setting3': 1 } - ] - } - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - PromptParams: { PromptTemplate: 'Prompt2 {input}{history}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: false - } - }; - - const useCase = new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ); - const result = await validator.validateNewUseCase(useCase.clone()); - - let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); - expect(getItemCalls.length).toEqual(1); - expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); - expect(result).toEqual(useCase); - }); - - it('should validate a new use case with escaped braces in the prompt', async () => { - config = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - PromptParams: { - PromptTemplate: '{{example}} {context} some other {{example}} text, {{example json}}' - }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - const useCase = new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - config, - 'test-user', - 'FakeProviderName', - 'Chat' - ); - const result = await validator.validateNewUseCase(useCase.clone()); - - let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); - expect(getItemCalls.length).toEqual(1); - expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); - expect(result).toEqual(useCase); - }); - }); - - describe('When successfully invoking Update Commands', () => { - it('should validate an update', async () => { - const updateConfig = { - KnowledgeBaseParams: { - NumberOfDocs: 10 - }, - LlmParams: { - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' }, - param3: { Value: 'value3', Type: 'string' } - }, - PromptParams: { PromptTemplate: 'Prompt2 {context}' } - } - }; - - const expectedConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - NumberOfDocs: 10, - ReturnSourceDocs: true, - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' }, - param3: { Value: 'value3', Type: 'string' } - }, - PromptParams: { PromptTemplate: 'Prompt2 {context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - const result = await validator.validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - updateConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ), - 'old-key' - ); - - expect(ddbMockedClient.commandCalls(GetItemCommand).length).toEqual(2); - expect(result.configuration).toEqual(expectedConfig); - }); - - it('should overwrite modelparams from new config during an update validation', async () => { - const updateConfig = { - LlmParams: { - ModelParams: { - param3: { Value: 'value3', Type: 'string' } - } - } - }; - const expectedConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: { - param3: { Value: 'value3', Type: 'string' } - }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - const result = await validator.validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - updateConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ), - 'old-key' - ); - - expect(ddbMockedClient.commandCalls(GetItemCommand).length).toEqual(2); - expect(result.configuration).toEqual(expectedConfig); - }); - - it('should remove modelParams if new update config request has empty object', async () => { - const updateConfig = { - LlmParams: { - ModelParams: {} - } - }; - const expectedConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - ModelId: 'fake-model' - }, - ModelParams: {}, - PromptParams: { PromptTemplate: '{context}' }, - RAGEnabled: true, - Streaming: true, - Temperature: 0.1 - } - }; - - const result = await validator.validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - updateConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ), - 'old-key' - ); - - expect(ddbMockedClient.commandCalls(GetItemCommand).length).toEqual(2); - expect(result.configuration).toEqual(expectedConfig); - }); - - it('should validate an update with a model input payload schema', async () => { - let modelParamConfig = { ...config }; - modelParamConfig.LlmParams.ModelProvider = CHAT_PROVIDERS.SAGEMAKER; - modelParamConfig.LlmParams.SageMakerLlmParams = { - ModelInputPayloadSchema: { - 'temperature': '<>', - 'prompt': '<>', - 'max_tokens': 10, - 'other_settings': [ - { 'setting1': '<>' }, - { 'setting2': '<>' }, - { 'setting3': 1 } - ] - } - }; - delete modelParamConfig.LlmParams.BedrockLlmParams; - - ddbMockedClient.reset(); - ddbMockedClient - .on(GetItemCommand, { - TableName: process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR], - Key: { key: { S: 'old-key' } } - }) - .resolvesOnce({ - Item: marshall({ config: modelParamConfig }) - }) - .on(GetItemCommand, { - 'TableName': 'model-info-table', - 'Key': { - 'UseCase': { - 'S': 'RAGChat' - }, - 'SortKey': { - 'S': `${CHAT_PROVIDERS.SAGEMAKER}#default` - } - } - }) - .resolves({ - Item: marshall({ - 'UseCase': 'Chat', - 'SortKey': `${CHAT_PROVIDERS.SAGEMAKER}#fake-model`, - 'ModelProviderName': CHAT_PROVIDERS.SAGEMAKER, - 'ModelName': 'fake-model', - 'AllowsStreaming': false, - 'Prompt': 'Prompt2 {input}{history}{context}', - 'MaxTemperature': '100', - 'DefaultTemperature': '0.1', - 'MinTemperature': '0', - 'DefaultStopSequences': [], - 'MemoryConfig': { - 'history': 'chat_history', - 'input': 'question', - 'context': 'context', - 'ai_prefix': 'AI', - 'human_prefix': 'Human', - 'output': 'answer' - }, - 'MaxPromptSize': 2000, - 'MaxChatMessageSize': 2500 - }) - }); - - const updateConfig = { - KnowledgeBaseParams: { - NumberOfDocs: 10 - }, - LlmParams: { - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' }, - param3: { Value: 'value3', Type: 'string' } - }, - SageMakerLlmParams: { - ModelInputPayloadSchema: { - temperature: '<>', - prompt: '<>', - max_tokens: 10, - other_settings: [ - { setting1: '<>' }, - { setting2: '<>' }, - { setting3: '<>' } - ] - } - }, - PromptParams: { PromptTemplate: 'Prompt2 {input}{history}{context}' } - } - }; - const expectedConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 10, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' }, - param3: { Value: 'value3', Type: 'string' } - }, - SageMakerLlmParams: { - ModelInputPayloadSchema: { - temperature: '<>', - prompt: '<>', - max_tokens: 10, - other_settings: [ - { setting1: '<>' }, - { setting2: '<>' }, - { setting3: '<>' } - ] - } - }, - PromptParams: { PromptTemplate: 'Prompt2 {input}{history}{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - const result = await validator.validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - updateConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ), - 'old-key' - ); - - expect(ddbMockedClient.commandCalls(GetItemCommand).length).toEqual(2); - expect(result.configuration).toEqual(expectedConfig); - }); - - it('should validate an update with escaped braces in the prompt', async () => { - const updateConfig = { - KnowledgeBaseParams: { - NumberOfDocs: 10 - }, - LlmParams: { - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' }, - param3: { Value: 'value3', Type: 'string' } - }, - PromptParams: { - PromptTemplate: '{{example}} {context} some other {{example}} text, {{example json}}' - } - } - }; - - await validator.validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - updateConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ), - 'old-key' - ); - - expect(ddbMockedClient.commandCalls(GetItemCommand).length).toEqual(2); - }); - - it('should remove ModelId and ModelArn if updating to a bedrock inference profile', async () => { - const updateConfig = { - LlmParams: { - BedrockLlmParams: { - InferenceProfileId: 'fake-profile' - } - } - }; - const expectedConfig = { - UseCaseName: 'fake-use-case', - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { - InferenceProfileId: 'fake-profile' - }, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - const result = await validator.validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - updateConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ), - 'old-key' - ); - - expect(ddbMockedClient.commandCalls(GetItemCommand).length).toEqual(2); - expect(result.configuration).toEqual(expectedConfig); - }); - }); - }); - - describe('When validation fails for Create/Update Commands', () => { - let sagemakerConfig: any; - - beforeEach(() => { - ddbMockedClient.on(GetItemCommand).resolvesOnce({ - Item: marshall({ - 'UseCase': 'Chat', - 'SortKey': `${CHAT_PROVIDERS.BEDROCK}#fake-model`, - 'ModelProviderName': CHAT_PROVIDERS.BEDROCK, - 'ModelName': 'fake-model', - 'AllowsStreaming': false, - 'Prompt': 'Prompt2', - 'MaxTemperature': '100', - 'DefaultTemperature': '0.1', - 'MinTemperature': '0', - 'DefaultStopSequences': [], - 'MemoryConfig': { - 'history': 'chat_history', - 'input': 'question', - 'context': null, - 'ai_prefix': 'AI', - 'human_prefix': 'Human', - 'output': 'answer' - }, - 'MaxPromptSize': 2000, - 'MaxChatMessageSize': 2500 - }) - }); - - sagemakerConfig = { ...config }; - sagemakerConfig.LlmParams.ModelProvider = CHAT_PROVIDERS.SAGEMAKER; - sagemakerConfig.LlmParams.SageMakerLlmParams = { - ModelInputPayloadSchema: { - 'temperature': '<>', - 'prompt': '<>', - 'max_tokens': 10, - 'other_settings': [{ 'setting1': '<>' }, { 'setting2': '<>' }, { 'setting3': 1 }] - } - }; - delete sagemakerConfig.LlmParams.BedrockLlmParams; - }); - - describe('When validation fails for Create Commands', () => { - it('should fail create validation if model info is not available for the key', async () => { - ddbMockedClient.on(GetItemCommand).rejectsOnce( - new InternalServerError({ - $metadata: { - httpStatusCode: 404 - }, - message: 'Fake getItem error' - }) - ); - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - sagemakerConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(InternalServerError); - expect(error.message).toEqual('Fake getItem error'); - }) - ); - }); - - it('should fail create validation if no model exists', async () => { - ddbMockedClient.on(GetItemCommand).resolvesOnce({}); - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - config, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toContain('No model info found for command'); - }) - ); - }); - }); - - describe('When validation fails for Update Commands', () => { - it('should fail update validation if dynamodb get fails to get the old param during update', async () => { - ddbMockedClient.on(GetItemCommand).rejectsOnce( - new InternalServerError({ - $metadata: { - httpStatusCode: 404 - }, - message: 'Fake getItem error' - }) - ); - expect( - await validator - .validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - config, - 'test-user', - CHAT_PROVIDERS.BEDROCK, - 'Chat' - ), - 'old-key' - ) - .catch((error) => { - expect(error).toBeInstanceOf(InternalServerError); - expect(error.message).toEqual('Fake getItem error'); - }) - ); - }); - - it('should fail update validation if we fail to get model info', async () => { - ddbMockedClient.on(GetItemCommand).rejectsOnce(new Error('Fake getItem error')); - expect( - await validator - .validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - config, - 'test-user', - 'FakeProviderName', - 'Chat' - ), - 'old-key' - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual('Fake getItem error'); - }) - ); - }); - - it('should fail update validation if no model exists', async () => { - ddbMockedClient.on(GetItemCommand).resolvesOnce({}); - cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'config-fake-id'); - expect( - await validator - .validateUpdateUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - config, - 'test-user', - 'FakeProviderName', - 'Chat' - ), - 'old-key' - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toContain('No use case config found for the specified key.'); - }) - ); - }); - }); - }); - - describe('Model input schema validation failures', () => { - beforeEach(() => { - ddbMockedClient.on(GetItemCommand).resolves({ - Item: marshall({ - 'UseCase': 'RAGChat', - 'SortKey': `${CHAT_PROVIDERS.BEDROCK}#fake-model`, - 'ModelProviderName': CHAT_PROVIDERS.BEDROCK, - 'ModelName': 'fake-model', - 'AllowsStreaming': false, - 'Prompt': 'Prompt2 {context}', - 'MaxTemperature': '100', - 'DefaultTemperature': '0.1', - 'MinTemperature': '0', - 'DefaultStopSequences': [], - 'MemoryConfig': { - 'history': 'chat_history', - 'input': 'question', - 'context': 'context', - 'ai_prefix': 'AI', - 'human_prefix': 'Human', - 'output': 'answer' - }, - 'MaxPromptSize': 2000, - 'MaxChatMessageSize': 2500 - }) - }); - }); - - it('should fail on a new use case with a model input payload schema, with model params missing', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - SageMakerLlmParams: { - ModelInputPayloadSchema: { - 'temperature': '<>', - 'prompt': '<>', - 'max_tokens': 10, - 'other_settings': [ - { 'setting1': '<>' }, - { 'setting2': '<>' }, - { 'setting3': 1 } - ] - } - }, - ModelInputPayloadSchema: { - 'temperature': '<>', - 'prompt': '<>', - 'max_tokens': 10, - 'other_settings': [ - { 'setting1': '<>' }, - { 'setting2': '<>' }, - { 'setting3': 1 } - ] - }, - PromptParams: { PromptTemplate: 'You are a helpful AI assistant.' }, - Streaming: true, - Temperature: 0.1 - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - 'No model parameters were provided in the useCase despite requiring parameters in the input payload schema.' - ); - }) - ); - }); - - it('should fail on a new use case with a model input payload schema, having a placeholder with no param provided', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelId: 'fake-model', - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - ModelInputPayloadSchema: { - 'temperature': '<>', - 'prompt': '<>', - 'max_tokens': '<>', - 'other_settings': [ - { 'setting1': '<>' }, - { 'setting2': '<>' }, - { 'setting3': 1 } - ] - }, - PromptParams: { PromptTemplate: '{input}{history}' }, - Streaming: true, - Temperature: 0.1 - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - 'InvalidModelParameter: max_tokens is not a valid model parameter present in the Model Parameters' - ); - }) - ); - }); - - it('should fail on a new use case with a bad prompt template for rag', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - PromptParams: { PromptTemplate: 'You are a helpful AI assistant.' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - "Provided prompt template does not have the required placeholder '{context}'." - ); - }) - ); - }); - - it('should fail on a new use case with a bad prompt template for non rag', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.SAGEMAKER, - ModelParams: { - param1: { Value: 'value1', Type: 'string' }, - param2: { Value: 'value2', Type: 'string' } - }, - ModelInputPayloadSchema: { - 'temperature': '<>', - 'prompt': '<>', - 'max_tokens': '<>', - 'other_settings': [ - { 'setting1': '<>' }, - { 'setting2': '<>' }, - { 'setting3': 1 } - ] - }, - PromptParams: { PromptTemplate: 'You are a helpful AI assistant.{history}' }, - Streaming: true, - Temperature: 0.1 - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - "Provided prompt template does not have the required placeholder '{input}'." - ); - }) - ); - }); - - it('should fail on a new use case with a bad disambiguation prompt template for rag', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - PromptParams: { - PromptTemplate: '{input}{history}{context}', - DisambiguationEnabled: true, - DisambiguationPromptTemplate: '{input}' - }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - "Provided disambiguation prompt template does not have the required placeholder '{history}'." - ); - }) - ); - }); - - it('should fail on a new use case if prompt template contains a duplicate placeholder', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - ModelId: 'fake-model', - PromptParams: { PromptTemplate: '{context}{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - "Placeholder '{context}' should appear only once in the prompt template." - ); - }) - ); - }); - - it('should fail on a new use case if disambiguation prompt template contains a duplicate placeholder', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - PromptParams: { - PromptTemplate: '{context}', - DisambiguationEnabled: true, - DisambiguationPromptTemplate: '{input}{history}{history}' - }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - "Placeholder '{history}' should appear only once in the disambiguation prompt template." - ); - }) - ); - }); - - it('should fail on a new use case if prompt template contains unescaped braces', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - PromptParams: { - PromptTemplate: '{context}' - }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - const badPromptTemplates = [ - { template: '{{context}', badCharacter: '{' }, - { template: '{context}}', badCharacter: '}' }, - { - template: 'some other text {{escaped braces}} {context} {unescaped braces}', - badCharacter: '{' - }, - { - template: 'some other text { {context} {{unescaped braces}}', - badCharacter: '{' - }, - { template: 'some other text {context} }', badCharacter: '}' }, - { template: '}some other text {context}', badCharacter: '}' }, - { template: 'some other text {context}{', badCharacter: '{' }, - { template: '{context} {input}', badCharacter: '{' }, - { template: '{context} {history}', badCharacter: '{' } - ]; - - for (let i = 0; i < badPromptTemplates.length; i++) { - modelParamConfig.LlmParams.PromptParams.PromptTemplate = badPromptTemplates[i].template; - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - `Prompt template contains an unescaped curly brace '${badPromptTemplates[i].badCharacter}'` - ); - }) - ); - } - }); - - it('should fail on a new use case with missing knowledge base params', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - 'Provided knowledge base type Kendra requires KendraKnowledgeBaseParams to be present in KnowledgeBaseParams.' - ); - }) - ); - }); - - it('should fail on a new use case with wrong rag params present, Bedrock', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.BEDROCK, - NumberOfDocs: 5, - ReturnSourceDocs: true, - KendraKnowledgeBaseParams: {} - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - 'Provided knowledge base type Bedrock requires BedrockKnowledgeBaseParams to be present in KnowledgeBaseParams.' - ); - }) - ); - }); - - it('should fail on a new use case with wrong rag params present, Kendra', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, - NumberOfDocs: 5, - ReturnSourceDocs: true, - BedrockKnowledgeBaseParams: {} - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - 'Provided knowledge base type Kendra requires KendraKnowledgeBaseParams to be present in KnowledgeBaseParams.' - ); - }) - ); - }); - - it('should fail on a new use case with invalid RAG provider', async () => { - let modelParamConfig = { - ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, - KnowledgeBaseParams: { - KnowledgeBaseType: 'Garbage' - }, - LlmParams: { - ModelProvider: CHAT_PROVIDERS.BEDROCK, - BedrockLlmParams: { ModelName: 'fake-model', ModelId: 'fake-model' }, - PromptParams: { PromptTemplate: '{context}' }, - Streaming: true, - Temperature: 0.1, - RAGEnabled: true - } - }; - - expect( - await validator - .validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - modelParamConfig, - 'test-user', - 'FakeProviderName', - 'Chat' - ) - ) - .catch((error) => { - expect(error).toBeInstanceOf(Error); - expect(error.message).toEqual( - 'Provided knowledge base type Garbage is not supported. You should not get this error.' - ); - }) - ); - }); - }); -}); - -describe('TextUseCaseValidator', () => { - beforeAll(() => {}); - - describe('resolveBedrockModelSourceOnUpdate', () => { - it('should resolve to an inference profile', async () => { - const mergedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - LlmParams: { - BedrockLlmParams: { - ModelId: 'anthropic.claude-v2', - InferenceProfileId: 'fake-profile' - } - } - }; - const newConfig = { - UseCaseType: UseCaseTypes.CHAT, - LlmParams: { - BedrockLlmParams: { - InferenceProfileId: 'fake-profile' - } - } - }; - - const expectedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - LlmParams: { - BedrockLlmParams: { - InferenceProfileId: 'fake-profile' - } - } - }; - - const resolvedUseCase = TextUseCaseValidator.resolveBedrockModelSourceOnUpdate(newConfig, mergedConfig); - expect(resolvedUseCase).toEqual(expectedConfig); - }); - }); - - it('should resolve to an inference profile when a model ARN existed', async () => { - const mergedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - LlmParams: { - BedrockLlmParams: { - ModelId: 'anthropic.claude-v2', - ModelArn: 'fake-model-arn', - InferenceProfileId: 'fake-profile' - } - } - }; - const newConfig = { - UseCaseType: UseCaseTypes.CHAT, - LlmParams: { - BedrockLlmParams: { - InferenceProfileId: 'fake-profile' - } - } - }; - - const expectedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - LlmParams: { - BedrockLlmParams: { - InferenceProfileId: 'fake-profile' - } - } - }; - - const resolvedUseCase = TextUseCaseValidator.resolveBedrockModelSourceOnUpdate(newConfig, mergedConfig); - expect(resolvedUseCase).toEqual(expectedConfig); - }); - - it('should resolve to a model id', async () => { - const mergedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - LlmParams: { - BedrockLlmParams: { - ModelId: 'anthropic.claude-v2', - InferenceProfileId: 'fake-profile' - } - } - }; - const newConfig = { - UseCaseType: UseCaseTypes.CHAT, - LlmParams: { - BedrockLlmParams: { - ModelId: 'anthropic.claude-v2' - } - } - }; - - const expectedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - LlmParams: { - BedrockLlmParams: { - ModelId: 'anthropic.claude-v2' - } - } - }; - - const resolvedUseCase = TextUseCaseValidator.resolveBedrockModelSourceOnUpdate(newConfig, mergedConfig); - expect(resolvedUseCase).toEqual(expectedConfig); - }); - - it('should resolve to a model id and arn', async () => { - const mergedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - LlmParams: { - BedrockLlmParams: { - ModelId: 'anthropic.claude-v2', - ModelArn: 'fake-model-arn', - InferenceProfileId: 'fake-profile' - } - } - }; - const newConfig = { - UseCaseType: UseCaseTypes.CHAT, - LlmParams: { - BedrockLlmParams: { - ModelId: 'anthropic.claude-v2' - } - } - }; - - const expectedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - LlmParams: { - BedrockLlmParams: { - ModelId: 'anthropic.claude-v2', - ModelArn: 'fake-model-arn' - } - } - }; - - const resolvedUseCase = TextUseCaseValidator.resolveBedrockModelSourceOnUpdate(newConfig, mergedConfig); - expect(resolvedUseCase).toEqual(expectedConfig); - }); -}); - -describe('resolveKnowledgeBaseParamsOnUpdate', () => { - it('should remove NoDocsFoundResponse when it exists in merged config but not in update config', () => { - const mergedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - KnowledgeBaseParams: { - NoDocsFoundResponse: 'Original response', - OtherParam: 'keep-me' - } - }; - const updateConfig = { - UseCaseType: UseCaseTypes.CHAT, - KnowledgeBaseParams: { - OtherParam: 'keep-me' - } - }; - - const expectedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case', - KnowledgeBaseParams: { - OtherParam: 'keep-me' - } - }; - - const resolvedConfig = TextUseCaseValidator.resolveKnowledgeBaseParamsOnUpdate(updateConfig, mergedConfig); - expect(resolvedConfig).toEqual(expectedConfig); - }); - - it('should keep NoDocsFoundResponse when it exists in both configs', () => { - const mergedConfig = { - UseCaseType: UseCaseTypes.CHAT, - KnowledgeBaseParams: { - NoDocsFoundResponse: 'New response' - } - }; - const updateConfig = { - UseCaseType: UseCaseTypes.CHAT, - KnowledgeBaseParams: { - NoDocsFoundResponse: 'New response' - } - }; - - const expectedConfig = { - UseCaseType: UseCaseTypes.CHAT, - KnowledgeBaseParams: { - NoDocsFoundResponse: 'New response' - } - }; - - const resolvedConfig = TextUseCaseValidator.resolveKnowledgeBaseParamsOnUpdate(updateConfig, mergedConfig); - expect(resolvedConfig).toEqual(expectedConfig); - }); - - it('should handle missing KnowledgeBaseParams gracefully', () => { - const mergedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case' - }; - const updateConfig = { - UseCaseType: UseCaseTypes.CHAT - }; - - const expectedConfig = { - UseCaseType: UseCaseTypes.CHAT, - UseCaseName: 'fake-use-case' - }; - - const resolvedConfig = TextUseCaseValidator.resolveKnowledgeBaseParamsOnUpdate(updateConfig, mergedConfig); - expect(resolvedConfig).toEqual(expectedConfig); - }); -}); - - - -describe('Testing use case validation for Agent UseCases', () => { - let config: any; - let cfnParameters: Map; - let validator: UseCaseValidator; - - let useCase: AgentUseCaseDeploymentAdapter; - - beforeAll(async () => { - config = { - UseCaseType: UseCaseTypes.AGENT, - UseCaseName: 'fake-use-case', - AgentParams: { - BedrockAgentParams: { - AgentId: '1111122222', - AgentAliasId: 'TSTALIASID', - EnableTrace: true - } - } - }; - cfnParameters = new Map(); - cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'fake-id'); - - process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; - process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'UseCaseConfigTable'; - - cfnParameters = new Map(); - cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'fake-id'); - const storageMgmt = new StorageManagement(); - const useCaseConfigManagement = new UseCaseConfigManagement(); - - validator = UseCaseValidator.createValidator(UseCaseTypes.AGENT, storageMgmt, useCaseConfigManagement); - - useCase = await validator.validateNewUseCase( - new UseCase( - 'fake-id', - 'fake-test', - 'Create a stack for test', - cfnParameters, - config, - 'test-user', - 'FakeProviderName', - 'Agent' - ) - ); - }); - - afterAll(() => { - delete process.env.AWS_SDK_USER_AGENT; - delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; - }); - - it('should pass on a new use case', async () => { - const validatedUseCase = await validator.validateNewUseCase(useCase); - expect(validatedUseCase).toBeDefined(); - expect(validatedUseCase).toBeInstanceOf(UseCase); - }); - - it('should have the right agent params configuration', async () => { - const validatedUseCase = await validator.validateNewUseCase(useCase); - - const config = validatedUseCase.configuration as AgentUseCaseConfiguration; - - expect(config).toBeDefined(); - expect(config.AgentParams).toEqual({ - BedrockAgentParams: { - AgentId: '1111122222', - AgentAliasId: 'TSTALIASID', - EnableTrace: true - } - }); - expect(config.UseCaseType).toEqual(UseCaseTypes.AGENT); - expect(config.UseCaseName).toEqual('fake-use-case'); - }); -}); - -describe('AgentUseCaseValidator', () => { - let validator: AgentUseCaseValidator; - let ddbMockedClient: any; - let cognitoMockClient: any; - - beforeAll(() => { - process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; - process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'UseCaseConfigTable'; - - const storageMgmt = new StorageManagement(); - const useCaseConfigManagement = new UseCaseConfigManagement(); - validator = new AgentUseCaseValidator(storageMgmt, useCaseConfigManagement); - ddbMockedClient = mockClient(DynamoDBClient); - cognitoMockClient = mockClient(CognitoIdentityProviderClient); - }); - - describe('validateNewAgentUseCase', () => { - it('should validate a new agent use case successfully', async () => { - const mockUseCase = new UseCase( - 'fake-id', - 'fake-name', - 'fake-description', - new Map([ - [CfnParameterKeys.BedrockAgentId, 'fake-agent-id'], - [CfnParameterKeys.BedrockAgentAliasId, 'fake-alias-id'] - ]), - { - UseCaseType: 'Agent', - UseCaseName: 'fake-name', - AgentParams: { - BedrockAgentParams: { - AgentId: 'fake-agent-id', - AgentAliasId: 'fake-alias-id', - EnableTrace: true - } - } - } as AgentUseCaseConfiguration, - 'fake-user-id', - 'FakeProviderName', - 'Agent' - ); - - const result = await validator.validateNewUseCase(mockUseCase); - expect(result).toEqual(mockUseCase); - }); - }); - - describe('validateNewAgentUseCase with Cognito parameters', () => { - beforeAll(() => { - //mockCognitoclient - cognitoMockClient.on(DescribeUserPoolCommand).resolves({ - UserPool: { - Id: 'fake-client-id', - Domain: 'fake-domain' - } - }); - }); - - it('should validate a new agent use case successfully', async () => { - const mockUseCase = new UseCase( - 'fake-id', - 'fake-name', - 'fake-description', - new Map([ - [CfnParameterKeys.BedrockAgentId, 'fake-agent-id'], - [CfnParameterKeys.BedrockAgentAliasId, 'fake-alias-id'], - [CfnParameterKeys.ExistingCognitoUserPoolId, 'fake-user-pool-id'], - [CfnParameterKeys.ExistingCognitoUserPoolClient, 'fake-client-id'] - ]), - { - UseCaseType: 'Agent', - UseCaseName: 'fake-name', - AgentParams: { - BedrockAgentParams: { - AgentId: 'fake-agent-id', - AgentAliasId: 'fake-alias-id', - EnableTrace: true - } - }, - AuthenticationParams: { - CognitoParams: { - ExistingUserPoolId: 'fake-user-pool-id', - ExistingUserPoolClientId: 'fake-client-id' - }, - AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO - } - } as AgentUseCaseConfiguration, - 'fake-user-id', - 'FakeProviderName', - 'Agent' - ); - - const result = await validator.validateNewUseCase(mockUseCase); - expect(result).toEqual(mockUseCase); - }); - }); - - describe('validateUpdateUseCase', () => { - beforeAll(() => { - const config = { - UseCaseType: UseCaseTypes.AGENT, - UseCaseName: 'fake-use-case', - AgentParams: { - BedrockAgentParams: { - AgentId: 'fake-agent-id', - AgentAliasId: 'fake-alias-id', - EnableTrace: true - } - } - }; - - ddbMockedClient - .on(GetItemCommand, { - 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] - }) - .resolves({ - Item: marshall({ config: config }) - }); - }); - - afterAll(() => { - ddbMockedClient.restore(); - }); - - it('should validate an update to an agent use case successfully', async () => { - const mockUseCase = new UseCase( - 'fake-id', - 'fake-name', - 'fake-description', - new Map([ - [CfnParameterKeys.BedrockAgentId, 'updated-agent-id'], - [CfnParameterKeys.BedrockAgentAliasId, 'updated-alias-id'] - ]), - { - UseCaseType: 'Agent', - UseCaseName: 'fake-name', - AgentParams: { - BedrockAgentParams: { - AgentId: 'updated-agent-id', - AgentAliasId: 'updated-alias-id', - EnableTrace: false - } - } - } as AgentUseCaseConfiguration, - 'fake-user-id', - 'FakeProviderName', - 'Agent' - ); - - const result = await validator.validateUpdateUseCase(mockUseCase, 'old-key'); - expect(result).toEqual(mockUseCase); - }); - }); - - afterAll(() => { - delete process.env.AWS_SDK_USER_AGENT; - delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; - - cognitoMockClient.restore(); - ddbMockedClient.restore(); - }); -}); diff --git a/source/lambda/use-case-management/test/model/validators/agent-builder-validator.test.ts b/source/lambda/use-case-management/test/model/validators/agent-builder-validator.test.ts new file mode 100644 index 00000000..3739aeee --- /dev/null +++ b/source/lambda/use-case-management/test/model/validators/agent-builder-validator.test.ts @@ -0,0 +1,472 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { CognitoIdentityProviderClient } from '@aws-sdk/client-cognito-identity-provider'; +import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; +import { marshall } from '@aws-sdk/util-dynamodb'; +import { mockClient } from 'aws-sdk-client-mock'; +import { StorageManagement } from '../../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../../ddb/use-case-config-management'; +import { AgentBuilderUseCaseConfiguration } from '../../../model/types'; +import { UseCase } from '../../../model/use-case'; +import { AgentBuilderUseCaseValidator } from '../../../model/validators/agent-builder-validator'; +import { CfnParameterKeys, USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, UseCaseTypes } from '../../../utils/constants'; + +describe('AgentBuilderUseCaseValidator', () => { + let validator: AgentBuilderUseCaseValidator; + let ddbMockedClient: any; + let cognitoMockClient: any; + let cfnParameters: Map; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'UseCaseConfigTable'; + + ddbMockedClient = mockClient(DynamoDBClient); + cognitoMockClient = mockClient(CognitoIdentityProviderClient); + + const storageMgmt = new StorageManagement(); + const useCaseConfigManagement = new UseCaseConfigManagement(); + validator = new AgentBuilderUseCaseValidator(storageMgmt, useCaseConfigManagement); + + cfnParameters = new Map(); + cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'fake-id'); + }); + + beforeEach(() => { + ddbMockedClient.reset(); + cognitoMockClient.reset(); + }); + + afterEach(() => { + ddbMockedClient.reset(); + cognitoMockClient.reset(); + jest.clearAllTimers(); + }); + + afterAll(async () => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; + + try { + ddbMockedClient.restore(); + cognitoMockClient.restore(); + } catch (error) { + // Ignore restore errors + } + + jest.clearAllMocks(); + jest.clearAllTimers(); + + if (global.gc) { + global.gc(); + } + + await new Promise((resolve) => setTimeout(resolve, 10)); + }); + + describe('validateNewUseCase', () => { + it('should validate a new agent builder use case successfully', async () => { + const config = { + UseCaseType: UseCaseTypes.AGENT_BUILDER, + UseCaseName: 'fake-agent-builder', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'tool1' }, { ToolId: 'tool2' }], + MemoryConfig: { + LongTermEnabled: true + } + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0' + }, + Temperature: 0.7 + } + } as AgentBuilderUseCaseConfiguration; + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + config, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + const result = await validator.validateNewUseCase(mockUseCase); + expect(result).toEqual(mockUseCase); + }); + + it('should validate agent builder use case with minimal configuration', async () => { + const config = { + UseCaseType: UseCaseTypes.AGENT_BUILDER, + UseCaseName: 'minimal-agent-builder', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant' + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0' + } + } + } as AgentBuilderUseCaseConfiguration; + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + config, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + const result = await validator.validateNewUseCase(mockUseCase); + expect(result).toEqual(mockUseCase); + }); + + it('should fail validation with invalid Tools format', async () => { + const config = { + UseCaseType: UseCaseTypes.AGENT_BUILDER, + UseCaseName: 'invalid-tools-agent-builder', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [ + { InvalidField: 'tool1' } // Invalid format + ] + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0' + } + } + } as any; + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + config, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + await expect(validator.validateNewUseCase(mockUseCase)).rejects.toThrow( + 'Tools[0].ToolId is required and must be a non-empty string.' + ); + }); + }); + + describe('validateUpdateUseCase', () => { + beforeEach(() => { + const existingConfig = { + UseCaseType: UseCaseTypes.AGENT_BUILDER, + UseCaseName: 'existing-agent-builder', + AgentBuilderParams: { + SystemPrompt: 'Original system prompt' + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0' + }, + Temperature: 0.5 + } + }; + + ddbMockedClient + .on(GetItemCommand, { + 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] + }) + .resolves({ + Item: marshall({ config: existingConfig }) + }); + }); + + it('should validate an update to an agent builder use case successfully', async () => { + const updateConfig = { + UseCaseType: UseCaseTypes.AGENT_BUILDER, + UseCaseName: 'updated-agent-builder', + AgentBuilderParams: { + SystemPrompt: 'Updated system prompt', + Tools: [{ ToolId: 'new-tool' }] + }, + LlmParams: { + Temperature: 0.8 + } + } as AgentBuilderUseCaseConfiguration; + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + updateConfig, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + const result = await validator.validateUpdateUseCase(mockUseCase, 'old-key'); + + // Verify the configuration was merged properly + const resultConfig = result.configuration as AgentBuilderUseCaseConfiguration; + expect(resultConfig.UseCaseName).toBe('updated-agent-builder'); + expect(resultConfig.AgentBuilderParams?.SystemPrompt).toBe('Updated system prompt'); + expect(resultConfig.AgentBuilderParams?.Tools).toHaveLength(1); + expect(resultConfig.LlmParams?.Temperature).toBe(0.8); + expect(resultConfig.LlmParams?.ModelProvider).toBe('Bedrock'); // Should be preserved from existing config + }); + + it('should handle partial updates correctly', async () => { + const partialUpdateConfig = { + AgentBuilderParams: { + SystemPrompt: 'Partially updated prompt' + } + } as AgentBuilderUseCaseConfiguration; + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + partialUpdateConfig, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + const result = await validator.validateUpdateUseCase(mockUseCase, 'old-key'); + + const resultConfig = result.configuration as AgentBuilderUseCaseConfiguration; + expect(resultConfig.AgentBuilderParams?.SystemPrompt).toBe('Partially updated prompt'); + expect(resultConfig.UseCaseName).toBe('existing-agent-builder'); // Should be preserved + expect(resultConfig.LlmParams?.ModelProvider).toBe('Bedrock'); // Should be preserved + }); + + it('should replace arrays instead of concatenating them during updates', async () => { + // Setup existing config with Tools and MCPServers + const existingConfigWithArrays = { + UseCaseType: UseCaseTypes.AGENT_BUILDER, + UseCaseName: 'existing-agent-builder', + AgentBuilderParams: { + SystemPrompt: 'Original system prompt', + Tools: [{ ToolId: 'calculator' }, { ToolId: 'current_time' }], + MCPServers: [ + { + Type: 'gateway', + UseCaseName: 'reservation-gateway', + UseCaseId: '3b16b09b-07a7-4c00-8f2c-d404cefd9f1a', + Url: 'https://example.com/mcp' + } + ] + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0' + }, + Temperature: 0.5 + } + }; + + ddbMockedClient.reset(); + ddbMockedClient + .on(GetItemCommand, { + 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] + }) + .resolves({ + Item: marshall({ config: existingConfigWithArrays }) + }); + + // Update with the same tools (simulating user re-selecting the same tools) + const updateConfig = { + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Updated system prompt', + Tools: [{ ToolId: 'calculator' }, { ToolId: 'current_time' }], + MCPServers: [ + { + Type: 'gateway', + UseCaseName: 'reservation-gateway', + UseCaseId: '3b16b09b-07a7-4c00-8f2c-d404cefd9f1a', + Url: 'https://example.com/mcp' + } + ] + } + } as AgentBuilderUseCaseConfiguration; + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + updateConfig, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + const result = await validator.validateUpdateUseCase(mockUseCase, 'old-key'); + + const resultConfig = result.configuration as AgentBuilderUseCaseConfiguration; + + // Verify arrays were replaced, not concatenated + expect(resultConfig.AgentBuilderParams?.Tools).toHaveLength(2); + expect(resultConfig.AgentBuilderParams?.Tools).toEqual([ + { ToolId: 'calculator' }, + { ToolId: 'current_time' } + ]); + + expect(resultConfig.AgentBuilderParams?.MCPServers).toHaveLength(1); + expect(resultConfig.AgentBuilderParams?.MCPServers).toEqual([ + { + Type: 'gateway', + UseCaseName: 'reservation-gateway', + UseCaseId: '3b16b09b-07a7-4c00-8f2c-d404cefd9f1a', + Url: 'https://example.com/mcp' + } + ]); + + // Verify no duplicates were created + const toolIds = resultConfig.AgentBuilderParams?.Tools?.map((t) => t.ToolId) || []; + const uniqueToolIds = new Set(toolIds); + expect(toolIds.length).toBe(uniqueToolIds.size); + }); + + it('should replace arrays with different values during updates', async () => { + // Setup existing config with original tools + const existingConfigWithArrays = { + UseCaseType: UseCaseTypes.AGENT_BUILDER, + UseCaseName: 'existing-agent-builder', + AgentBuilderParams: { + SystemPrompt: 'Original system prompt', + Tools: [{ ToolId: 'calculator' }, { ToolId: 'current_time' }] + }, + LlmParams: { + ModelProvider: 'Bedrock', + BedrockLlmParams: { + ModelId: 'anthropic.claude-3-sonnet-20240229-v1:0' + } + } + }; + + ddbMockedClient.reset(); + ddbMockedClient + .on(GetItemCommand, { + 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] + }) + .resolves({ + Item: marshall({ config: existingConfigWithArrays }) + }); + + // Update with completely different tools + const updateConfig = { + AgentBuilderParams: { + Tools: [{ ToolId: 'new_tool_1' }, { ToolId: 'new_tool_2' }, { ToolId: 'new_tool_3' }] + } + } as AgentBuilderUseCaseConfiguration; + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + updateConfig, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + const result = await validator.validateUpdateUseCase(mockUseCase, 'old-key'); + + const resultConfig = result.configuration as AgentBuilderUseCaseConfiguration; + + // Verify the new tools completely replaced the old ones + expect(resultConfig.AgentBuilderParams?.Tools).toHaveLength(3); + expect(resultConfig.AgentBuilderParams?.Tools).toEqual([ + { ToolId: 'new_tool_1' }, + { ToolId: 'new_tool_2' }, + { ToolId: 'new_tool_3' } + ]); + + // Verify old tools are not present + const toolIds = resultConfig.AgentBuilderParams?.Tools?.map((t) => t.ToolId) || []; + expect(toolIds).not.toContain('calculator'); + expect(toolIds).not.toContain('current_time'); + }); + }); + + describe('multimodal parameter validation', () => { + it('should validate agent builder use case with multimodal enabled', async () => { + const configWithMultimodal: AgentBuilderUseCaseConfiguration = { + UseCaseType: 'AgentBuilder', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant.' + }, + LlmParams: { + ModelProvider: 'Bedrock', + MultimodalParams: { + MultimodalEnabled: true + } + } + }; + + const mockUseCaseWithMultimodal = new UseCase( + 'test-id', + 'test-name', + 'test-description', + cfnParameters, + configWithMultimodal, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + const result = await validator.validateNewUseCase(mockUseCaseWithMultimodal); + const resultConfig = result.configuration as AgentBuilderUseCaseConfiguration; + + expect(resultConfig.LlmParams?.MultimodalParams?.MultimodalEnabled).toBe(true); + }); + + it('should validate agent builder use case with multimodal disabled', async () => { + const configWithMultimodal: AgentBuilderUseCaseConfiguration = { + UseCaseType: 'AgentBuilder', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant.' + }, + LlmParams: { + ModelProvider: 'Bedrock', + MultimodalParams: { + MultimodalEnabled: false + } + } + }; + + const mockUseCaseWithMultimodal = new UseCase( + 'test-id', + 'test-name', + 'test-description', + cfnParameters, + configWithMultimodal, + 'fake-user-id', + 'FakeProviderName', + 'AgentBuilder' + ); + + const result = await validator.validateNewUseCase(mockUseCaseWithMultimodal); + const resultConfig = result.configuration as AgentBuilderUseCaseConfiguration; + + expect(resultConfig.LlmParams?.MultimodalParams?.MultimodalEnabled).toBe(false); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/validators/agent-validator.test.ts b/source/lambda/use-case-management/test/model/validators/agent-validator.test.ts new file mode 100644 index 00000000..5eaeb0f2 --- /dev/null +++ b/source/lambda/use-case-management/test/model/validators/agent-validator.test.ts @@ -0,0 +1,241 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { CognitoIdentityProviderClient, DescribeUserPoolCommand } from '@aws-sdk/client-cognito-identity-provider'; +import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; +import { marshall } from '@aws-sdk/util-dynamodb'; +import { mockClient } from 'aws-sdk-client-mock'; +import { StorageManagement } from '../../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../../ddb/use-case-config-management'; +import { AgentUseCaseConfiguration } from '../../../model/types'; +import { UseCase } from '../../../model/use-case'; +import { AgentUseCaseValidator } from '../../../model/validators/agent-validator'; +import { + AUTHENTICATION_PROVIDERS, + CfnParameterKeys, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + UseCaseTypes +} from '../../../utils/constants'; + +describe('AgentUseCaseValidator', () => { + let validator: AgentUseCaseValidator; + let ddbMockedClient: any; + let cognitoMockClient: any; + let cfnParameters: Map; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'UseCaseConfigTable'; + + ddbMockedClient = mockClient(DynamoDBClient); + cognitoMockClient = mockClient(CognitoIdentityProviderClient); + + const storageMgmt = new StorageManagement(); + const useCaseConfigManagement = new UseCaseConfigManagement(); + validator = new AgentUseCaseValidator(storageMgmt, useCaseConfigManagement); + + cfnParameters = new Map(); + cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'fake-id'); + }); + + beforeEach(() => { + ddbMockedClient.reset(); + cognitoMockClient.reset(); + }); + + afterEach(() => { + ddbMockedClient.reset(); + cognitoMockClient.reset(); + jest.clearAllTimers(); + }); + + afterAll(async () => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; + + try { + ddbMockedClient.restore(); + cognitoMockClient.restore(); + } catch (error) { + // Ignore restore errors + } + + jest.clearAllMocks(); + jest.clearAllTimers(); + + if (global.gc) { + global.gc(); + } + + await new Promise((resolve) => setTimeout(resolve, 10)); + }); + + describe('validateNewUseCase', () => { + it('should validate a new agent use case successfully', async () => { + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + new Map([ + [CfnParameterKeys.BedrockAgentId, 'fake-agent-id'], + [CfnParameterKeys.BedrockAgentAliasId, 'fake-alias-id'] + ]), + { + UseCaseType: 'Agent', + UseCaseName: 'fake-name', + AgentParams: { + BedrockAgentParams: { + AgentId: 'fake-agent-id', + AgentAliasId: 'fake-alias-id', + EnableTrace: true + } + } + } as AgentUseCaseConfiguration, + 'fake-user-id', + 'FakeProviderName', + 'Agent' + ); + + const result = await validator.validateNewUseCase(mockUseCase); + expect(result).toEqual(mockUseCase); + }); + + it('should validate a new agent use case with Cognito parameters', async () => { + cognitoMockClient.on(DescribeUserPoolCommand).resolves({ + UserPool: { + Id: 'fake-client-id', + Domain: 'fake-domain' + } + }); + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + new Map([ + [CfnParameterKeys.BedrockAgentId, 'fake-agent-id'], + [CfnParameterKeys.BedrockAgentAliasId, 'fake-alias-id'], + [CfnParameterKeys.ExistingCognitoUserPoolId, 'fake-user-pool-id'], + [CfnParameterKeys.ExistingCognitoUserPoolClient, 'fake-client-id'] + ]), + { + UseCaseType: 'Agent', + UseCaseName: 'fake-name', + AgentParams: { + BedrockAgentParams: { + AgentId: 'fake-agent-id', + AgentAliasId: 'fake-alias-id', + EnableTrace: true + } + }, + AuthenticationParams: { + CognitoParams: { + ExistingUserPoolId: 'fake-user-pool-id', + ExistingUserPoolClientId: 'fake-client-id' + }, + AuthenticationProvider: AUTHENTICATION_PROVIDERS.COGNITO + } + } as AgentUseCaseConfiguration, + 'fake-user-id', + 'FakeProviderName', + 'Agent' + ); + + const result = await validator.validateNewUseCase(mockUseCase); + expect(result).toEqual(mockUseCase); + }); + + it('should have the right agent params configuration', async () => { + const config = { + UseCaseType: UseCaseTypes.AGENT, + UseCaseName: 'fake-use-case', + AgentParams: { + BedrockAgentParams: { + AgentId: '1111122222', + AgentAliasId: 'TSTALIASID', + EnableTrace: true + } + } + } as AgentUseCaseConfiguration; + + const testUseCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create a stack for test', + cfnParameters, + config, + 'test-user', + 'FakeProviderName', + 'Agent' + ); + + const validatedUseCase = await validator.validateNewUseCase(testUseCase); + + const validatedConfig = validatedUseCase.configuration as AgentUseCaseConfiguration; + + expect(validatedConfig).toBeDefined(); + expect(validatedConfig.AgentParams).toEqual({ + BedrockAgentParams: { + AgentId: '1111122222', + AgentAliasId: 'TSTALIASID', + 'EnableTrace': true + } + }); + expect(validatedConfig.UseCaseType).toEqual(UseCaseTypes.AGENT); + expect(validatedConfig.UseCaseName).toEqual('fake-use-case'); + }); + }); + + describe('validateUpdateUseCase', () => { + beforeEach(() => { + const config = { + UseCaseType: UseCaseTypes.AGENT, + UseCaseName: 'fake-use-case', + AgentParams: { + BedrockAgentParams: { + AgentId: 'fake-agent-id', + AgentAliasId: 'fake-alias-id', + EnableTrace: true + } + } + }; + + ddbMockedClient + .on(GetItemCommand, { + 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] + }) + .resolves({ + Item: marshall({ config: config }) + }); + }); + + it('should validate an update to an agent use case successfully', async () => { + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + new Map([ + [CfnParameterKeys.BedrockAgentId, 'updated-agent-id'], + [CfnParameterKeys.BedrockAgentAliasId, 'updated-alias-id'] + ]), + { + UseCaseType: 'Agent', + UseCaseName: 'fake-name', + AgentParams: { + BedrockAgentParams: { + AgentId: 'updated-agent-id', + AgentAliasId: 'updated-alias-id', + EnableTrace: false + } + } + } as AgentUseCaseConfiguration, + 'fake-user-id', + 'FakeProviderName', + 'Agent' + ); + + const result = await validator.validateUpdateUseCase(mockUseCase, 'old-key'); + expect(result).toEqual(mockUseCase); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/validators/config-merge-utils.test.ts b/source/lambda/use-case-management/test/model/validators/config-merge-utils.test.ts new file mode 100644 index 00000000..f33d35b0 --- /dev/null +++ b/source/lambda/use-case-management/test/model/validators/config-merge-utils.test.ts @@ -0,0 +1,693 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ConfigMergeUtils } from '../../../model/validators/config-merge-utils'; +import { CHAT_PROVIDERS, UseCaseTypes } from '../../../utils/constants'; + +describe('ConfigMergeUtils', () => { + describe('mergeConfigs', () => { + // NOTE: All mergeConfigs tests must be async because the method is decorated with + // @tracer.captureMethod from AWS Lambda Powertools, which wraps the synchronous + // method and can return promises in the test environment + it('should merge simple configurations', async () => { + const existingConfig = { + UseCaseName: 'existing-name', + UseCaseType: UseCaseTypes.CHAT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + Temperature: 0.5 + } + }; + + const newConfig = { + UseCaseName: 'updated-name', + LlmParams: { + Temperature: 0.8 + } + }; + + const result = await ConfigMergeUtils.mergeConfigs(existingConfig, newConfig); + + expect(result).toEqual({ + UseCaseName: 'updated-name', + UseCaseType: UseCaseTypes.CHAT, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + Temperature: 0.8 + } + }); + }); + + it('should handle ModelParams overwriting correctly', async () => { + const existingConfig = { + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + ModelParams: { + param1: { Value: 'old-value1', Type: 'string' }, + param2: { Value: 'old-value2', Type: 'string' } + } + } + }; + + const newConfig = { + LlmParams: { + ModelParams: { + param1: { Value: 'new-value1', Type: 'string' }, + param3: { Value: 'new-value3', Type: 'string' } + } + } + }; + + const result = await ConfigMergeUtils.mergeConfigs(existingConfig, newConfig); + + expect(result.LlmParams.ModelParams).toEqual({ + param1: { Value: 'new-value1', Type: 'string' }, + param3: { Value: 'new-value3', Type: 'string' } + }); + }); + + it('should handle nested object merging', async () => { + const existingConfig = { + KnowledgeBaseParams: { + KnowledgeBaseType: 'Kendra', + NumberOfDocs: 5, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: 'old-index' + } + } + }; + + const newConfig = { + KnowledgeBaseParams: { + NumberOfDocs: 10, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: 'new-index' + } + } + }; + + const result = await ConfigMergeUtils.mergeConfigs(existingConfig, newConfig); + + expect(result.KnowledgeBaseParams).toEqual({ + KnowledgeBaseType: 'Kendra', + NumberOfDocs: 10, + KendraKnowledgeBaseParams: { + ExistingKendraIndexId: 'new-index' + } + }); + }); + + it('should handle empty new config', async () => { + const existingConfig = { + UseCaseName: 'existing-name', + LlmParams: { + Temperature: 0.5 + } + }; + + const newConfig = {}; + + const result = await ConfigMergeUtils.mergeConfigs(existingConfig, newConfig); + + expect(result).toEqual(existingConfig); + }); + + it('should handle empty existing config', async () => { + const existingConfig = {}; + + const newConfig = { + UseCaseName: 'new-name', + LlmParams: { + Temperature: 0.8 + } + }; + + const result = await ConfigMergeUtils.mergeConfigs(existingConfig, newConfig); + + expect(result).toEqual(newConfig); + }); + }); + + describe('resolveBedrockModelSourceOnUpdate', () => { + it('should resolve to inference profile when provided in new config', () => { + const mergedConfig = { + LlmParams: { + BedrockLlmParams: { + ModelId: 'anthropic.claude-v2', + InferenceProfileId: 'fake-profile' + } + } + }; + + const newConfig = { + LlmParams: { + BedrockLlmParams: { + InferenceProfileId: 'fake-profile' + } + } + }; + + const result = ConfigMergeUtils.resolveBedrockModelSourceOnUpdate(newConfig, mergedConfig); + + expect(result.LlmParams.BedrockLlmParams).toEqual({ + InferenceProfileId: 'fake-profile' + }); + }); + + it('should resolve to model id when provided in new config', () => { + const mergedConfig = { + LlmParams: { + BedrockLlmParams: { + ModelId: 'anthropic.claude-v2', + InferenceProfileId: 'fake-profile' + } + } + }; + + const newConfig = { + LlmParams: { + BedrockLlmParams: { + ModelId: 'anthropic.claude-v2' + } + } + }; + + const result = ConfigMergeUtils.resolveBedrockModelSourceOnUpdate(newConfig, mergedConfig); + + expect(result.LlmParams.BedrockLlmParams).toEqual({ + ModelId: 'anthropic.claude-v2' + }); + }); + + it('should preserve model ARN when model id is provided', () => { + const mergedConfig = { + LlmParams: { + BedrockLlmParams: { + ModelId: 'anthropic.claude-v2', + ModelArn: 'fake-model-arn', + InferenceProfileId: 'fake-profile' + } + } + }; + + const newConfig = { + LlmParams: { + BedrockLlmParams: { + ModelId: 'anthropic.claude-v2' + } + } + }; + + const result = ConfigMergeUtils.resolveBedrockModelSourceOnUpdate(newConfig, mergedConfig); + + expect(result.LlmParams.BedrockLlmParams).toEqual({ + ModelId: 'anthropic.claude-v2', + ModelArn: 'fake-model-arn' + }); + }); + + it('should handle missing BedrockLlmParams', () => { + const mergedConfig = { + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + Temperature: 0.8 + } + }; + + const newConfig = { + LlmParams: { + Temperature: 0.8 + } + }; + + const result = ConfigMergeUtils.resolveBedrockModelSourceOnUpdate(newConfig, mergedConfig); + + expect(result).toEqual(mergedConfig); + }); + }); + + describe('mergeAgentBuilderConfigs', () => { + it('should replace Tools array when provided in new config', async () => { + const existingConfig = { + UseCaseName: 'agent-test', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'calculator' }, { ToolId: 'weather' }], + MCPServers: [{ Type: 'runtime', UseCaseName: 'mcp1', UseCaseId: 'id1', Url: 'url1' }] + } + }; + + const newConfig = { + AgentBuilderParams: { + Tools: [{ ToolId: 'calculator' }] + } + }; + + const result = await ConfigMergeUtils.mergeAgentBuilderConfigs(existingConfig, newConfig); + + expect(result.AgentBuilderParams.Tools).toEqual([{ ToolId: 'calculator' }]); + expect(result.AgentBuilderParams.SystemPrompt).toBe('You are a helpful assistant'); + }); + + it('should clear Tools array when not provided in new config', async () => { + const existingConfig = { + UseCaseName: 'agent-test', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'calculator' }, { ToolId: 'weather' }], + MCPServers: [{ Type: 'runtime', UseCaseName: 'mcp1', UseCaseId: 'id1', Url: 'url1' }] + } + }; + + const newConfig = { + AgentBuilderParams: { + SystemPrompt: 'Updated prompt' + } + }; + + const result = await ConfigMergeUtils.mergeAgentBuilderConfigs(existingConfig, newConfig); + + expect(result.AgentBuilderParams.Tools).toEqual([]); + expect(result.AgentBuilderParams.SystemPrompt).toBe('Updated prompt'); + }); + + it('should replace MCPServers array when provided in new config', async () => { + const existingConfig = { + UseCaseName: 'agent-test', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'calculator' }], + MCPServers: [ + { Type: 'runtime', UseCaseName: 'mcp1', UseCaseId: 'id1', Url: 'url1' }, + { Type: 'gateway', UseCaseName: 'mcp2', UseCaseId: 'id2', Url: 'url2' } + ] + } + }; + + const newConfig = { + AgentBuilderParams: { + MCPServers: [{ Type: 'runtime', UseCaseName: 'mcp3', UseCaseId: 'id3', Url: 'url3' }] + } + }; + + const result = await ConfigMergeUtils.mergeAgentBuilderConfigs(existingConfig, newConfig); + + expect(result.AgentBuilderParams.MCPServers).toEqual([ + { Type: 'runtime', UseCaseName: 'mcp3', UseCaseId: 'id3', Url: 'url3' } + ]); + expect(result.AgentBuilderParams.Tools).toEqual([]); + }); + + it('should clear MCPServers array when not provided in new config', async () => { + const existingConfig = { + UseCaseName: 'agent-test', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'calculator' }], + MCPServers: [ + { Type: 'runtime', UseCaseName: 'mcp1', UseCaseId: 'id1', Url: 'url1' }, + { Type: 'gateway', UseCaseName: 'mcp2', UseCaseId: 'id2', Url: 'url2' } + ] + } + }; + + const newConfig = { + AgentBuilderParams: { + SystemPrompt: 'Updated prompt' + } + }; + + const result = await ConfigMergeUtils.mergeAgentBuilderConfigs(existingConfig, newConfig); + + expect(result.AgentBuilderParams.MCPServers).toEqual([]); + expect(result.AgentBuilderParams.SystemPrompt).toBe('Updated prompt'); + }); + + it('should handle empty arrays in new config', async () => { + const existingConfig = { + UseCaseName: 'agent-test', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'calculator' }, { ToolId: 'weather' }], + MCPServers: [{ Type: 'runtime', UseCaseName: 'mcp1', UseCaseId: 'id1', Url: 'url1' }] + } + }; + + const newConfig = { + AgentBuilderParams: { + Tools: [], + MCPServers: [] + } + }; + + const result = await ConfigMergeUtils.mergeAgentBuilderConfigs(existingConfig, newConfig); + + expect(result.AgentBuilderParams.Tools).toEqual([]); + expect(result.AgentBuilderParams.MCPServers).toEqual([]); + }); + + it('should merge other AgentBuilderParams fields normally', async () => { + const existingConfig = { + UseCaseName: 'agent-test', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'calculator' }], + MCPServers: [], + MemoryConfig: { + LongTermEnabled: false + } + } + }; + + const newConfig = { + AgentBuilderParams: { + SystemPrompt: 'Updated prompt', + MemoryConfig: { + LongTermEnabled: true + } + } + }; + + const result = await ConfigMergeUtils.mergeAgentBuilderConfigs(existingConfig, newConfig); + + expect(result.AgentBuilderParams.SystemPrompt).toBe('Updated prompt'); + expect(result.AgentBuilderParams.MemoryConfig.LongTermEnabled).toBe(true); + expect(result.AgentBuilderParams.Tools).toEqual([]); + expect(result.AgentBuilderParams.MCPServers).toEqual([]); + }); + + it('should handle missing AgentBuilderParams in new config', async () => { + const existingConfig = { + UseCaseName: 'agent-test', + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'calculator' }] + } + }; + + const newConfig = { + UseCaseName: 'updated-name' + }; + + const result = await ConfigMergeUtils.mergeAgentBuilderConfigs(existingConfig, newConfig); + + expect(result.UseCaseName).toBe('updated-name'); + expect(result.AgentBuilderParams.SystemPrompt).toBe('You are a helpful assistant'); + expect(result.AgentBuilderParams.Tools).toEqual([{ ToolId: 'calculator' }]); + }); + + it('should handle LlmParams merge alongside AgentBuilderParams', async () => { + const existingConfig = { + UseCaseName: 'agent-test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + Temperature: 0.5, + ModelParams: { + param1: { Value: 'old-value', Type: 'string' } + } + }, + AgentBuilderParams: { + SystemPrompt: 'You are a helpful assistant', + Tools: [{ ToolId: 'calculator' }] + } + }; + + const newConfig = { + LlmParams: { + Temperature: 0.8, + ModelParams: { + param1: { Value: 'new-value', Type: 'string' } + } + }, + AgentBuilderParams: { + Tools: [{ ToolId: 'weather' }] + } + }; + + const result = await ConfigMergeUtils.mergeAgentBuilderConfigs(existingConfig, newConfig); + + expect(result.LlmParams.Temperature).toBe(0.8); + expect(result.LlmParams.ModelParams).toEqual({ + param1: { Value: 'new-value', Type: 'string' } + }); + expect(result.AgentBuilderParams.Tools).toEqual([{ ToolId: 'weather' }]); + expect(result.AgentBuilderParams.MCPServers).toEqual([]); + }); + }); + + describe('mergeWorkflowConfigs', () => { + it('should replace Agents array when provided in new config', async () => { + const existingConfig = { + UseCaseName: 'workflow-test', + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { UseCaseId: 'agent1', UseCaseName: 'Agent 1' }, + { UseCaseId: 'agent2', UseCaseName: 'Agent 2' } + ] + } + } + }; + + const newConfig = { + WorkflowParams: { + AgentsAsToolsParams: { + Agents: [{ UseCaseId: 'agent3', UseCaseName: 'Agent 3' }] + } + } + }; + + const result = await ConfigMergeUtils.mergeWorkflowConfigs(existingConfig, newConfig); + + expect(result.WorkflowParams.AgentsAsToolsParams.Agents).toEqual([ + { UseCaseId: 'agent3', UseCaseName: 'Agent 3' } + ]); + expect(result.WorkflowParams.SystemPrompt).toBe('You are a workflow coordinator'); + }); + + it('should clear Agents array when not provided in new config', async () => { + const existingConfig = { + UseCaseName: 'workflow-test', + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { UseCaseId: 'agent1', UseCaseName: 'Agent 1' }, + { UseCaseId: 'agent2', UseCaseName: 'Agent 2' } + ] + } + } + }; + + const newConfig = { + WorkflowParams: { + SystemPrompt: 'Updated workflow prompt', + AgentsAsToolsParams: {} + } + }; + + const result = await ConfigMergeUtils.mergeWorkflowConfigs(existingConfig, newConfig); + + expect(result.WorkflowParams.AgentsAsToolsParams.Agents).toEqual([]); + expect(result.WorkflowParams.SystemPrompt).toBe('Updated workflow prompt'); + }); + + it('should handle empty Agents array in new config', async () => { + const existingConfig = { + UseCaseName: 'workflow-test', + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + AgentsAsToolsParams: { + Agents: [ + { UseCaseId: 'agent1', UseCaseName: 'Agent 1' }, + { UseCaseId: 'agent2', UseCaseName: 'Agent 2' } + ] + } + } + }; + + const newConfig = { + WorkflowParams: { + AgentsAsToolsParams: { + Agents: [] + } + } + }; + + const result = await ConfigMergeUtils.mergeWorkflowConfigs(existingConfig, newConfig); + + expect(result.WorkflowParams.AgentsAsToolsParams.Agents).toEqual([]); + }); + + it('should merge other WorkflowParams fields normally', async () => { + const existingConfig = { + UseCaseName: 'workflow-test', + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [{ UseCaseId: 'agent1' }] + } + } + }; + + const newConfig = { + WorkflowParams: { + SystemPrompt: 'Updated workflow prompt', + OrchestrationPattern: 'sequential', + AgentsAsToolsParams: {} + } + }; + + const result = await ConfigMergeUtils.mergeWorkflowConfigs(existingConfig, newConfig); + + expect(result.WorkflowParams.SystemPrompt).toBe('Updated workflow prompt'); + expect(result.WorkflowParams.OrchestrationPattern).toBe('sequential'); + expect(result.WorkflowParams.AgentsAsToolsParams.Agents).toEqual([]); + }); + + it('should handle missing WorkflowParams in new config', async () => { + const existingConfig = { + UseCaseName: 'workflow-test', + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + AgentsAsToolsParams: { + Agents: [{ UseCaseId: 'agent1' }] + } + } + }; + + const newConfig = { + UseCaseName: 'updated-workflow' + }; + + const result = await ConfigMergeUtils.mergeWorkflowConfigs(existingConfig, newConfig); + + expect(result.UseCaseName).toBe('updated-workflow'); + expect(result.WorkflowParams.SystemPrompt).toBe('You are a workflow coordinator'); + expect(result.WorkflowParams.AgentsAsToolsParams.Agents).toEqual([{ UseCaseId: 'agent1' }]); + }); + + it('should handle LlmParams merge alongside WorkflowParams', async () => { + const existingConfig = { + UseCaseName: 'workflow-test', + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + Temperature: 0.5 + }, + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + AgentsAsToolsParams: { + Agents: [{ UseCaseId: 'agent1' }] + } + } + }; + + const newConfig = { + LlmParams: { + Temperature: 0.8 + }, + WorkflowParams: { + AgentsAsToolsParams: { + Agents: [{ UseCaseId: 'agent2' }] + } + } + }; + + const result = await ConfigMergeUtils.mergeWorkflowConfigs(existingConfig, newConfig); + + expect(result.LlmParams.Temperature).toBe(0.8); + expect(result.WorkflowParams.AgentsAsToolsParams.Agents).toEqual([{ UseCaseId: 'agent2' }]); + }); + }); + + describe('resolveKnowledgeBaseParamsOnUpdate', () => { + it('should remove NoDocsFoundResponse when not in update config', () => { + const mergedConfig = { + KnowledgeBaseParams: { + NoDocsFoundResponse: 'Original response', + ReturnSourceDocs: true + } + }; + + const updateConfig = { + KnowledgeBaseParams: { + ReturnSourceDocs: true + } + }; + + const result = ConfigMergeUtils.resolveKnowledgeBaseParamsOnUpdate(updateConfig, mergedConfig); + + expect(result.KnowledgeBaseParams).toEqual({ + ReturnSourceDocs: true + }); + }); + + it('should keep NoDocsFoundResponse when in update config', () => { + const mergedConfig = { + KnowledgeBaseParams: { + NoDocsFoundResponse: 'Updated response', + NumberOfDocs: 5 + } + }; + + const updateConfig = { + KnowledgeBaseParams: { + NoDocsFoundResponse: 'Updated response', + NumberOfDocs: 5 + } + }; + + const result = ConfigMergeUtils.resolveKnowledgeBaseParamsOnUpdate(updateConfig, mergedConfig); + + expect(result.KnowledgeBaseParams).toEqual({ + NoDocsFoundResponse: 'Updated response', + NumberOfDocs: 5 + }); + }); + + it('should handle missing KnowledgeBaseParams in merged config', () => { + const mergedConfig = { + UseCaseName: 'test-case' + }; + + const updateConfig = { + KnowledgeBaseParams: { + NumberOfDocs: 10 + } + }; + + const result = ConfigMergeUtils.resolveKnowledgeBaseParamsOnUpdate(updateConfig, mergedConfig); + + expect(result).toEqual({ + UseCaseName: 'test-case' + }); + }); + + it('should handle missing KnowledgeBaseParams in update config', () => { + const mergedConfig = { + KnowledgeBaseParams: { + NoDocsFoundResponse: 'Original response', + NumberOfDocs: 5 + }, + UseCaseName: 'updated-case' + }; + + const updateConfig = { + UseCaseName: 'updated-case' + }; + + const result = ConfigMergeUtils.resolveKnowledgeBaseParamsOnUpdate(updateConfig, mergedConfig); + + expect(result).toEqual({ + KnowledgeBaseParams: { + NumberOfDocs: 5 + }, + UseCaseName: 'updated-case' + }); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/validators/mcp-validator.test.ts b/source/lambda/use-case-management/test/model/validators/mcp-validator.test.ts new file mode 100644 index 00000000..8207a33f --- /dev/null +++ b/source/lambda/use-case-management/test/model/validators/mcp-validator.test.ts @@ -0,0 +1,1250 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn(() => ({ region: 'us-east-1' })) +})); + +import { SchemaUploadValidator, McpOperationsValidator } from '../../../model/validators/mcp-validator'; +import { GATEWAY_TARGET_TYPES, MCP_CONTENT_TYPES, McpOperationTypes, UseCaseTypes, MODEL_INFO_TABLE_NAME_ENV_VAR, USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, CfnParameterKeys } from '../../../utils/constants'; +import { TargetParams } from '../../../model/types'; + +import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import { mockClient } from 'aws-sdk-client-mock'; +import { StorageManagement } from '../../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../../ddb/use-case-config-management'; +import { UseCase } from '../../../model/use-case'; +import { MCPUsecaseValidator } from '../../../model/validators/mcp-validator' +import { ValidatorFactory } from '../../../model/validators/validator-factory' + +jest.mock('../../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn((client) => client) + } +})); + +describe('MCP Validator', () => { + let validator: SchemaUploadValidator; + + beforeEach(() => { + validator = new SchemaUploadValidator(); + }); + + describe('McpOperationsValidator Factory', () => { + it('should create SchemaUploadValidator for upload-schema operation', () => { + const createdValidator = McpOperationsValidator.createValidator(McpOperationTypes.UPLOAD_SCHEMA); + expect(createdValidator).toBeInstanceOf(SchemaUploadValidator); + }); + + it('should throw error for invalid operation type', () => { + expect(() => McpOperationsValidator.createValidator('invalid-operation')).toThrow( + 'Invalid MCP operation type: invalid-operation' + ); + }); + }); + + describe('SchemaUploadValidator', () => { + describe('validateMcpOperation', () => { + it('should successfully validate and process single file', async () => { + const mockOperation = { + rawFiles: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'api-spec.json' + } + ], + files: [] + }; + + const result = (await validator.validateMcpOperation(mockOperation)) as any; + + expect(result.files).toHaveLength(1); + expect(result.files[0]).toEqual({ + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'api-spec.json', + fileExtension: '.json', + contentType: MCP_CONTENT_TYPES.JSON + }); + }); + + it('should successfully validate and process multiple files', async () => { + const mockOperation = { + rawFiles: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'api-spec.yaml' + }, + { + schemaType: GATEWAY_TARGET_TYPES.LAMBDA, + fileName: 'lambda-schema.json' + } + ], + files: [] + }; + + const result = (await validator.validateMcpOperation(mockOperation)) as any; + + expect(result.files).toHaveLength(2); + + expect(result.files[0]).toEqual({ + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'api-spec.yaml', + fileExtension: '.yaml', + contentType: MCP_CONTENT_TYPES.YAML + }); + + expect(result.files[1]).toEqual({ + schemaType: GATEWAY_TARGET_TYPES.LAMBDA, + fileName: 'lambda-schema.json', + fileExtension: '.json', + contentType: MCP_CONTENT_TYPES.JSON + }); + }); + + it('should throw error for missing schemaType', async () => { + const mockOperation = { + rawFiles: [ + { + fileName: 'api-spec.json' + } + ], + files: [] + }; + + await expect(validator.validateMcpOperation(mockOperation)).rejects.toThrow( + 'files[0].schemaType is required and must be a non-empty string' + ); + }); + + it('should throw error for missing fileName', async () => { + const mockOperation = { + rawFiles: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API + } + ], + files: [] + }; + + await expect(validator.validateMcpOperation(mockOperation)).rejects.toThrow( + 'files[0].fileName is required and must be a non-empty string' + ); + }); + + it('should throw error for missing file extension', async () => { + const mockOperation = { + rawFiles: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'api-spec' + } + ], + files: [] + }; + + await expect(validator.validateMcpOperation(mockOperation)).rejects.toThrow( + "files[0].fileName 'api-spec' must have a valid file extension" + ); + }); + + it('should throw error for invalid schema type', async () => { + const mockOperation = { + rawFiles: [ + { + schemaType: 'invalid-type', + fileName: 'api-spec.json' + } + ], + files: [] + }; + + await expect(validator.validateMcpOperation(mockOperation)).rejects.toThrow( + "Invalid files[0].schemaType 'invalid-type' for file 'api-spec.json'. Must be one of:" + ); + }); + + it('should throw error for incompatible file extension', async () => { + const mockOperation = { + rawFiles: [ + { + schemaType: GATEWAY_TARGET_TYPES.LAMBDA, + fileName: 'lambda-schema.yaml' // Invalid for lambda + } + ], + files: [] + }; + + await expect(validator.validateMcpOperation(mockOperation)).rejects.toThrow( + "Invalid files[0] file extension '.yaml' for file 'lambda-schema.yaml' with schema type 'lambda'" + ); + }); + + it('should throw error for unsupported file extension', async () => { + const mockOperation = { + rawFiles: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: 'api-spec.xml' + } + ], + files: [] + }; + + await expect(validator.validateMcpOperation(mockOperation)).rejects.toThrow( + "Invalid files[0] file extension '.xml' for file 'api-spec.xml' with schema type 'openApiSchema'. Allowed extensions: .json, .yaml, .yml" + ); + }); + + it('should set correct content types for different extensions', async () => { + const testCases = [ + { fileName: 'spec.json', expectedContentType: MCP_CONTENT_TYPES.JSON }, + { fileName: 'spec.yaml', expectedContentType: MCP_CONTENT_TYPES.YAML }, + { fileName: 'spec.yml', expectedContentType: MCP_CONTENT_TYPES.YAML } + ]; + + for (const testCase of testCases) { + const mockOperation = { + rawFiles: [ + { + schemaType: GATEWAY_TARGET_TYPES.OPEN_API, + fileName: testCase.fileName + } + ], + files: [] + }; + + const result = (await validator.validateMcpOperation(mockOperation)) as any; + expect(result.files[0].contentType).toBe(testCase.expectedContentType); + } + }); + }); + }); +}); + +describe('Testing MCP Use Case Validation', () => { + let mcpValidator: MCPUsecaseValidator; + let cfnParameters: Map; + let ddbMockedClient: any; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = '{ "customUserAgent": "AWSSOLUTION/SO0276/v2.0.0" }'; + process.env[MODEL_INFO_TABLE_NAME_ENV_VAR] = 'model-info-table'; + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'use-case-config-table'; + process.env.AWS_REGION = 'us-east-1'; + + const storageMgmt = new StorageManagement(); + const useCaseConfigManagement = new UseCaseConfigManagement(); + + // Use the factory method like other tests + mcpValidator = ValidatorFactory.createValidator(UseCaseTypes.MCP_SERVER, storageMgmt, useCaseConfigManagement) as MCPUsecaseValidator; + + cfnParameters = new Map(); + cfnParameters.set(CfnParameterKeys.UseCaseUUID, 'fake-uuid'); + + ddbMockedClient = mockClient(DynamoDBClient); + }); + + afterAll(() => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]; + delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; + delete process.env.AWS_REGION; + ddbMockedClient.restore(); + }); + + describe('Environment Variables Validation', () => { + it('should pass validation with valid environment variables', async () => { + const validEnvVars = { + 'API_KEY': 'test-key-123', + 'DATABASE_URL': 'postgresql://localhost:5432/db', + 'DEBUG_MODE': 'true', + '_PRIVATE_VAR': 'private-value' + }; + + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: validEnvVars + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + // Should not throw any errors + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.toBeDefined(); + }); + + it('should pass validation when no environment variables are provided', async () => { + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest' + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + // Should not throw any errors + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.toBeDefined(); + }); + + it('should fail validation when environment variables is not an object', async () => { + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: null as any + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).rejects.toThrow('Environment variables must be provided as an object'); + }); + + it('should fail validation with invalid environment variable names', async () => { + const testCases = [ + { name: '', value: 'test', expectedError: 'Environment variable names cannot be empty' }, + { name: '123INVALID', value: 'test', expectedError: 'Invalid environment variable name "123INVALID". Names must start with a letter or underscore and contain only letters, numbers, and underscores' }, + { name: 'INVALID-NAME', value: 'test', expectedError: 'Invalid environment variable name "INVALID-NAME". Names must start with a letter or underscore and contain only letters, numbers, and underscores' }, + { name: 'INVALID.NAME', value: 'test', expectedError: 'Invalid environment variable name "INVALID.NAME". Names must start with a letter or underscore and contain only letters, numbers, and underscores' }, + { name: 'INVALID NAME', value: 'test', expectedError: 'Invalid environment variable name "INVALID NAME". Names must start with a letter or underscore and contain only letters, numbers, and underscores' } + ]; + + for (const testCase of testCases) { + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: { + [testCase.name]: testCase.value + } + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail(`Expected validation to throw an error for test case: ${testCase.name}`); + } catch (error: any) { + expect(error.message).toBe(testCase.expectedError); + } + } + }); + + it('should fail validation when environment variable name is too long', async () => { + const longName = 'A'.repeat(257); // 257 characters, exceeds 256 limit + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: { + [longName]: 'test-value' + } + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe(`Environment variable name "${longName}" exceeds maximum length of 256 characters`); + } + }); + + it('should fail validation when environment variable value is not a string', async () => { + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: { + 'VALID_NAME': 123 as any // Type assertion to bypass TypeScript check for testing invalid input + } + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('Environment variable value for "VALID_NAME" must be a string'); + } + }); + + it('should fail validation when environment variable value is too long', async () => { + const longValue = 'A'.repeat(2049); // 2049 characters, exceeds 2048 limit + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: { + 'VALID_NAME': longValue + } + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('Environment variable value for "VALID_NAME" exceeds maximum length of 2048 characters'); + } + }); + + it('should fail validation when total environment variables size exceeds 4KB', async () => { + // Create environment variables that exceed 4KB total + const largeEnvVars: { [key: string]: string } = {}; + const largeValue = 'A'.repeat(1000); // 1KB per variable + for (let i = 0; i < 5; i++) { + largeEnvVars[`LARGE_VAR_${i}`] = largeValue; + } + + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: largeEnvVars + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('Total size of environment variables exceeds 4KB limit. Please reduce the number or size of environment variables'); + } + }); + }); + + describe('MCP Runtime Parameters Validation', () => { + it('should pass validation with valid runtime parameters', async () => { + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: { + 'API_KEY': 'test-key' + } + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.toBeDefined(); + }); + + it('should fail validation when ECR URI is missing', async () => { + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EnvironmentVariables: { + 'API_KEY': 'test-key' + } + } as any // Type assertion to bypass TypeScript check for testing missing required field + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('ECR URI is required when deploying MCP servers with Agentcore Runtime'); + } + }); + + it('should fail validation when ECR URI is empty', async () => { + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '', + EnvironmentVariables: { + 'API_KEY': 'test-key' + } + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('ECR URI is required when deploying MCP servers with Agentcore Runtime'); + } + }); + + it('should fail validation when ECR URI region does not match deployment region', async () => { + const mcpConfig = { + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest', + EnvironmentVariables: { + 'API_KEY': 'test-key' + } + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + const originalRegion = process.env.AWS_REGION; + process.env.AWS_REGION = 'us-west-2'; + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toContain('ECR image must be in the same region (us-west-2) as the deployment'); + } finally { + if (originalRegion !== undefined) { + process.env.AWS_REGION = originalRegion; + } else { + delete process.env.AWS_REGION; + } + } + }); + }); + + describe('MCP Parameters Structure Validation', () => { + it('should fail validation when MCPParams is missing', async () => { + const mcpConfig = {}; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('MCPParams is required for MCP use cases'); + } + }); + + it('should fail validation when neither GatewayParams nor RuntimeParams is provided', async () => { + const mcpConfig = { + MCPParams: {} + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('Either GatewayParams or RuntimeParams must be provided for MCP use cases'); + } + }); + + it('should fail validation when both GatewayParams and RuntimeParams are provided', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [] + }, + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest' + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('Only one of GatewayParams or RuntimeParams should be provided, not both'); + } + }); + }); + + // Optional Gateway Fields Validation tests moved to mcp-validator-optional-fields.test.ts + // to avoid TypeScript compilation issues with complex type assertions + const createValidTargetParams = (): TargetParams => ({ + TargetName: 'test-target', + TargetType: GATEWAY_TARGET_TYPES.LAMBDA, + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/12345678-1234-1234-1234-123456789012.json' + }); + + const createValidOpenApiTargetParams = (): TargetParams => ({ + TargetName: 'test-target', + TargetType: GATEWAY_TARGET_TYPES.OPEN_API, + SchemaUri: 'mcp/schemas/openApiSchema/12345678-1234-1234-1234-123456789012.json', + OutboundAuthParams: { + OutboundAuthProviderArn: 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/oauth2credentialprovider/test-provider', + OutboundAuthProviderType: 'OAUTH' + } + }); + + const createTargetParamsWithId = (targetId: string): TargetParams => ({ + TargetName: 'test-target', + TargetType: GATEWAY_TARGET_TYPES.LAMBDA, + TargetId: targetId, + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function', + SchemaUri: 'mcp/schemas/lambda/12345678-1234-1234-1234-123456789012.json' + }); + + const createOpenApiTargetParamsWithArn = (arn: string): TargetParams => ({ + TargetName: 'test-target', + TargetType: GATEWAY_TARGET_TYPES.OPEN_API, + SchemaUri: 'mcp/schemas/openApiSchema/12345678-1234-1234-1234-123456789012.json', + OutboundAuthParams: { + OutboundAuthProviderArn: arn, + OutboundAuthProviderType: 'OAUTH' + } + }); + describe('GatewayId validation', () => { + it('should pass with valid GatewayId pattern (prefix-{10chars})', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayId: 'test-abc1234567', + TargetParams: [createValidTargetParams()] + } + } + }; + + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.not.toThrow(); + }); + + it('should fail with empty GatewayId', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayId: '', + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('GatewayId must be a non-empty string'); + } + }); + + }); + + describe('GatewayArn validation', () => { + it('should pass with valid gateway ARN', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayArn: 'arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/test-gateway-123', + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.not.toThrow(); + }); + + it('should fail with invalid gateway ARN format', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayArn: 'arn:aws:bedrock-agentcore:us-east-1:123456789012:invalid-resource', + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('GatewayArn must follow pattern: arn:aws:bedrock-agentcore:{region}:{AccountId}:gateway/{GatewayId}'); + } + }); + + it('should fail with wrong service in ARN', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayArn: 'arn:aws:lambda:us-east-1:123456789012:gateway/test-gateway', + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('GatewayArn must follow pattern: arn:aws:bedrock-agentcore:{region}:{AccountId}:gateway/{GatewayId}'); + } + }); + + it('should pass when GatewayArn is undefined', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.not.toThrow(); + }); + }); + + describe('GatewayUrl validation', () => { + it('should pass with valid gateway URL pattern', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://test-gateway-123.gateway.bedrock-agentcore.us-east-1.amazonaws.com/mcp', + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.not.toThrow(); + }); + + it('should fail with invalid URL format', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayUrl: 'https://invalid-domain.com/mcp', + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('GatewayUrl must follow pattern: https://{GatewayId}.gateway.bedrock-agentcore.{Region}.amazonaws.com/mcp'); + } + }); + + }); + + describe('GatewayName validation', () => { + it('should pass with valid gateway name', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayName: 'My Test Gateway', + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.not.toThrow(); + }); + + it('should fail with empty GatewayName', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + GatewayName: '', + TargetParams: [createValidTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('GatewayName must be a non-empty string'); + } + }); + + }); + + describe('TargetId validation', () => { + it('should pass with valid 10-character uppercase alphanumeric TargetId', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createTargetParamsWithId('ABC1234567')] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.not.toThrow(); + }); + + it('should fail with TargetId shorter than 10 characters', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createTargetParamsWithId('ABC123')] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('TargetId must be exactly 10 uppercase alphanumeric characters for target "test-target"'); + } + }); + + it('should fail with TargetId longer than 10 characters', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createTargetParamsWithId('ABC1234567890')] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('TargetId must be exactly 10 uppercase alphanumeric characters for target "test-target"'); + } + }); + + it('should fail with lowercase characters in TargetId', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createTargetParamsWithId('abc1234567')] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('TargetId must be exactly 10 uppercase alphanumeric characters for target "test-target"'); + } + }); + + it('should fail with special characters in TargetId', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createTargetParamsWithId('ABC123-456')] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('TargetId must be exactly 10 uppercase alphanumeric characters for target "test-target"'); + } + }); + + }); + + describe('Updated outbound auth validation', () => { + it('should pass with valid OAuth ARN', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createValidOpenApiTargetParams()] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.not.toThrow(); + }); + + it('should pass with valid API Key ARN', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createOpenApiTargetParamsWithArn('arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-provider')] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + await expect(mcpValidator.validateNewUseCase(useCase)).resolves.not.toThrow(); + }); + + it('should fail when ARN matches neither OAuth nor API Key pattern', async () => { + const mcpConfig = { + MCPParams: { + GatewayParams: { + TargetParams: [createOpenApiTargetParamsWithArn('arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/invalid-resource')] + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create MCP server', + cfnParameters, + mcpConfig, + 'test-user', + 'FakeProviderName', + 'MCPServer' + ); + + try { + await mcpValidator.validateNewUseCase(useCase); + fail('Expected validation to throw an error'); + } catch (error: any) { + expect(error.message).toBe('Invalid outbound authentication provider ARN format for target "test-target"'); + } + }); + }); + }); diff --git a/source/lambda/use-case-management/test/model/validators/text-validator.test.ts b/source/lambda/use-case-management/test/model/validators/text-validator.test.ts new file mode 100644 index 00000000..1eca5391 --- /dev/null +++ b/source/lambda/use-case-management/test/model/validators/text-validator.test.ts @@ -0,0 +1,506 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { CognitoIdentityProviderClient } from '@aws-sdk/client-cognito-identity-provider'; +import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; +import { marshall } from '@aws-sdk/util-dynamodb'; +import { mockClient } from 'aws-sdk-client-mock'; +import { StorageManagement } from '../../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../../ddb/use-case-config-management'; +import { UseCase } from '../../../model/use-case'; +import { TextUseCaseValidator } from '../../../model/validators/text-validator'; +import { + CHAT_PROVIDERS, + CfnParameterKeys, + KnowledgeBaseTypes, + MODEL_INFO_TABLE_NAME_ENV_VAR, + ModelInfoTableKeys, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR +} from '../../../utils/constants'; + +describe('TextUseCaseValidator', () => { + let ddbMockedClient: any; + let cognitoMockedClient: any; + let validator: TextUseCaseValidator; + let modelInfoTableName = 'model-info-table'; + let cfnParameters: Map; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; + process.env[MODEL_INFO_TABLE_NAME_ENV_VAR] = modelInfoTableName; + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'UseCaseConfigTable'; + + ddbMockedClient = mockClient(DynamoDBClient); + cognitoMockedClient = mockClient(CognitoIdentityProviderClient); + + const storageMgmt = new StorageManagement(); + const useCaseConfigManagement = new UseCaseConfigManagement(); + validator = new TextUseCaseValidator(storageMgmt, useCaseConfigManagement); + }); + + beforeEach(() => { + // Only reset mocks, don't set up any default mock behavior + ddbMockedClient.reset(); + cognitoMockedClient.reset(); + + // Reset parameters for each test + cfnParameters = new Map(); + cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'fake-id'); + }); + + afterEach(() => { + ddbMockedClient.reset(); + cognitoMockedClient.reset(); + jest.clearAllTimers(); + }); + + afterAll(async () => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]; + delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; + + try { + ddbMockedClient.restore(); + cognitoMockedClient.restore(); + } catch (error) { + // Ignore restore errors + } + + jest.clearAllMocks(); + jest.clearAllTimers(); + + if (global.gc) { + global.gc(); + } + + await new Promise((resolve) => setTimeout(resolve, 10)); + }); + + describe('validateNewUseCase - Success Cases', () => { + // Set up success mocks for this entire describe block + beforeEach(() => { + ddbMockedClient + .on(GetItemCommand, { + 'TableName': `${process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]}`, + 'Key': { + [ModelInfoTableKeys.MODEL_INFO_TABLE_PARTITION_KEY]: { 'S': 'RAGChat' } + } + }) + .resolves({ + Item: marshall({ + 'UseCase': 'RAGChat', + 'SortKey': `${CHAT_PROVIDERS.BEDROCK}#fake-model`, + 'ModelProviderName': CHAT_PROVIDERS.BEDROCK, + 'ModelName': 'fake-model', + 'AllowsStreaming': false, + 'Prompt': 'Prompt2 {context}', + 'MaxTemperature': '100', + 'DefaultTemperature': '0.1', + 'MinTemperature': '0', + 'DefaultStopSequences': [], + 'MemoryConfig': { + 'history': 'chat_history', + 'input': 'question', + 'context': 'context', + 'ai_prefix': 'AI', + 'human_prefix': 'Human', + 'output': 'answer' + }, + 'MaxPromptSize': 2000, + 'MaxChatMessageSize': 2500 + }) + }) + .on(GetItemCommand, { + 'TableName': `${process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]}`, + 'Key': { + [ModelInfoTableKeys.MODEL_INFO_TABLE_PARTITION_KEY]: { 'S': 'Chat' } + } + }) + .resolves({ + Item: marshall({ + 'UseCase': 'Chat', + 'SortKey': `${CHAT_PROVIDERS.BEDROCK}#fake-model`, + 'ModelProviderName': CHAT_PROVIDERS.BEDROCK, + 'ModelName': 'fake-model', + 'AllowsStreaming': false, + 'Prompt': 'Prompt2', + 'MaxTemperature': '100', + 'DefaultTemperature': '0.1', + 'MinTemperature': '0', + 'DefaultStopSequences': [], + 'MemoryConfig': { + 'history': 'chat_history', + 'input': 'question', + 'context': null, + 'ai_prefix': 'AI', + 'human_prefix': 'Human', + 'output': 'answer' + }, + 'MaxPromptSize': 2000, + 'MaxChatMessageSize': 2500 + }) + }); + }); + + it('should validate a new use case', async () => { + const config = { + UseCaseName: 'fake-use-case', + ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, + KnowledgeBaseParams: { + KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, + NumberOfDocs: 5, + ReturnSourceDocs: true, + KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } + }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fake-model' + }, + ModelParams: { + param1: { Value: 'value1', Type: 'string' }, + param2: { Value: 'value2', Type: 'string' } + }, + PromptParams: { PromptTemplate: '{context}' }, + Streaming: true, + Temperature: 0.1, + RAGEnabled: true + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create a stack for test', + cfnParameters, + config, + 'test-user', + 'FakeProviderName', + 'Chat' + ); + const result = await validator.validateNewUseCase(useCase.clone()); + + let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); + expect(getItemCalls.length).toEqual(1); + expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); + expect(result).toEqual(useCase); + }); + + it('should validate a new use case with no prompt provided', async () => { + let newConfig = { + UseCaseName: 'fake-use-case', + ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fake-model' + }, + ModelParams: { + param1: { Value: 'value1', Type: 'string' }, + param2: { Value: 'value2', Type: 'string' } + }, + Streaming: true, + Temperature: 0.1 + } + }; + const result = await validator.validateNewUseCase( + new UseCase( + 'fake-id', + 'fake-test', + 'Create a stack for test', + cfnParameters, + newConfig, + 'test-user', + 'FakeProviderName', + 'Chat' + ) + ); + + let getItemCalls = ddbMockedClient.commandCalls(GetItemCommand); + expect(getItemCalls.length).toEqual(1); + expect(getItemCalls[0].args[0].input.TableName).toEqual(modelInfoTableName); + expect((result.configuration as any).LlmParams?.PromptParams?.PromptTemplate).toEqual('Prompt2'); + }); + }); + + describe('validateNewUseCase - Error Cases', () => { + it('should throw an error when model info is not found', async () => { + // Mock DDB to return empty result (no model info found) + ddbMockedClient + .on(GetItemCommand, { + 'TableName': `${process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]}`, + 'Key': { + [ModelInfoTableKeys.MODEL_INFO_TABLE_PARTITION_KEY]: { 'S': 'RAGChat' } + } + }) + .resolves({ + Item: undefined + }); + + const config = { + UseCaseName: 'fake-use-case', + ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, + KnowledgeBaseParams: { + KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, + NumberOfDocs: 5, + ReturnSourceDocs: true, + KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } + }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fake-model' + }, + ModelParams: { + param1: { Value: 'value1', Type: 'string' }, + param2: { Value: 'value2', Type: 'string' } + }, + PromptParams: { PromptTemplate: '{context}' }, + Streaming: true, + Temperature: 0.1, + RAGEnabled: true + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create a stack for test', + cfnParameters, + config, + 'test-user', + 'FakeProviderName', + 'Chat' + ); + + await expect(validator.validateNewUseCase(useCase.clone())).rejects.toThrow(); + }); + + it('should throw an error when DDB throws an error', async () => { + // Mock DDB to throw an error + ddbMockedClient.on(GetItemCommand).rejects(new Error('DynamoDB error')); + + const config = { + UseCaseName: 'fake-use-case', + ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fake-model' + }, + ModelParams: { + param1: { Value: 'value1', Type: 'string' }, + param2: { Value: 'value2', Type: 'string' } + }, + Streaming: true, + Temperature: 0.1 + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create a stack for test', + cfnParameters, + config, + 'test-user', + 'FakeProviderName', + 'Chat' + ); + + await expect(validator.validateNewUseCase(useCase.clone())).rejects.toThrow('DynamoDB error'); + }); + }); + + describe('validateUpdateUseCase - Success Cases', () => { + beforeEach(() => { + const config = { + UseCaseName: 'fake-use-case', + ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, + KnowledgeBaseParams: { + KnowledgeBaseType: KnowledgeBaseTypes.KENDRA, + NumberOfDocs: 5, + ReturnSourceDocs: true, + KendraKnowledgeBaseParams: { ExistingKendraIndexId: 'fakeid' } + }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fake-model' + }, + ModelParams: { + param1: { Value: 'value1', Type: 'string' }, + param2: { Value: 'value2', Type: 'string' } + }, + PromptParams: { PromptTemplate: '{context}' }, + Streaming: true, + Temperature: 0.1, + RAGEnabled: true + } + }; + + ddbMockedClient + .on(GetItemCommand, { + 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] + }) + .resolves({ + Item: marshall({ config: config }) + }) + .on(GetItemCommand, { + 'TableName': `${process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]}`, + 'Key': { + [ModelInfoTableKeys.MODEL_INFO_TABLE_PARTITION_KEY]: { 'S': 'RAGChat' } + } + }) + .resolves({ + Item: marshall({ + 'UseCase': 'RAGChat', + 'SortKey': `${CHAT_PROVIDERS.BEDROCK}#fake-model`, + 'ModelProviderName': CHAT_PROVIDERS.BEDROCK, + 'ModelName': 'fake-model', + 'AllowsStreaming': false, + 'Prompt': 'Prompt2 {context}', + 'MaxTemperature': '100', + 'DefaultTemperature': '0.1', + 'MinTemperature': '0', + 'DefaultStopSequences': [], + 'MemoryConfig': { + 'history': 'chat_history', + 'input': 'question', + 'context': 'context', + 'ai_prefix': 'AI', + 'human_prefix': 'Human', + 'output': 'answer' + }, + 'MaxPromptSize': 2000, + 'MaxChatMessageSize': 2500 + }) + }); + }); + + it('should validate an update', async () => { + const updateConfig = { + KnowledgeBaseParams: { + NumberOfDocs: 10 + }, + LlmParams: { + ModelParams: { + param1: { Value: 'value1', Type: 'string' }, + param2: { Value: 'value2', Type: 'string' }, + param3: { Value: 'value3', Type: 'string' } + }, + PromptParams: { PromptTemplate: 'Prompt2 {context}' } + } + }; + + const result = await validator.validateUpdateUseCase( + new UseCase( + 'fake-id', + 'fake-test', + 'Create a stack for test', + cfnParameters, + updateConfig, + 'test-user', + 'FakeProviderName', + 'Chat' + ), + 'old-key' + ); + + expect(ddbMockedClient.commandCalls(GetItemCommand).length).toEqual(2); + expect(result.configuration).toBeDefined(); + }); + }); + + describe('validateUpdateUseCase - Error Cases', () => { + it('should throw an error when existing config is not found', async () => { + // Mock DDB to return empty result for existing config + ddbMockedClient + .on(GetItemCommand, { + 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] + }) + .resolves({ + Item: undefined + }); + + const updateConfig = { + KnowledgeBaseParams: { + NumberOfDocs: 10 + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create a stack for test', + cfnParameters, + updateConfig, + 'test-user', + 'FakeProviderName', + 'Chat' + ); + + await expect(validator.validateUpdateUseCase(useCase, 'old-key')).rejects.toThrow(); + }); + + it('should throw an error when model info is not found during update', async () => { + const config = { + UseCaseName: 'fake-use-case', + ConversationMemoryParams: { ConversationMemoryType: 'DynamoDB' }, + LlmParams: { + ModelProvider: CHAT_PROVIDERS.BEDROCK, + BedrockLlmParams: { + ModelId: 'fake-model' + }, + ModelParams: { + param1: { Value: 'value1', Type: 'string' } + }, + PromptParams: { PromptTemplate: '{context}' }, + Streaming: true, + Temperature: 0.1, + RAGEnabled: true + } + }; + + // Mock existing config retrieval to succeed + ddbMockedClient + .on(GetItemCommand, { + 'TableName': process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] + }) + .resolves({ + Item: marshall({ config: config }) + }) + // Mock model info retrieval to fail + .on(GetItemCommand, { + 'TableName': `${process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]}`, + 'Key': { + [ModelInfoTableKeys.MODEL_INFO_TABLE_PARTITION_KEY]: { 'S': 'RAGChat' } + } + }) + .resolves({ + Item: undefined + }); + + const updateConfig = { + LlmParams: { + ModelParams: { + param1: { Value: 'updated-value1', Type: 'string' } + } + } + }; + + const useCase = new UseCase( + 'fake-id', + 'fake-test', + 'Create a stack for test', + cfnParameters, + updateConfig, + 'test-user', + 'FakeProviderName', + 'Chat' + ); + + await expect(validator.validateUpdateUseCase(useCase, 'old-key')).rejects.toThrow(); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/validators/validator-factory.test.ts b/source/lambda/use-case-management/test/model/validators/validator-factory.test.ts new file mode 100644 index 00000000..44db5201 --- /dev/null +++ b/source/lambda/use-case-management/test/model/validators/validator-factory.test.ts @@ -0,0 +1,135 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { DynamoDBClient } from '@aws-sdk/client-dynamodb'; +import { mockClient } from 'aws-sdk-client-mock'; +import { StorageManagement } from '../../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../../ddb/use-case-config-management'; +import { AgentBuilderUseCaseValidator } from '../../../model/validators/agent-builder-validator'; +import { AgentUseCaseValidator } from '../../../model/validators/agent-validator'; +import { TextUseCaseValidator } from '../../../model/validators/text-validator'; +import { ValidatorFactory } from '../../../model/validators/validator-factory'; +import { + MODEL_INFO_TABLE_NAME_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + UseCaseTypes +} from '../../../utils/constants'; + +describe('ValidatorFactory', () => { + let ddbMockedClient: any; + let storageMgmt: StorageManagement; + let useCaseConfigMgmt: UseCaseConfigManagement; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; + process.env[MODEL_INFO_TABLE_NAME_ENV_VAR] = 'model-info-table'; + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'UseCaseConfigTable'; + + ddbMockedClient = mockClient(DynamoDBClient); + + storageMgmt = new StorageManagement(); + useCaseConfigMgmt = new UseCaseConfigManagement(); + }); + + beforeEach(() => { + ddbMockedClient.reset(); + }); + + afterEach(() => { + ddbMockedClient.reset(); + jest.clearAllTimers(); + }); + + afterAll(async () => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env[MODEL_INFO_TABLE_NAME_ENV_VAR]; + delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; + + try { + ddbMockedClient.restore(); + } catch (error) { + // Ignore restore errors + } + + jest.clearAllMocks(); + jest.clearAllTimers(); + + if (global.gc) { + global.gc(); + } + + await new Promise((resolve) => setTimeout(resolve, 10)); + }); + + describe('createValidator', () => { + it('should create TextUseCaseValidator for Chat use case type', () => { + const validator = ValidatorFactory.createValidator(UseCaseTypes.CHAT, storageMgmt, useCaseConfigMgmt); + + expect(validator).toBeInstanceOf(TextUseCaseValidator); + }); + + it('should create AgentUseCaseValidator for Agent use case type', () => { + const validator = ValidatorFactory.createValidator(UseCaseTypes.AGENT, storageMgmt, useCaseConfigMgmt); + + expect(validator).toBeInstanceOf(AgentUseCaseValidator); + }); + + it('should create AgentBuilderUseCaseValidator for AgentBuilder use case type', () => { + const validator = ValidatorFactory.createValidator( + UseCaseTypes.AGENT_BUILDER, + storageMgmt, + useCaseConfigMgmt + ); + + expect(validator).toBeInstanceOf(AgentBuilderUseCaseValidator); + }); + + it('should throw error for invalid use case type', () => { + expect(() => { + ValidatorFactory.createValidator('InvalidType', storageMgmt, useCaseConfigMgmt); + }).toThrow('Invalid use case type: InvalidType'); + }); + + it('should throw error for undefined use case type', () => { + expect(() => { + ValidatorFactory.createValidator(undefined as any, storageMgmt, useCaseConfigMgmt); + }).toThrow('Invalid use case type: undefined'); + }); + + it('should throw error for null use case type', () => { + expect(() => { + ValidatorFactory.createValidator(null as any, storageMgmt, useCaseConfigMgmt); + }).toThrow('Invalid use case type: null'); + }); + + it('should throw error for empty string use case type', () => { + expect(() => { + ValidatorFactory.createValidator('', storageMgmt, useCaseConfigMgmt); + }).toThrow('Invalid use case type: '); + }); + }); + + describe('validator instances', () => { + it('should create different instances for each call', () => { + const validator1 = ValidatorFactory.createValidator(UseCaseTypes.CHAT, storageMgmt, useCaseConfigMgmt); + const validator2 = ValidatorFactory.createValidator(UseCaseTypes.CHAT, storageMgmt, useCaseConfigMgmt); + + expect(validator1).not.toBe(validator2); + expect(validator1).toBeInstanceOf(TextUseCaseValidator); + expect(validator2).toBeInstanceOf(TextUseCaseValidator); + }); + + it('should create validators with correct dependencies', () => { + const validator = ValidatorFactory.createValidator( + UseCaseTypes.AGENT, + storageMgmt, + useCaseConfigMgmt + ) as AgentUseCaseValidator; + + expect(validator).toBeInstanceOf(AgentUseCaseValidator); + // Verify that the validator has the correct dependencies injected + expect((validator as any).storageMgmt).toBe(storageMgmt); + expect((validator as any).useCaseConfigMgmt).toBe(useCaseConfigMgmt); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/model/validators/workflow-validator.test.ts b/source/lambda/use-case-management/test/model/validators/workflow-validator.test.ts new file mode 100644 index 00000000..b8223c2f --- /dev/null +++ b/source/lambda/use-case-management/test/model/validators/workflow-validator.test.ts @@ -0,0 +1,402 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { CognitoIdentityProviderClient } from '@aws-sdk/client-cognito-identity-provider'; +import { DynamoDBClient, GetItemCommand } from '@aws-sdk/client-dynamodb'; +import { marshall } from '@aws-sdk/util-dynamodb'; +import { mockClient } from 'aws-sdk-client-mock'; +import { StorageManagement } from '../../../ddb/storage-management'; +import { UseCaseConfigManagement } from '../../../ddb/use-case-config-management'; +import { WorkflowUseCaseConfiguration } from '../../../model/types'; +import { UseCase } from '../../../model/use-case'; +import { WorkflowUseCaseValidator } from '../../../model/validators/workflow-validator'; +import { + CfnParameterKeys, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + UseCaseTypes, + WORKFLOW_ORCHESTRATION_PATTERNS +} from '../../../utils/constants'; + +describe('WorkflowUseCaseValidator - Config Merge Tests', () => { + let validator: WorkflowUseCaseValidator; + let ddbMockedClient: any; + let cognitoMockClient: any; + let cfnParameters: Map; + + beforeAll(() => { + process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'UseCaseConfigTable'; + + ddbMockedClient = mockClient(DynamoDBClient); + cognitoMockClient = mockClient(CognitoIdentityProviderClient); + + const storageMgmt = new StorageManagement(); + const useCaseConfigManagement = new UseCaseConfigManagement(); + validator = new WorkflowUseCaseValidator(storageMgmt, useCaseConfigManagement); + + cfnParameters = new Map(); + cfnParameters.set(CfnParameterKeys.UseCaseConfigRecordKey, 'fake-id'); + }); + + beforeEach(() => { + ddbMockedClient.reset(); + cognitoMockClient.reset(); + }); + + afterEach(() => { + ddbMockedClient.reset(); + cognitoMockClient.reset(); + jest.clearAllTimers(); + }); + + afterAll(async () => { + delete process.env.AWS_SDK_USER_AGENT; + delete process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR]; + + try { + ddbMockedClient.restore(); + cognitoMockClient.restore(); + } catch (error) { + // Ignore restore errors + } + + jest.clearAllMocks(); + jest.clearAllTimers(); + + if (global.gc) { + global.gc(); + } + + await new Promise((resolve) => setTimeout(resolve, 10)); + }); + + describe('validateUpdateUseCase - AgentsAsToolsParams.Agents array merge', () => { + it('should replace Agents array when provided in update config', async () => { + const existingConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-1', + UseCaseName: 'Agent 1', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 1 prompt' + } + }, + { + UseCaseId: 'agent-2', + UseCaseName: 'Agent 2', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 2 prompt' + } + } + ] + } + } + }; + + const updateConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + WorkflowParams: { + SystemPrompt: 'Updated workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-3', + UseCaseName: 'Agent 3', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 3 prompt' + } + } + ] + } + } + } as WorkflowUseCaseConfiguration; + + ddbMockedClient.on(GetItemCommand).resolves({ + Item: marshall({ + key: 'test-key', + config: existingConfig + }) + }); + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + updateConfig, + 'fake-user-id', + undefined, + UseCaseTypes.WORKFLOW + ); + + const result = await validator.validateUpdateUseCase(mockUseCase, 'old-record-key'); + const resultConfig = result.configuration as WorkflowUseCaseConfiguration; + + expect(resultConfig.WorkflowParams?.AgentsAsToolsParams?.Agents).toHaveLength(1); + expect(resultConfig.WorkflowParams?.AgentsAsToolsParams?.Agents?.[0].UseCaseId).toBe('agent-3'); + expect(resultConfig.WorkflowParams?.AgentsAsToolsParams?.Agents?.[0].UseCaseName).toBe('Agent 3'); + expect(resultConfig.WorkflowParams?.SystemPrompt).toBe('Updated workflow coordinator'); + }); + + it('should fail validation when clearing Agents array with agents-as-tools pattern', async () => { + const existingConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-1', + UseCaseName: 'Agent 1', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 1 prompt' + } + }, + { + UseCaseId: 'agent-2', + UseCaseName: 'Agent 2', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 2 prompt' + } + } + ] + } + } + }; + + const updateConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + WorkflowParams: { + SystemPrompt: 'Updated workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: {} + } + } as WorkflowUseCaseConfiguration; + + ddbMockedClient.on(GetItemCommand).resolves({ + Item: marshall({ + key: 'test-key', + config: existingConfig + }) + }); + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + updateConfig, + 'fake-user-id', + undefined, + UseCaseTypes.WORKFLOW + ); + + await expect(validator.validateUpdateUseCase(mockUseCase, 'old-record-key')).rejects.toThrow( + 'Agents must be a non-empty array.' + ); + }); + + it('should fail validation when providing empty Agents array with agents-as-tools pattern', async () => { + const existingConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-1', + UseCaseName: 'Agent 1', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 1 prompt' + } + } + ] + } + } + }; + + const updateConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + WorkflowParams: { + SystemPrompt: 'Updated workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: { + Agents: [] + } + } + } as WorkflowUseCaseConfiguration; + + ddbMockedClient.on(GetItemCommand).resolves({ + Item: marshall({ + key: 'test-key', + config: existingConfig + }) + }); + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + updateConfig, + 'fake-user-id', + undefined, + UseCaseTypes.WORKFLOW + ); + + await expect(validator.validateUpdateUseCase(mockUseCase, 'old-record-key')).rejects.toThrow( + 'Agents must be a non-empty array.' + ); + }); + + it('should merge other WorkflowParams fields normally while updating Agents', async () => { + const existingConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-1', + UseCaseName: 'Agent 1', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 1 prompt' + } + } + ] + }, + MemoryConfig: { + LongTermEnabled: false + } + }, + LlmParams: { + ModelProvider: 'Bedrock', + Temperature: 0.5 + } + }; + + const updateConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + WorkflowParams: { + SystemPrompt: 'Updated workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-2', + UseCaseName: 'Agent 2', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 2 prompt' + } + } + ] + }, + MemoryConfig: { + LongTermEnabled: true + } + }, + LlmParams: { + Temperature: 0.8 + } + } as WorkflowUseCaseConfiguration; + + ddbMockedClient.on(GetItemCommand).resolves({ + Item: marshall({ + key: 'test-key', + config: existingConfig + }) + }); + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + updateConfig, + 'fake-user-id', + undefined, + UseCaseTypes.WORKFLOW + ); + + const result = await validator.validateUpdateUseCase(mockUseCase, 'old-record-key'); + const resultConfig = result.configuration as WorkflowUseCaseConfiguration; + + expect(resultConfig.WorkflowParams?.AgentsAsToolsParams?.Agents).toHaveLength(1); + expect(resultConfig.WorkflowParams?.AgentsAsToolsParams?.Agents?.[0].UseCaseId).toBe('agent-2'); + expect(resultConfig.WorkflowParams?.SystemPrompt).toBe('Updated workflow coordinator'); + expect(resultConfig.WorkflowParams?.MemoryConfig?.LongTermEnabled).toBe(true); + expect(resultConfig.LlmParams?.Temperature).toBe(0.8); + expect(resultConfig.LlmParams?.ModelProvider).toBe('Bedrock'); + }); + + it('should preserve existing Agents when WorkflowParams not in update config', async () => { + const existingConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + UseCaseName: 'workflow-test', + WorkflowParams: { + SystemPrompt: 'You are a workflow coordinator', + OrchestrationPattern: WORKFLOW_ORCHESTRATION_PATTERNS.AGENTS_AS_TOOLS, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-1', + UseCaseName: 'Agent 1', + UseCaseType: UseCaseTypes.AGENT_BUILDER, + AgentBuilderParams: { + SystemPrompt: 'Agent 1 prompt' + } + } + ] + } + } + }; + + const updateConfig = { + UseCaseType: UseCaseTypes.WORKFLOW, + UseCaseName: 'updated-workflow-test' + } as WorkflowUseCaseConfiguration; + + ddbMockedClient.on(GetItemCommand).resolves({ + Item: marshall({ + key: 'test-key', + config: existingConfig + }) + }); + + const mockUseCase = new UseCase( + 'fake-id', + 'fake-name', + 'fake-description', + cfnParameters, + updateConfig, + 'fake-user-id', + undefined, + UseCaseTypes.WORKFLOW + ); + + const result = await validator.validateUpdateUseCase(mockUseCase, 'old-record-key'); + const resultConfig = result.configuration as WorkflowUseCaseConfiguration; + + expect(resultConfig.UseCaseName).toBe('updated-workflow-test'); + expect(resultConfig.WorkflowParams?.AgentsAsToolsParams?.Agents).toHaveLength(1); + expect(resultConfig.WorkflowParams?.AgentsAsToolsParams?.Agents?.[0].UseCaseId).toBe('agent-1'); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/s3/s3-management.test.ts b/source/lambda/use-case-management/test/s3/s3-management.test.ts new file mode 100644 index 00000000..d59a1d5e --- /dev/null +++ b/source/lambda/use-case-management/test/s3/s3-management.test.ts @@ -0,0 +1,377 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { S3Client } from '@aws-sdk/client-s3'; +import { createPresignedPost } from '@aws-sdk/s3-presigned-post'; +import { S3Management, SchemaUploadParams, PresignedPostResponse, SchemaUploadResponse } from '../../s3/s3-management'; +import { MCP_SCHEMA_UPLOAD_CONSTRAINTS } from '../../utils/constants'; +import { FileUploadInfo } from '../../model/adapters/mcp-adapter'; + +jest.mock('@aws-sdk/client-s3'); +jest.mock('@aws-sdk/s3-presigned-post'); +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn().mockReturnValue({}) +})); + +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + tracer: { + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn() + } +})); + +describe('S3Management', () => { + let s3Management: S3Management; + let mockS3Client: jest.Mocked; + let mockCreatePresignedPost: jest.MockedFunction; + + const mockBucketName = 'test-bucket'; + const originalEnv = process.env; + + beforeEach(() => { + jest.clearAllMocks(); + + // Set up environment + process.env = { + ...originalEnv, + GAAB_DEPLOYMENTS_BUCKET: mockBucketName + }; + + mockS3Client = new S3Client({}) as jest.Mocked; + mockCreatePresignedPost = createPresignedPost as jest.MockedFunction; + + s3Management = new S3Management(); + }); + + afterEach(() => { + process.env = originalEnv; + }); + + describe('Creation of presigned URL post using createSchemaUploadPresignedPost', () => { + const mockParams: SchemaUploadParams = { + fileName: 'test-schema.json', + schemaType: 'openApiSchema', + userId: 'user123', + contentType: 'application/json', + fileExtension: '.json' + }; + + const mockPresignedPostResult = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: { + key: 'mcp/schemas/openApiSchema/test-schema.json', + 'x-amz-meta-userid': 'user123', + 'x-amz-meta-filename': 'test-schema.json', + 'x-amz-meta-fileextension': 'json', + 'Content-Type': 'application/json', + 'tagging': + 'schemaTypeopenApiSchemauploadedByuser123sourcemcp-apistatusinactive' + } + }; + + beforeEach(() => { + mockCreatePresignedPost.mockResolvedValue(mockPresignedPostResult); + }); + + it('should create presigned POST with correct parameters', async () => { + const result: PresignedPostResponse = await s3Management.createSchemaUploadPresignedPost(mockParams); + + expect(mockCreatePresignedPost).toHaveBeenCalledWith( + expect.any(Object), + expect.objectContaining({ + Bucket: mockBucketName, + Key: expect.stringMatching( + /^mcp\/schemas\/openApiSchema\/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\.json$/ + ), + Conditions: expect.arrayContaining([ + ['starts-with', '$key', 'mcp/schemas/openApiSchema/'], + [ + 'content-length-range', + MCP_SCHEMA_UPLOAD_CONSTRAINTS.MIN_FILE_SIZE_BYTES, + MCP_SCHEMA_UPLOAD_CONSTRAINTS.MAX_FILE_SIZE_BYTES + ], + ['eq', '$x-amz-meta-userid', mockParams.userId], + ['eq', '$x-amz-meta-filename', mockParams.fileName], + ['eq', '$x-amz-meta-fileextension', mockParams.fileExtension], + ['eq', '$Content-Type', mockParams.contentType], + ['eq', '$tagging', expect.stringContaining('')] + ]), + Fields: expect.objectContaining({ + key: expect.stringMatching( + /^mcp\/schemas\/openApiSchema\/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\.json$/ + ), + 'x-amz-meta-userid': mockParams.userId, + 'x-amz-meta-filename': mockParams.fileName, + 'x-amz-meta-fileextension': mockParams.fileExtension, + 'Content-Type': mockParams.contentType, + 'tagging': expect.stringContaining('') + }), + Expires: MCP_SCHEMA_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS + }) + ); + + expect(result).toEqual({ + uploadUrl: mockPresignedPostResult.url, + formFields: mockPresignedPostResult.fields, + fileName: mockParams.fileName, + expiresIn: MCP_SCHEMA_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS, + createdAt: expect.any(String) + }); + }); + + it('should create XML tagging with correct format', async () => { + await s3Management.createSchemaUploadPresignedPost(mockParams); + + const callArgs = mockCreatePresignedPost.mock.calls[0][1]; + const taggingCondition = callArgs.Conditions?.find( + (condition: any) => Array.isArray(condition) && condition[0] === 'eq' && condition[1] === '$tagging' + ); + const taggingField = callArgs.Fields?.tagging; + + expect(taggingCondition).toBeDefined(); + expect(taggingField).toBeDefined(); + + const expectedTagging = + 'schemaTypeopenApiSchemauploadedByuser123sourcemcp-apistatusinactive'; + expect(taggingField).toBe(expectedTagging); + }); + + it('should include security constraints in conditions', async () => { + await s3Management.createSchemaUploadPresignedPost(mockParams); + + const callArgs = mockCreatePresignedPost.mock.calls[0][1]; + const conditions = callArgs.Conditions; + + // Check for path traversal protection + expect(conditions).toContainEqual(['starts-with', '$key', 'mcp/schemas/openApiSchema/']); + + // Check for file size constraints + expect(conditions).toContainEqual([ + 'content-length-range', + MCP_SCHEMA_UPLOAD_CONSTRAINTS.MIN_FILE_SIZE_BYTES, + MCP_SCHEMA_UPLOAD_CONSTRAINTS.MAX_FILE_SIZE_BYTES + ]); + + // Check for content type enforcement + expect(conditions).toContainEqual(['eq', '$Content-Type', mockParams.contentType]); + }); + + it('should throw error when createPresignedPost fails', async () => { + const error = new Error('S3 service error'); + mockCreatePresignedPost.mockRejectedValue(error); + + await expect(s3Management.createSchemaUploadPresignedPost(mockParams)).rejects.toThrow( + 'Failed to generate presigned POST: S3 service error' + ); + }); + }); + + describe('Create a set of presignedUrls for multiple files using createSchemaUploadPresignedPosts', () => { + const mockUserId = 'user123'; + const mockFiles: FileUploadInfo[] = [ + { + fileName: 'schema1.json', + schemaType: 'openApiSchema', + contentType: 'application/json', + fileExtension: '.json' + }, + { + fileName: 'schema2.smithy', + schemaType: 'smithyModel', + contentType: 'text/plain', + fileExtension: '.smithy' + } + ]; + + const mockPresignedPostResult = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: { + key: 'test-key', + 'x-amz-meta-userid': 'user123' + } + }; + + beforeEach(() => { + mockCreatePresignedPost.mockResolvedValue(mockPresignedPostResult); + }); + + it('should create presigned POSTs for multiple files', async () => { + const result: SchemaUploadResponse = await s3Management.createSchemaUploadPresignedPosts( + mockUserId, + mockFiles + ); + + expect(mockCreatePresignedPost).toHaveBeenCalledTimes(2); + expect(result.uploads).toHaveLength(2); + + result.uploads.forEach((upload, index) => { + expect(upload).toEqual({ + uploadUrl: mockPresignedPostResult.url, + formFields: mockPresignedPostResult.fields, + fileName: mockFiles[index].fileName, + expiresIn: MCP_SCHEMA_UPLOAD_CONSTRAINTS.PRESIGNED_URL_EXPIRY_SECONDS, + createdAt: expect.any(String) + }); + }); + }); + + it('should handle empty files array', async () => { + const result: SchemaUploadResponse = await s3Management.createSchemaUploadPresignedPosts(mockUserId, []); + + expect(mockCreatePresignedPost).not.toHaveBeenCalled(); + expect(result.uploads).toHaveLength(0); + }); + + it('should handle single file', async () => { + const singleFile = [mockFiles[0]]; + const result: SchemaUploadResponse = await s3Management.createSchemaUploadPresignedPosts( + mockUserId, + singleFile + ); + + expect(mockCreatePresignedPost).toHaveBeenCalledTimes(1); + expect(result.uploads).toHaveLength(1); + }); + + it('should propagate errors from individual presigned POST creation', async () => { + const error = new Error('Individual presigned POST failed'); + mockCreatePresignedPost.mockRejectedValueOnce(error); + + await expect(s3Management.createSchemaUploadPresignedPosts(mockUserId, mockFiles)).rejects.toThrow( + 'Failed to generate presigned POST: Individual presigned POST failed' + ); + }); + + it('should call createSchemaUploadPresignedPost with correct parameters for each file', async () => { + const spy = jest.spyOn(s3Management, 'createSchemaUploadPresignedPost'); + + await s3Management.createSchemaUploadPresignedPosts(mockUserId, mockFiles); + + expect(spy).toHaveBeenCalledTimes(2); + + expect(spy).toHaveBeenNthCalledWith(1, { + fileName: mockFiles[0].fileName, + schemaType: mockFiles[0].schemaType, + userId: mockUserId, + contentType: mockFiles[0].contentType, + fileExtension: mockFiles[0].fileExtension + }); + + expect(spy).toHaveBeenNthCalledWith(2, { + fileName: mockFiles[1].fileName, + schemaType: mockFiles[1].schemaType, + userId: mockUserId, + contentType: mockFiles[1].contentType, + fileExtension: mockFiles[1].fileExtension + }); + }); + + it('should handle partial failures correctly', async () => { + const error = new Error('failure!'); + mockCreatePresignedPost.mockResolvedValueOnce(mockPresignedPostResult).mockRejectedValueOnce(error); + + await expect(s3Management.createSchemaUploadPresignedPosts(mockUserId, mockFiles)).rejects.toThrow( + 'Failed to generate presigned POST: failure!' + ); + }); + }); + + describe('XML tagging helper functions', () => { + it('should create proper XML tag structure', async () => { + const mockParams: SchemaUploadParams = { + fileName: 'test.json', + schemaType: 'openApiSchema', + userId: 'user123', + contentType: 'application/json', + fileExtension: '.json' + }; + + await s3Management.createSchemaUploadPresignedPost(mockParams); + + const callArgs = mockCreatePresignedPost.mock.calls[0][1]; + const tagging = callArgs.Fields?.tagging; + + // Verify XML structure + expect(tagging).toContain(''); + expect(tagging).toContain(''); + expect(tagging).toContain(''); + expect(tagging).toContain(''); + + // Verify individual tags + expect(tagging).toContain('schemaTypeopenApiSchema'); + expect(tagging).toContain('uploadedByuser123'); + expect(tagging).toContain('sourcemcp-api'); + expect(tagging).toContain('statusinactive'); + }); + + it('should handle special characters in tag values', async () => { + const mockParams: SchemaUploadParams = { + fileName: 'test-file.json', + schemaType: 'openApiSchema', + userId: 'user@example.com', + contentType: 'application/json', + fileExtension: 'json' + }; + + await s3Management.createSchemaUploadPresignedPost(mockParams); + + const callArgs = mockCreatePresignedPost.mock.calls[0][1]; + const tagging = callArgs.Fields?.tagging; + + expect(tagging).toContain('uploadedByuser@example.com'); + }); + + it('should handle different schema types correctly', async () => { + const schemaTypes = [ + { type: 'lambda', extension: '.json', contentType: 'application/json' }, + { type: 'openApiSchema', extension: '.yaml', contentType: 'application/yaml' }, + { type: 'smithyModel', extension: '.smithy', contentType: 'text/plain' } + ]; + + for (const schema of schemaTypes) { + jest.clearAllMocks(); + + // Set up mock for this iteration + const mockPresignedPostResult = { + url: 'https://test-bucket.s3.amazonaws.com', + fields: { + key: `mcp/schemas/${schema.type}/test-key`, + 'x-amz-meta-userid': 'user123' + } + }; + mockCreatePresignedPost.mockResolvedValue(mockPresignedPostResult); + + const mockParams: SchemaUploadParams = { + fileName: `test-schema${schema.extension}`, + schemaType: schema.type, + userId: 'user123', + contentType: schema.contentType, + fileExtension: schema.extension + }; + + await s3Management.createSchemaUploadPresignedPost(mockParams); + + const callArgs = mockCreatePresignedPost.mock.calls[0][1]; + + // Verify the key prefix matches the schema type + expect(callArgs.Key).toMatch(new RegExp(`^mcp/schemas/${schema.type}/`)); + + // Verify the conditions include the correct prefix + expect(callArgs.Conditions).toContainEqual(['starts-with', '$key', `mcp/schemas/${schema.type}/`]); + + // Verify the content type is enforced + expect(callArgs.Conditions).toContainEqual(['eq', '$Content-Type', schema.contentType]); + + // Verify the tagging includes the correct schema type + const tagging = callArgs.Fields?.tagging; + expect(tagging).toContain(`schemaType${schema.type}`); + } + }); + }); +}); diff --git a/source/lambda/use-case-management/test/command.test.ts b/source/lambda/use-case-management/test/use-case-command.test.ts similarity index 99% rename from source/lambda/use-case-management/test/command.test.ts rename to source/lambda/use-case-management/test/use-case-command.test.ts index 5f9e2497..b2bddd86 100644 --- a/source/lambda/use-case-management/test/command.test.ts +++ b/source/lambda/use-case-management/test/use-case-command.test.ts @@ -30,7 +30,7 @@ import { PermanentlyDeleteUseCaseCommand, UpdateUseCaseCommand, GetUseCaseCommand -} from '../command'; +} from '../model/commands/use-case-command'; import { ListUseCasesAdapter } from '../model/list-use-cases'; import { UseCase } from '../model/use-case'; import { @@ -275,6 +275,7 @@ describe('When testing Use Case Commands', () => { 'Temperature': { 'N': '0.1' } } }, + 'ProvisionedConcurrencyValue': { 'N': '0' }, 'UseCaseDescription': { 'S': 'fake-description' }, 'UseCaseName': { 'S': 'fake-name' }, 'UseCaseType': { 'S': 'Text' } diff --git a/source/lambda/use-case-management/test/index.test.ts b/source/lambda/use-case-management/test/use-case-handler.test.ts similarity index 91% rename from source/lambda/use-case-management/test/index.test.ts rename to source/lambda/use-case-management/test/use-case-handler.test.ts index bed27c51..a5724134 100644 --- a/source/lambda/use-case-management/test/index.test.ts +++ b/source/lambda/use-case-management/test/use-case-handler.test.ts @@ -1,5 +1,24 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 + +jest.mock('../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + metrics: { + addMetric: jest.fn(), + publishStoredMetrics: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn() + } +})); + import { CloudFormationClient, CreateStackCommand, @@ -26,6 +45,7 @@ import { COGNITO_POLICY_TABLE_ENV_VAR, CfnParameterKeys, IS_INTERNAL_USER_ENV_VAR, + MCP_CONTENT_TYPES, MODEL_INFO_TABLE_NAME_ENV_VAR, POWERTOOLS_METRICS_NAMESPACE_ENV_VAR, TEMPLATE_FILE_EXTN_ENV_VAR, @@ -50,6 +70,7 @@ describe('When invoking the lambda function', () => { beforeAll(() => { process.env.AWS_SDK_USER_AGENT = `{ "customUserAgent": "AWSSOLUTION/SO0276/v2.1.0" }`; + process.env._X_AMZN_TRACE_ID = 'test-trace-id'; process.env[POWERTOOLS_METRICS_NAMESPACE_ENV_VAR] = 'UnitTest'; process.env[USE_CASES_TABLE_NAME_ENV_VAR] = 'UseCaseTable'; process.env[ARTIFACT_BUCKET_ENV_VAR] = 'fake-artifact-bucket'; @@ -158,7 +179,7 @@ describe('When invoking the lambda function', () => { }); it('should create a stack and update ddb for create action', async () => { - const lambda = await import('../index'); + const lambda = await import('../use-case-handler'); expect(await lambda.lambdaHandler(createUseCaseApiEvent as unknown as APIGatewayEvent)).toEqual({ 'body': 'SUCCESS', @@ -167,7 +188,7 @@ describe('When invoking the lambda function', () => { 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', 'Access-Control-Allow-Origin': '*', - 'Content-Type': 'application/json' + 'Content-Type': MCP_CONTENT_TYPES.JSON }, 'isBase64Encoded': false, 'statusCode': 200 @@ -175,7 +196,7 @@ describe('When invoking the lambda function', () => { }); it('should create a bedrock kb stack and update ddb for create action', async () => { - const lambda = await import('../index'); + const lambda = await import('../use-case-handler'); expect( await lambda.lambdaHandler(createUseCaseApiEventBedrockKnowledgeBase as unknown as APIGatewayEvent) @@ -186,7 +207,7 @@ describe('When invoking the lambda function', () => { 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', 'Access-Control-Allow-Origin': '*', - 'Content-Type': 'application/json' + 'Content-Type': MCP_CONTENT_TYPES.JSON }, 'isBase64Encoded': false, 'statusCode': 200 @@ -219,7 +240,7 @@ describe('When invoking the lambda function', () => { }) }); - const lambda = await import('../index'); + const lambda = await import('../use-case-handler'); expect(await lambda.lambdaHandler(createUseCaseApiEventNoPrompt as unknown as APIGatewayEvent)).toEqual({ 'body': 'SUCCESS', @@ -228,7 +249,7 @@ describe('When invoking the lambda function', () => { 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', 'Access-Control-Allow-Origin': '*', - 'Content-Type': 'application/json' + 'Content-Type': MCP_CONTENT_TYPES.JSON }, 'isBase64Encoded': false, 'statusCode': 200 @@ -286,7 +307,7 @@ describe('When invoking the lambda function', () => { ] }); - const lambda = await import('../index'); + const lambda = await import('../use-case-handler'); expect(await lambda.lambdaHandler(updateUseCaseApiEvent as unknown as APIGatewayEvent)).toEqual({ 'body': 'SUCCESS', @@ -295,7 +316,7 @@ describe('When invoking the lambda function', () => { 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', 'Access-Control-Allow-Origin': '*', - 'Content-Type': 'application/json' + 'Content-Type': MCP_CONTENT_TYPES.JSON }, 'isBase64Encoded': false, 'statusCode': 200 @@ -303,7 +324,7 @@ describe('When invoking the lambda function', () => { }); it('should get deployed stacks with a GET request', async () => { - const lambda = await import('../index'); + const lambda = await import('../use-case-handler'); ddbMockedClient.on(ScanCommand).resolves({ Items: [ @@ -397,6 +418,7 @@ describe('When invoking the lambda function', () => { 'Name': 'test-2', 'UseCaseId': '11111111-fake-id', 'CreatedDate': '2024-07-22T20:32:00Z', + 'Description': 'test case 2', 'useCaseUUID': 'fake-uuid', 'status': 'CREATE_COMPLETE', 'cloudFrontWebUrl': 'mock-cloudfront-url', @@ -407,6 +429,7 @@ describe('When invoking the lambda function', () => { 'Name': 'test-1', 'UseCaseId': '11111111-fake-id', 'CreatedDate': '2024-07-22T20:31:00Z', + 'Description': 'test case 1', 'useCaseUUID': 'fake-uuid', 'status': 'CREATE_COMPLETE', 'cloudFrontWebUrl': 'mock-cloudfront-url', @@ -421,7 +444,7 @@ describe('When invoking the lambda function', () => { 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', 'Access-Control-Allow-Origin': '*', - 'Content-Type': 'application/json' + 'Content-Type': MCP_CONTENT_TYPES.JSON }, 'isBase64Encoded': false, 'statusCode': 200 @@ -429,7 +452,7 @@ describe('When invoking the lambda function', () => { }); it('should create a stack and update ddb for create action with ExistingRestApiId', async () => { - const lambda = await import('../index'); + const lambda = await import('../use-case-handler'); const eventWithRestApiId = { ...createUseCaseApiEvent, @@ -446,7 +469,7 @@ describe('When invoking the lambda function', () => { 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', 'Access-Control-Allow-Origin': '*', - 'Content-Type': 'application/json' + 'Content-Type': MCP_CONTENT_TYPES.JSON }, 'isBase64Encoded': false, 'statusCode': 200 @@ -461,7 +484,7 @@ describe('When invoking the lambda function', () => { }); it('should handle API Gateway errors gracefully', async () => { - const lambda = await import('../index'); + const lambda = await import('../use-case-handler'); // Mock API Gateway to throw an error apiGatewayMockedClient.on(GetResourcesCommand).rejects(new Error('API Gateway Error')); @@ -475,11 +498,11 @@ describe('When invoking the lambda function', () => { }; expect(await lambda.lambdaHandler(eventWithRestApiId as unknown as APIGatewayEvent)).toEqual({ - 'body': 'Internal Error - Please contact support and quote the following trace id: undefined', + 'body': 'Internal Error - Please contact support and quote the following trace id: test-trace-id', 'headers': { 'Access-Control-Allow-Origin': '*', - 'Content-Type': 'text/plain', - '_X_AMZN_TRACE_ID': undefined, + 'Content-Type': MCP_CONTENT_TYPES.TEXT_PLAIN, + '_X_AMZN_TRACE_ID': 'test-trace-id', 'x-amzn-ErrorType': 'CustomExecutionError' }, 'isBase64Encoded': false, @@ -488,7 +511,7 @@ describe('When invoking the lambda function', () => { }); it('should handle missing root resource', async () => { - const lambda = await import('../index'); + const lambda = await import('../use-case-handler'); // Mock API Gateway to return no root resource apiGatewayMockedClient.on(GetResourcesCommand).resolves({ @@ -509,11 +532,11 @@ describe('When invoking the lambda function', () => { }; expect(await lambda.lambdaHandler(eventWithRestApiId as unknown as APIGatewayEvent)).toEqual({ - 'body': 'Internal Error - Please contact support and quote the following trace id: undefined', + 'body': 'Internal Error - Please contact support and quote the following trace id: test-trace-id', 'headers': { 'Access-Control-Allow-Origin': '*', - 'Content-Type': 'text/plain', - '_X_AMZN_TRACE_ID': undefined, + 'Content-Type': MCP_CONTENT_TYPES.TEXT_PLAIN, + '_X_AMZN_TRACE_ID': 'test-trace-id', 'x-amzn-ErrorType': 'CustomExecutionError' }, 'isBase64Encoded': false, @@ -528,12 +551,12 @@ describe('When invoking the lambda function', () => { }); it('Should fail to invoke lambda since env is not set up correctly', async () => { - const lambda = import('../index'); + const lambda = import('../use-case-handler'); await expect( (await lambda).lambdaHandler(createUseCaseApiEvent as unknown as APIGatewayEvent) ).rejects.toThrow( - 'Missing required environment variables: USER_POOL_ID. This should not happen and indicates in issue with your deployment.' + 'Missing required environment variables: USER_POOL_ID. This should not happen and indicates an issue with your deployment.' ); }); @@ -544,6 +567,7 @@ describe('When invoking the lambda function', () => { afterAll(() => { delete process.env.AWS_SDK_USER_AGENT; + delete process.env._X_AMZN_TRACE_ID; delete process.env[POWERTOOLS_METRICS_NAMESPACE_ENV_VAR]; delete process.env[USE_CASES_TABLE_NAME_ENV_VAR]; delete process.env[ARTIFACT_BUCKET_ENV_VAR]; diff --git a/source/lambda/use-case-management/test/utils/utils.test.ts b/source/lambda/use-case-management/test/utils/utils.test.ts new file mode 100644 index 00000000..ffa8d50a --- /dev/null +++ b/source/lambda/use-case-management/test/utils/utils.test.ts @@ -0,0 +1,338 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import { + parseEventBody, + handleLambdaError, + extractUserId, + generateUUID, + getRetrySettings, + checkEnv, + isValidArnWithRegexKey +} from '../../utils/utils'; +import RequestValidationError from '../../utils/error'; +import { MAX_INPUT_PAYLOAD_SIZE, REQUIRED_ENV_VARS, REQUIRED_MCP_ENV_VARS } from '../../utils/constants'; + +jest.mock('../../power-tools-init', () => ({ + logger: { + info: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + warn: jest.fn() + }, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureMethod: () => () => {}, + captureAWSv3Client: jest.fn() + } +})); + +jest.mock('../../utils/http-response-formatters', () => ({ + formatError: jest.fn().mockReturnValue({ + statusCode: 500, + body: JSON.stringify({ message: 'Error' }) + }) +})); + +const createMockEvent = (body: string | null): APIGatewayEvent => ({ + body, + httpMethod: 'POST', + resource: '/test', + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/test', + pathParameters: null, + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: { + accountId: 'test-account', + apiId: 'test-api', + authorizer: {}, + httpMethod: 'POST', + identity: {} as any, + path: '/test', + protocol: 'HTTP/1.1', + requestId: 'test-request-id', + requestTime: '01/Jan/2023:00:00:00 +0000', + requestTimeEpoch: 1672531200, + resourceId: 'test-resource', + resourcePath: '/test', + stage: 'test' + } +}); + +describe('Utils Functions', () => { + describe('parseEventBody', () => { + it('should parse valid JSON object', () => { + const event = createMockEvent('{"key": "value"}'); + const result = parseEventBody(event); + expect(result).toEqual({ key: 'value' }); + }); + + it('should handle empty body with default empty object', () => { + const event = createMockEvent(null); + const result = parseEventBody(event); + expect(result).toEqual({}); + }); + + it('should handle empty string body', () => { + const event = createMockEvent(''); + const result = parseEventBody(event); + expect(result).toEqual({}); + }); + + it('should throw error for invalid JSON', () => { + const event = createMockEvent('invalid json'); + expect(() => parseEventBody(event)).toThrow(RequestValidationError); + expect(() => parseEventBody(event)).toThrow('Invalid JSON in request body'); + }); + + it('should throw error for primitive values', () => { + const event = createMockEvent('"string"'); + expect(() => parseEventBody(event)).toThrow(RequestValidationError); + expect(() => parseEventBody(event)).toThrow('Invalid request format'); + }); + + it('should throw error for null values', () => { + const event = createMockEvent('null'); + expect(() => parseEventBody(event)).toThrow(RequestValidationError); + expect(() => parseEventBody(event)).toThrow('Invalid request format'); + }); + + it('should throw error for array values', () => { + const event = createMockEvent('[1, 2, 3]'); + expect(() => parseEventBody(event)).toThrow(RequestValidationError); + expect(() => parseEventBody(event)).toThrow('Invalid request format'); + }); + + it('should throw error for payload too large', () => { + const largePayload = '{"data": "' + 'x'.repeat(MAX_INPUT_PAYLOAD_SIZE) + '"}'; + const event = createMockEvent(largePayload); + expect(() => parseEventBody(event)).toThrow(RequestValidationError); + expect(() => parseEventBody(event)).toThrow('Request body exceeds maximum allowed size'); + }); + + it('should accept valid nested objects', () => { + const event = createMockEvent('{"user": {"name": "test", "age": 30}}'); + const result = parseEventBody(event); + expect(result).toEqual({ user: { name: 'test', age: 30 } }); + }); + }); + + describe('extractUserId', () => { + it('should extract user ID from event context', () => { + const event = createMockEvent('{}'); + event.requestContext.authorizer = { UserId: 'test-user-123' }; + + const result = extractUserId(event); + expect(result).toBe('test-user-123'); + }); + + it('should throw error when authorizer context is missing', () => { + const event = createMockEvent('{}'); + event.requestContext.authorizer = undefined; + + expect(() => extractUserId(event)).toThrow('Missing authorizer context in API Gateway event'); + }); + + it('should throw error when UserId is missing', () => { + const event = createMockEvent('{}'); + event.requestContext.authorizer = {}; + + expect(() => extractUserId(event)).toThrow('Missing UserId in authorizer context'); + }); + + it('should throw error when UserId is empty string', () => { + const event = createMockEvent('{}'); + event.requestContext.authorizer = { UserId: '' }; + + expect(() => extractUserId(event)).toThrow('Missing UserId in authorizer context'); + }); + }); + + describe('generateUUID', () => { + it('should generate a valid UUID with default parameter (false)', () => { + const uuid = generateUUID(); + expect(uuid).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i); + }); + + it('should generate a valid UUID when explicitly passed false', () => { + const uuid = generateUUID(false); + expect(uuid).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i); + }); + + it('should generate a short UUID when requested', () => { + const shortUuid = generateUUID(true); + expect(shortUuid).toMatch(/^[0-9a-f]{8}$/i); + }); + }); + + describe('getRetrySettings', () => { + it('should return default retry settings', () => { + const settings = getRetrySettings(); + expect(settings).toHaveProperty('maxRetries'); + expect(settings).toHaveProperty('backOffRate'); + expect(settings).toHaveProperty('initialDelayMs'); + expect(typeof settings.maxRetries).toBe('number'); + expect(typeof settings.backOffRate).toBe('number'); + expect(typeof settings.initialDelayMs).toBe('number'); + }); + }); + + describe('handleLambdaError', () => { + it('should handle RequestValidationError', () => { + const error = new RequestValidationError('Test validation error'); + const result = handleLambdaError(error, 'testAction', 'TestContext'); + + expect(result).toBeDefined(); + expect(result.statusCode).toBe(500); + }); + + it('should handle generic errors', () => { + const error = new Error('Generic error'); + const result = handleLambdaError(error, 'testAction'); + + expect(result).toBeDefined(); + expect(result.statusCode).toBe(500); + }); + }); + + describe('checkEnv', () => { + it('should handle empty array of required variables', () => { + expect(() => { + checkEnv([]); + }).not.toThrow(); + }); + + it('should handle custom list of required variables', () => { + process.env.CUSTOM_VAR1 = 'value'; + process.env.CUSTOM_VAR2 = 'value'; + + expect(() => { + checkEnv(['CUSTOM_VAR1', 'CUSTOM_VAR2']); + }).not.toThrow(); + + expect(() => { + checkEnv(['CUSTOM_VAR1', 'MISSING_VAR']); + }).toThrow( + 'Missing required environment variables: MISSING_VAR. This should not happen and indicates an issue with your deployment.' + ); + }); + }); + + describe('isValidArnWithRegexKey', () => { + describe('Valid ARNs', () => { + it('should validate bedrock-agentcore-identity-OAUTH ARNs', () => { + const validOAuthArn = 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/my-vault/oauth2credentialprovider/my-provider'; + expect(isValidArnWithRegexKey(validOAuthArn, 'bedrock-agentcore', 'bedrock-agentcore-identity-OAUTH')).toBe(true); + }); + + it('should validate bedrock-agentcore-identity-API_KEY ARNs', () => { + const validApiKeyArn = 'arn:aws:bedrock-agentcore:us-west-2:123456789012:token-vault/test-vault/apikeycredentialprovider/test-provider'; + expect(isValidArnWithRegexKey(validApiKeyArn, 'bedrock-agentcore', 'bedrock-agentcore-identity-API_KEY')).toBe(true); + }); + + it('should validate bedrock-agentcore-gateway ARNs', () => { + const validGatewayArn = 'arn:aws:bedrock-agentcore:eu-west-1:123456789012:gateway/my-gateway-123'; + expect(isValidArnWithRegexKey(validGatewayArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(true); + }); + + it('should validate lambda ARNs', () => { + const validLambdaArn = 'arn:aws:lambda:us-east-1:123456789012:function:my-function'; + expect(isValidArnWithRegexKey(validLambdaArn, 'lambda', 'lambda')).toBe(true); + + const validLambdaArnWithVersion = 'arn:aws:lambda:us-east-1:123456789012:function:my-function:1'; + expect(isValidArnWithRegexKey(validLambdaArnWithVersion, 'lambda', 'lambda')).toBe(true); + }); + }); + + describe('Invalid ARN format', () => { + it('should reject invalid ARN format', () => { + const invalidArn = 'not-an-arn'; + expect(isValidArnWithRegexKey(invalidArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + + it('should reject malformed ARN', () => { + const malformedArn = 'arn:aws:bedrock-agentcore'; + expect(isValidArnWithRegexKey(malformedArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + + it('should reject ARN with invalid characters', () => { + const invalidArn = 'arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/invalid@gateway'; + expect(isValidArnWithRegexKey(invalidArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + }); + + describe('Wrong service validation', () => { + it('should reject ARNs with wrong service', () => { + const wrongServiceArn = 'arn:aws:s3:us-east-1:123456789012:gateway/my-gateway'; + expect(isValidArnWithRegexKey(wrongServiceArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + + it('should reject lambda ARN when expecting bedrock-agentcore', () => { + const lambdaArn = 'arn:aws:lambda:us-east-1:123456789012:function:my-function'; + expect(isValidArnWithRegexKey(lambdaArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + }); + + describe('Invalid region/account validation', () => { + it('should reject ARNs with missing region', () => { + const noRegionArn = 'arn:aws:bedrock-agentcore::123456789012:gateway/my-gateway'; + expect(isValidArnWithRegexKey(noRegionArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + + it('should reject ARNs with invalid account ID', () => { + const invalidAccountArn = 'arn:aws:bedrock-agentcore:us-east-1:invalid-account:gateway/my-gateway'; + expect(isValidArnWithRegexKey(invalidAccountArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + + it('should reject ARNs with short account ID', () => { + const shortAccountArn = 'arn:aws:bedrock-agentcore:us-east-1:123456789:gateway/my-gateway'; + expect(isValidArnWithRegexKey(shortAccountArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + }); + + describe('Wrong resource pattern validation', () => { + it('should reject gateway ARN with OAuth pattern', () => { + const gatewayArn = 'arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/my-gateway'; + expect(isValidArnWithRegexKey(gatewayArn, 'bedrock-agentcore', 'bedrock-agentcore-identity-OAUTH')).toBe(false); + }); + + it('should reject OAuth ARN with gateway pattern', () => { + const oauthArn = 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/vault/oauth2credentialprovider/provider'; + expect(isValidArnWithRegexKey(oauthArn, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + + it('should reject lambda ARN with wrong resource format', () => { + const invalidLambdaArn = 'arn:aws:lambda:us-east-1:123456789012:invalid-resource'; + expect(isValidArnWithRegexKey(invalidLambdaArn, 'lambda', 'lambda')).toBe(false); + }); + }); + + describe('Unknown regex key validation', () => { + it('should reject ARNs with unknown regex key', () => { + const validArn = 'arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/my-gateway'; + expect(isValidArnWithRegexKey(validArn, 'bedrock-agentcore', 'unknown-regex-key')).toBe(false); + }); + + it('should reject ARNs with empty regex key', () => { + const validArn = 'arn:aws:bedrock-agentcore:us-east-1:123456789012:gateway/my-gateway'; + expect(isValidArnWithRegexKey(validArn, 'bedrock-agentcore', '')).toBe(false); + }); + }); + + describe('Edge cases', () => { + it('should handle empty ARN string', () => { + expect(isValidArnWithRegexKey('', 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + + it('should handle null/undefined ARN', () => { + expect(isValidArnWithRegexKey(null as any, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + expect(isValidArnWithRegexKey(undefined as any, 'bedrock-agentcore', 'bedrock-agentcore-gateway')).toBe(false); + }); + }); + }); +}); diff --git a/source/lambda/use-case-management/test/workflows-handler.test.ts b/source/lambda/use-case-management/test/workflows-handler.test.ts new file mode 100644 index 00000000..467431e8 --- /dev/null +++ b/source/lambda/use-case-management/test/workflows-handler.test.ts @@ -0,0 +1,481 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { APIGatewayEvent } from 'aws-lambda'; +import { workflowsLambdaHandler, workflowsHandler } from '../workflows-handler'; +import * as httpFormattersModule from '../utils/http-response-formatters'; +import { + ARTIFACT_BUCKET_ENV_VAR, + COGNITO_POLICY_TABLE_ENV_VAR, + FILES_METADATA_TABLE_NAME_ENV_VAR, + IS_INTERNAL_USER_ENV_VAR, + MODEL_INFO_TABLE_NAME_ENV_VAR, + MULTIMODAL_DATA_BUCKET_ENV_VAR, + POWERTOOLS_METRICS_NAMESPACE_ENV_VAR, + TEMPLATE_FILE_EXTN_ENV_VAR, + USER_POOL_ID_ENV_VAR, + USE_CASES_TABLE_NAME_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR, + GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR, + SHARED_ECR_CACHE_PREFIX_ENV_VAR, + AMAZON_TRACE_ID_HEADER +} from '../utils/constants'; + +jest.mock('../power-tools-init', () => ({ + logger: { + error: jest.fn() + }, + metrics: {}, + tracer: { + getRootXrayTraceId: jest.fn().mockReturnValue('test-trace-id'), + captureMethod: jest.fn(() => () => {}), + captureAWSv3Client: jest.fn() + } +})); + +jest.mock('../utils/http-response-formatters'); + +jest.mock('aws-node-user-agent-config', () => ({ + customAwsConfig: jest.fn().mockReturnValue({}) +})); + +// Mock the command classes +jest.mock('../model/commands/use-case-command', () => ({ + CreateUseCaseCommand: jest.fn().mockImplementation(() => ({ + execute: jest.fn().mockResolvedValue({ message: 'success' }) + })), + UpdateUseCaseCommand: jest.fn().mockImplementation(() => ({ + execute: jest.fn().mockResolvedValue({ message: 'success' }) + })), + DeleteUseCaseCommand: jest.fn().mockImplementation(() => ({ + execute: jest.fn().mockResolvedValue({ message: 'success' }) + })), + PermanentlyDeleteUseCaseCommand: jest.fn().mockImplementation(() => ({ + execute: jest.fn().mockResolvedValue({ message: 'success' }) + })), + ListUseCasesCommand: jest.fn().mockImplementation(() => ({ + execute: jest.fn().mockResolvedValue({ message: 'success' }) + })), + GetUseCaseCommand: jest.fn().mockImplementation(() => ({ + execute: jest.fn().mockResolvedValue({ message: 'success' }) + })) +})); + +// Mock the adapter classes +jest.mock('../model/adapters/workflow-use-case-adapter', () => ({ + WorkflowUseCaseDeploymentAdapter: jest.fn().mockImplementation(() => ({})), + WorkflowUseCaseInfoAdapter: jest.fn().mockImplementation(() => ({})) +})); + +jest.mock('../model/list-use-cases', () => ({ + ListUseCasesAdapter: jest.fn().mockImplementation(() => ({})) +})); + +jest.mock('../model/get-use-case', () => ({ + GetUseCaseAdapter: jest.fn().mockImplementation(() => ({})) +})); + +// Mock utility functions +jest.mock('../utils/utils', () => ({ + ...jest.requireActual('../utils/utils'), + getRootResourceId: jest.fn().mockResolvedValue('test-root-resource-id'), + parseEventBody: jest.fn().mockReturnValue({ UseCaseType: 'Workflow' }), + handleLambdaError: jest.fn().mockReturnValue({ statusCode: 400, body: 'error' }) +})); + +describe('Workflows Handler Tests', () => { + const mockFormatResponse = jest.mocked(httpFormattersModule.formatResponse); + const mockFormatError = jest.mocked(httpFormattersModule.formatError); + + beforeEach(() => { + jest.clearAllMocks(); + mockFormatResponse.mockReturnValue({ statusCode: 200, body: 'success' } as any); + mockFormatError.mockReturnValue({ statusCode: '400', body: 'error' } as any); + + // Set required environment variables + process.env.AWS_SDK_USER_AGENT = JSON.stringify({ customUserAgent: 'test-agent' }); + process.env[AMAZON_TRACE_ID_HEADER] = 'test-trace-id'; + process.env[POWERTOOLS_METRICS_NAMESPACE_ENV_VAR] = 'test'; + process.env[USE_CASES_TABLE_NAME_ENV_VAR] = 'test-table'; + process.env[USE_CASE_CONFIG_TABLE_NAME_ENV_VAR] = 'test-config-table'; + process.env[GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR] = 'test-bucket'; + + // Set additional required environment variables for workflows handler + process.env[COGNITO_POLICY_TABLE_ENV_VAR] = 'test-cognito-policy-table'; + process.env[USER_POOL_ID_ENV_VAR] = 'test-user-pool-id'; + process.env[ARTIFACT_BUCKET_ENV_VAR] = 'test-artifact-bucket'; + process.env[MODEL_INFO_TABLE_NAME_ENV_VAR] = 'test-model-info-table'; + process.env[TEMPLATE_FILE_EXTN_ENV_VAR] = '.json'; + process.env[IS_INTERNAL_USER_ENV_VAR] = 'false'; + process.env[DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR] = 'test-platform-stack'; + process.env[SHARED_ECR_CACHE_PREFIX_ENV_VAR] = 'test-ecr-prefix'; + process.env[FILES_METADATA_TABLE_NAME_ENV_VAR] = 'test-multimodal-table'; + process.env[MULTIMODAL_DATA_BUCKET_ENV_VAR] = 'test-multimodal-bucket'; + }); + + describe('workflowsLambdaHandler', () => { + describe('LIST workflows - GET /deployments/workflows', () => { + it('should successfully list workflows', async () => { + const mockEvent = { + httpMethod: 'GET', + resource: '/deployments/workflows', + body: null, + pathParameters: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(mockFormatResponse).toHaveBeenCalledWith({ message: 'success' }); + expect(result.statusCode).toBe(200); + }); + + it('should handle list workflows error', async () => { + // Mock formatResponse to throw an error to simulate command failure + mockFormatResponse.mockImplementationOnce(() => { + throw new Error('Database connection failed'); + }); + + const mockEvent = { + httpMethod: 'GET', + resource: '/deployments/workflows', + body: null, + pathParameters: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(result.statusCode).toBe(400); + }); + }); + + describe('CREATE workflow - POST /deployments/workflows', () => { + it('should successfully create a workflow', async () => { + const mockEvent = { + httpMethod: 'POST', + resource: '/deployments/workflows', + body: JSON.stringify({ + UseCaseType: 'Workflow', + UseCaseName: 'Test Workflow', + UseCaseDescription: 'A test workflow for validation' + }), + pathParameters: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(mockFormatResponse).toHaveBeenCalledWith({ message: 'success' }); + expect(result.statusCode).toBe(200); + }); + + it('should handle create workflow validation error', async () => { + // Mock formatResponse to throw an error to simulate validation failure + mockFormatResponse.mockImplementationOnce(() => { + throw new Error('Invalid workflow configuration'); + }); + + const mockEvent = { + httpMethod: 'POST', + resource: '/deployments/workflows', + body: JSON.stringify({ + UseCaseType: 'Workflow', + UseCaseName: '', // Invalid empty name + UseCaseDescription: 'A test workflow' + }), + pathParameters: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(result.statusCode).toBe(400); + }); + }); + + describe('GET single workflow - GET /deployments/workflows/{useCaseId}', () => { + it('should successfully get a workflow by ID', async () => { + const mockEvent = { + httpMethod: 'GET', + resource: '/deployments/workflows/{useCaseId}', + pathParameters: { 'useCaseId': 'test-workflow-123' }, + body: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows/test-workflow-123', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(mockFormatResponse).toHaveBeenCalledWith({ message: 'success' }); + expect(result.statusCode).toBe(200); + }); + + it('should handle get workflow not found error', async () => { + // Mock formatResponse to throw an error to simulate not found + mockFormatResponse.mockImplementationOnce(() => { + throw new Error('Workflow not found'); + }); + + const mockEvent = { + httpMethod: 'GET', + resource: '/deployments/workflows/{useCaseId}', + pathParameters: { 'useCaseId': 'non-existent-workflow' }, + body: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows/non-existent-workflow', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(result.statusCode).toBe(400); + }); + }); + + describe('UPDATE workflow - PATCH /deployments/workflows/{useCaseId}', () => { + it('should successfully update a workflow', async () => { + const mockEvent = { + httpMethod: 'PATCH', + resource: '/deployments/workflows/{useCaseId}', + pathParameters: { 'useCaseId': 'test-workflow-123' }, + body: JSON.stringify({ + UseCaseType: 'Workflow', + UseCaseName: 'Updated Test Workflow', + UseCaseDescription: 'An updated test workflow' + }), + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows/test-workflow-123', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(mockFormatResponse).toHaveBeenCalledWith({ message: 'success' }); + expect(result.statusCode).toBe(200); + }); + + it('should handle update workflow validation error', async () => { + // Mock formatResponse to throw an error to simulate validation failure + mockFormatResponse.mockImplementationOnce(() => { + throw new Error('Invalid update parameters'); + }); + + const mockEvent = { + httpMethod: 'PATCH', + resource: '/deployments/workflows/{useCaseId}', + pathParameters: { 'useCaseId': 'test-workflow-123' }, + body: JSON.stringify({ + UseCaseType: 'InvalidType', // Invalid use case type + UseCaseName: 'Updated Test Workflow' + }), + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows/test-workflow-123', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(result.statusCode).toBe(400); + }); + }); + + describe('DELETE workflow - DELETE /deployments/workflows/{useCaseId}', () => { + it('should successfully delete a workflow', async () => { + const mockEvent = { + httpMethod: 'DELETE', + resource: '/deployments/workflows/{useCaseId}', + pathParameters: { 'useCaseId': 'test-workflow-123' }, + body: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows/test-workflow-123', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(mockFormatResponse).toHaveBeenCalledWith({ message: 'success' }); + expect(result.statusCode).toBe(200); + }); + + it('should successfully permanently delete a workflow', async () => { + const mockEvent = { + httpMethod: 'DELETE', + resource: '/deployments/workflows/{useCaseId}', + pathParameters: { 'useCaseId': 'test-workflow-123' }, + body: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows/test-workflow-123', + queryStringParameters: { permanent: 'true' }, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(mockFormatResponse).toHaveBeenCalledWith({ message: 'success' }); + expect(result.statusCode).toBe(200); + }); + + it('should handle delete workflow error', async () => { + // Mock formatResponse to throw an error to simulate deletion failure + mockFormatResponse.mockImplementationOnce(() => { + throw new Error('Workflow deletion failed'); + }); + + const mockEvent = { + httpMethod: 'DELETE', + resource: '/deployments/workflows/{useCaseId}', + pathParameters: { 'useCaseId': 'protected-workflow' }, + body: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows/protected-workflow', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(result.statusCode).toBe(400); + }); + }); + + describe('Invalid routes and methods', () => { + it('should throw error for invalid HTTP method', async () => { + const mockEvent = { + httpMethod: 'PUT', // Invalid method + resource: '/deployments/workflows', + body: null, + pathParameters: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + // The handler should throw an error for invalid HTTP methods + await expect(workflowsLambdaHandler(mockEvent)).rejects.toThrow('Invalid HTTP method: PUT'); + }); + + it('should throw error for invalid resource path', async () => { + const mockEvent = { + httpMethod: 'GET', + resource: '/invalid/path', // Invalid resource + body: null, + pathParameters: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/invalid/path', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + // The handler should throw an error for invalid resource paths + await expect(workflowsLambdaHandler(mockEvent)).rejects.toThrow('Invalid HTTP method: GET'); + }); + }); + + describe('General error handling', () => { + it('should handle general execution errors', async () => { + mockFormatResponse.mockImplementationOnce(() => { + throw new Error('Unexpected error'); + }); + + const mockEvent = { + httpMethod: 'GET', + resource: '/deployments/workflows', + body: null, + pathParameters: null, + headers: {}, + multiValueHeaders: {}, + isBase64Encoded: false, + path: '/deployments/workflows', + queryStringParameters: null, + multiValueQueryStringParameters: null, + stageVariables: null, + requestContext: {} as any + } as APIGatewayEvent; + + const result = await workflowsLambdaHandler(mockEvent); + + expect(result.statusCode).toBe(400); + }); + }); + }); + + describe('workflowsHandler middleware', () => { + it('should export middy-wrapped handler', () => { + expect(workflowsHandler).toBeDefined(); + expect(typeof workflowsHandler).toBe('function'); + }); + }); +}); diff --git a/source/lambda/use-case-management/tsconfig.json b/source/lambda/use-case-management/tsconfig.json index aa1ce162..01b07c98 100644 --- a/source/lambda/use-case-management/tsconfig.json +++ b/source/lambda/use-case-management/tsconfig.json @@ -29,6 +29,9 @@ "moduleResolution": "Node", "rootDir": ".", "paths": { + "aws-sdk-lib": [ + "../layers/aws-sdk-lib/dist" + ], "aws-node-user-agent-config": [ "../layers/aws-node-user-agent-config/dist" ], @@ -85,7 +88,13 @@ ], "@aws-sdk/client-api-gateway": [ "../layers/aws-sdk-lib/node_modules/@aws-sdk/client-api-gateway" + ], + "@aws-sdk/client-s3": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/client-s3" + ], + "@aws-sdk/s3-presigned-post": [ + "../layers/aws-sdk-lib/node_modules/@aws-sdk/s3-presigned-post" ] - }, + } } } \ No newline at end of file diff --git a/source/lambda/use-case-management/index.ts b/source/lambda/use-case-management/use-case-handler.ts similarity index 50% rename from source/lambda/use-case-management/index.ts rename to source/lambda/use-case-management/use-case-handler.ts index 3e6b8273..30337233 100644 --- a/source/lambda/use-case-management/index.ts +++ b/source/lambda/use-case-management/use-case-handler.ts @@ -6,27 +6,23 @@ import { logMetrics } from '@aws-lambda-powertools/metrics/middleware'; import { captureLambdaHandler } from '@aws-lambda-powertools/tracer/middleware'; import middy from '@middy/core'; import { APIGatewayEvent } from 'aws-lambda'; -import { APIGatewayClient, GetResourcesCommand } from "@aws-sdk/client-api-gateway"; +import { CaseCommand } from './model/commands/case-command'; import { - CaseCommand, CreateUseCaseCommand, DeleteUseCaseCommand, ListUseCasesCommand, PermanentlyDeleteUseCaseCommand, - Status, UpdateUseCaseCommand, GetUseCaseCommand - -} from './command'; +} from './model/commands/use-case-command'; import { ListUseCasesAdapter } from './model/list-use-cases'; import { UseCase } from './model/use-case'; import { logger, metrics, tracer } from './power-tools-init'; -import { checkEnv } from './utils/check-env'; -import { formatError, formatResponse } from './utils/http-response-formatters'; -import RequestValidationError from './utils/error'; -import { ChatUseCaseDeploymentAdapter, ChatUseCaseInfoAdapter } from './model/chat-use-case-adapter'; -import { AgentUseCaseDeploymentAdapter } from './model/agent-use-case-adapter'; -import { UseCaseTypeFromApiEvent } from './utils/constants'; +import { checkEnv, handleLambdaError, getRootResourceId, parseEventBody, getStackAction } from './utils/utils'; +import { formatResponse } from './utils/http-response-formatters'; +import { ChatUseCaseDeploymentAdapter, ChatUseCaseInfoAdapter } from './model/adapters/chat-use-case-adapter'; +import { AgentUseCaseDeploymentAdapter } from './model/adapters/agent-use-case-adapter'; +import { Status, UseCaseTypeFromApiEvent } from './utils/constants'; import { GetUseCaseAdapter } from './model/get-use-case'; const commands: Map = new Map(); @@ -45,64 +41,12 @@ const routeMap = new Map([ ['DELETE:/deployments/{useCaseId}', 'delete'] ]); -const getStackAction = (event: APIGatewayEvent): string => { - const routeKey = `${event.httpMethod}:${event.resource}`; - const baseAction = routeMap.get(routeKey); - - if (!baseAction) { - logger.error(`Invalid HTTP method: ${event.httpMethod}, at resource: ${event.resource}`); - throw new Error(`Invalid HTTP method: ${event.httpMethod}, at resource: ${event.resource}`); - } - - // Special case for permanent delete - if (baseAction === 'delete' && event.queryStringParameters?.permanent === 'true') { - return 'permanentlyDelete'; - } - - return baseAction; -}; - -async function getRootResourceId(apiId: string): Promise { - logger.debug('Retrieving root resource ID', { apiId }); - const client = new APIGatewayClient({}); - - try { - const response = await client.send( - new GetResourcesCommand({ - restApiId: apiId - }) - ); - - const rootResource = response.items?.find(resource => resource.path === '/'); - - if (!rootResource?.id) { - const error = 'Could not find root resource'; - logger.error(error, { apiId }); - throw new Error(error); - } - - logger.debug('Successfully retrieved root resource ID', { - apiId, - rootResourceId: rootResource.id - }); - - return rootResource.id; - } catch (error) { - logger.error('Error retrieving root resource ID', { - apiId, - error: error as Error - }); - throw error; - } -} - - export const lambdaHandler = async (event: APIGatewayEvent) => { checkEnv(); - const stackAction = getStackAction(event); + const stackAction = getStackAction(event, routeMap); const command = commands.get(stackAction); - + if (!command) { logger.error(`Invalid action: ${stackAction}`); throw new Error(`Invalid action: ${stackAction}`); @@ -117,41 +61,25 @@ export const lambdaHandler = async (event: APIGatewayEvent) => { } return formatResponse(response); } catch (error: unknown) { - return handleError(error, stackAction); + const mcpAction = event.httpMethod && event.resource ? `${event.httpMethod}:${event.resource}` : 'unknown'; + return handleLambdaError(error, mcpAction, 'Usecase'); } }; -export const handleError = (error: unknown, stackAction: string) => { - const rootTraceId = tracer.getRootXrayTraceId(); - let errorMessage; - - if (error instanceof RequestValidationError) { - logger.error(`Validation of request failed with error: ${error}`); - logger.error(`Error while validating request for action: ${stackAction}, root trace id: ${rootTraceId}`); - errorMessage = `Request Validation Error - Please contact support and quote the following trace id: ${rootTraceId}`; - } else { - logger.error(`${error}`); - logger.error(`Error while executing action: ${stackAction}, root trace id: ${rootTraceId}`); - errorMessage = `Internal Error - Please contact support and quote the following trace id: ${rootTraceId}`; - } - return formatError({ - message: errorMessage, - extraHeaders: { '_X_AMZN_TRACE_ID': rootTraceId as string } - }); -}; - -export const adaptEvent = async (event: APIGatewayEvent, stackAction: string): Promise => { +export const adaptEvent = async ( + event: APIGatewayEvent, + stackAction: string +): Promise => { if (stackAction === 'list') { return new ListUseCasesAdapter(event); } else if (stackAction === 'delete' || stackAction === 'permanentlyDelete') { return new ChatUseCaseInfoAdapter(event); - } - else if (stackAction === 'get') { + } else if (stackAction === 'get') { return new GetUseCaseAdapter(event); } // Parse the event body - const eventBody = JSON.parse(event.body || '{}'); + const eventBody = parseEventBody(event); const useCaseType = eventBody.UseCaseType; // Only get root resource ID when ExistingRestApiId is provided diff --git a/source/lambda/use-case-management/utils/check-env.ts b/source/lambda/use-case-management/utils/check-env.ts deleted file mode 100644 index 3eb0e122..00000000 --- a/source/lambda/use-case-management/utils/check-env.ts +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env node -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -import { logger } from '../power-tools-init'; -import { REQUIRED_ENV_VARS } from './constants'; - -export const checkEnv = () => { - let missingVars = []; - for (let envVar of REQUIRED_ENV_VARS) { - if (!process.env[envVar]) { - missingVars.push(envVar); - } - } - if (missingVars.length > 0) { - const errMsg = `Missing required environment variables: ${missingVars.join( - ', ' - )}. This should not happen and indicates in issue with your deployment.`; - logger.error(errMsg); - throw new Error(errMsg); - } -}; diff --git a/source/lambda/use-case-management/utils/constants.ts b/source/lambda/use-case-management/utils/constants.ts index 9cf53a28..9a17bb16 100644 --- a/source/lambda/use-case-management/utils/constants.ts +++ b/source/lambda/use-case-management/utils/constants.ts @@ -2,6 +2,8 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +export const MAX_INPUT_PAYLOAD_SIZE = 1024 * 1024; // 1MB for lambda event payload +export const MAX_INPUT_PAYLOAD_OBJECT_DEPTH = 10; // Maximum allowed object depth to prevent deeply nested objects export const COGNITO_POLICY_TABLE_ENV_VAR = 'COGNITO_POLICY_TABLE_NAME'; export const USER_POOL_ID_ENV_VAR = 'USER_POOL_ID'; export const CLIENT_ID_ENV_VAR = 'CLIENT_ID'; @@ -16,8 +18,11 @@ export const TEMPLATE_FILE_EXTN_ENV_VAR = 'TEMPLATE_FILE_EXTN'; export const IS_INTERNAL_USER_ENV_VAR = 'IS_INTERNAL_USER'; export const USE_CASE_CONFIG_TABLE_NAME_ENV_VAR = 'USE_CASE_CONFIG_TABLE_NAME'; export const CFN_DEPLOY_ROLE_ARN_ENV_VAR = 'CFN_DEPLOY_ROLE_ARN'; +export const FILES_METADATA_TABLE_NAME_ENV_VAR = 'MULTIMODAL_METADATA_TABLE_NAME'; +export const MULTIMODAL_DATA_BUCKET_ENV_VAR = 'MULTIMODAL_DATA_BUCKET'; export const INFERENCE_PROFILE = 'inference-profile'; export const STACK_DEPLOYMENT_SOURCE_USE_CASE = 'UseCase'; +export const AMAZON_TRACE_ID_HEADER = '_X_AMZN_TRACE_ID'; export const REQUIRED_ENV_VARS = [ COGNITO_POLICY_TABLE_ENV_VAR, @@ -30,10 +35,33 @@ export const REQUIRED_ENV_VARS = [ IS_INTERNAL_USER_ENV_VAR, USE_CASE_CONFIG_TABLE_NAME_ENV_VAR ]; +export const GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR = 'GAAB_DEPLOYMENTS_BUCKET'; + +export const DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR = 'DEPLOYMENT_PLATFORM_STACK_NAME'; +export const SHARED_ECR_CACHE_PREFIX_ENV_VAR = 'SHARED_ECR_CACHE_PREFIX'; +export const STRANDS_TOOLS_SSM_PARAM_ENV_VAR = 'STRANDS_TOOLS_SSM_PARAM'; + +export const REQUIRED_MCP_ENV_VARS = [ + POWERTOOLS_METRICS_NAMESPACE_ENV_VAR, + USE_CASES_TABLE_NAME_ENV_VAR, + USE_CASE_CONFIG_TABLE_NAME_ENV_VAR, + GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR, + STRANDS_TOOLS_SSM_PARAM_ENV_VAR +]; + +export const AGENT_CORE_DEPLOYMENT_REQUIRED_ENV_VARS = [ + ...REQUIRED_ENV_VARS, + GAAB_DEPLOYMENTS_BUCKET_NAME_ENV_VAR, + DEPLOYMENT_PLATFORM_STACK_NAME_ENV_VAR, + SHARED_ECR_CACHE_PREFIX_ENV_VAR, + FILES_METADATA_TABLE_NAME_ENV_VAR, + MULTIMODAL_DATA_BUCKET_ENV_VAR +]; export const DEFAULT_LIST_USE_CASES_PAGE_SIZE = 10; export const TTL_SECONDS = 60 * 60 * 24 * 89; // 89 days, 90 days CFN deleted stack is not available +export const CONFIG_TTL_SECONDS = 60 * 10; // 10 minutes for config cleanup export const DYNAMODB_TTL_ATTRIBUTE_NAME = 'TTL'; export const DDB_SCAN_RECORDS_LIMIT = 500; @@ -78,9 +106,16 @@ export enum CloudWatchMetrics { UC_DESCRIBE_FAILURE = 'UCDescribeFailure' } +export enum Status { + SUCCESS = 'SUCCESS', + FAILED = 'FAILED' +} + export const enum CHAT_PROVIDERS { BEDROCK = 'Bedrock', - SAGEMAKER = 'SageMaker' + SAGEMAKER = 'SageMaker', + AGENT_CORE = 'AgentCore', + BEDROCK_AGENT = 'BedrockAgent' } export const enum ModelInfoTableKeys { @@ -93,12 +128,17 @@ export const enum ModelInfoTableKeys { export const enum UseCaseTypes { CHAT = 'Chat', RAGChat = 'RAGChat', - AGENT = 'Agent' + AGENT = 'Agent', + AGENT_BUILDER = 'AgentBuilder', + MCP_SERVER = 'MCPServer', + WORKFLOW = 'Workflow' } export const enum UseCaseTypeFromApiEvent { TEXT = 'Text', - AGENT = 'Agent' + AGENT = 'Agent', + AGENT_BUILDER = 'AgentBuilder', + WORKFLOW = 'Workflow' } export const enum AgentProviders { @@ -110,6 +150,11 @@ export const enum KnowledgeBaseTypes { BEDROCK = 'Bedrock' } +export enum OUTBOUND_AUTH_PROVIDER_TYPES { + API_KEY = 'API_KEY', + OAUTH = 'OAUTH' +} + export const enum CfnParameterKeys { KnowledgeBaseType = 'KnowledgeBaseType', BedrockKnowledgeBaseId = 'BedrockKnowledgeBaseId', @@ -125,6 +170,7 @@ export const enum CfnParameterKeys { ExistingPrivateSubnetIds = 'ExistingPrivateSubnetIds', ExistingSecurityGroupIds = 'ExistingSecurityGroupIds', ExistingCognitoUserPoolId = 'ExistingCognitoUserPoolId', + ComponentCognitoUserPoolId = 'ComponentCognitoUserPoolId', ExistingCognitoUserPoolClient = 'ExistingCognitoUserPoolClient', CognitoDomainPrefix = 'CognitoDomainPrefix', ExistingCognitoGroupPolicyTableName = 'ExistingCognitoGroupPolicyTableName', @@ -138,9 +184,19 @@ export const enum CfnParameterKeys { BedrockAgentAliasId = 'BedrockAgentAliasId', UseInferenceProfile = 'UseInferenceProfile', FeedbackEnabled = 'FeedbackEnabled', + ProvisionedConcurrencyValue = 'ProvisionedConcurrencyValue', ExistingRestApiId = 'ExistingRestApiId', ExistingApiRootResourceId = 'ExistingApiRootResourceId', - StackDeploymentSource = 'StackDeploymentSource' + StackDeploymentSource = 'StackDeploymentSource', + EcrUri = 'EcrUri', + S3BucketName = 'S3BucketName', + + // AgentCore deployment params + EnableLongTermMemory = 'EnableLongTermMemory', + SharedEcrCachePrefix = 'SharedEcrCachePrefix', + MultimodalEnabled = 'MultimodalEnabled', + ExistingMultimodalDataMetadataTable = 'ExistingMultimodalDataMetadataTable', + ExistingMultimodalDataBucket = 'ExistingMultimodalDataBucket' } export const enum CfnOutputKeys { @@ -164,9 +220,19 @@ export const RetainedCfnParameterKeys = [ CfnParameterKeys.ExistingCognitoUserPoolId ]; -export const ChatRequiredPlaceholders = { Bedrock: [], SageMaker: ['{input}', '{history}'] }; +export const ChatRequiredPlaceholders = { + Bedrock: [], + SageMaker: ['{input}', '{history}'], + AgentCore: [], // Agent Core doesn't use traditional prompt placeholders + BedrockAgent: [] // Bedrock Agent doesn't use traditional prompt placeholders +}; export const DisambiguationRequiredPlaceholders = ['{input}', '{history}']; -export const RAGChatRequiredPlaceholders = { Bedrock: ['{context}'], SageMaker: ['{input}', '{context}', '{history}'] }; +export const RAGChatRequiredPlaceholders = { + Bedrock: ['{context}'], + SageMaker: ['{input}', '{context}', '{history}'], + AgentCore: [], // Agent Core doesn't use traditional RAG placeholders + BedrockAgent: [] // Bedrock Agent doesn't use traditional RAG placeholders +}; export const RETRY_CONFIG = { maxRetries: 5, @@ -184,3 +250,77 @@ export const enum AUTHENTICATION_PROVIDERS { } export const SUPPORTED_AUTHENTICATION_PROVIDERS = [AUTHENTICATION_PROVIDERS.COGNITO]; + +// MCP Operation Types +export enum McpOperationTypes { + UPLOAD_SCHEMA = 'upload-schemas', + CREATE = 'create', + LIST = 'list', + GET = 'get', + UPDATE = 'update', + DELETE = 'delete', + DEPLOY = 'deploy', + PERMANENTLY_DELETE = 'permanentlyDelete' +} + +// Gateway Target Types for MCP Schema uploads +export enum GATEWAY_TARGET_TYPES { + LAMBDA = 'lambda', + OPEN_API = 'openApiSchema', + SMITHY = 'smithyModel' +} + +// Content types for MCP schema uploads - reusable across different constraints +export const MCP_CONTENT_TYPES = { + JSON: 'application/json', + YAML: 'application/yaml', + TEXT_YAML: 'text/yaml', + TEXT_PLAIN: 'text/plain' +} as const; + +// MCP Schema upload constraints +export const MCP_SCHEMA_UPLOAD_CONSTRAINTS = { + MIN_FILE_SIZE_BYTES: 1, // Prevents empty file uploads + MAX_FILE_SIZE_BYTES: 2 * 1024 * 1024, // 2MB + ALLOWED_CONTENT_TYPES: Object.values(MCP_CONTENT_TYPES), + PRESIGNED_URL_EXPIRY_SECONDS: 300 // 5 minutes +}; + +// Schema type specific file extension mappings +export const SCHEMA_TYPE_FILE_EXTENSIONS = { + [GATEWAY_TARGET_TYPES.LAMBDA]: [ + '.json' // Lambda JSON schema files + ], + [GATEWAY_TARGET_TYPES.OPEN_API]: [ + '.json', // OpenAPI JSON format + '.yaml', // OpenAPI YAML format + '.yml' // OpenAPI YAML format (alternative extension) + ], + [GATEWAY_TARGET_TYPES.SMITHY]: [ + '.smithy', // Smithy IDL files + '.json' // Smithy JSON representation + ] +}; +export const SUPPORTED_MCP_FILE_EXTENSIONS = [...new Set(Object.values(SCHEMA_TYPE_FILE_EXTENSIONS).flat())]; + +// Workflow orchestration patterns +export enum WORKFLOW_ORCHESTRATION_PATTERNS { + AGENTS_AS_TOOLS = 'agents-as-tools' +} + +export const SUPPORTED_WORKFLOW_ORCHESTRATION_PATTERNS: string[] = Object.values(WORKFLOW_ORCHESTRATION_PATTERNS); + +export const ARN_RESOURCE_REGEX_MAP: Record = { + // bedrock agentcore oauth2credentialprovider identity resources + 'bedrock-agentcore-identity-OAUTH': /^token-vault\/([A-Za-z0-9._-]+)\/oauth2credentialprovider\/([A-Za-z0-9._-]+)$/, + // bedrock agentcore apikeycredentialprovider identity resources + 'bedrock-agentcore-identity-API_KEY': + /^token-vault\/([A-Za-z0-9._-]+)\/apikeycredentialprovider\/([A-Za-z0-9._-]+)$/, + // bedrock agentcore gateway resources + 'bedrock-agentcore-gateway': /^gateway\/[a-zA-Z0-9-]+$/, + // lambda function resource + lambda: /^function:[^:]+(:[^:]+)?$/ +}; + +export const AGENT_CORE_SYSTEM_PROMPT_MAX_LENGTH = 60000; + diff --git a/source/lambda/use-case-management/utils/http-response-formatters.ts b/source/lambda/use-case-management/utils/http-response-formatters.ts index 5f3ce3cf..5bfd2a3f 100644 --- a/source/lambda/use-case-management/utils/http-response-formatters.ts +++ b/source/lambda/use-case-management/utils/http-response-formatters.ts @@ -11,11 +11,11 @@ * @returns */ export const formatResponse = ( - body: string | { [key: string]: string }, + body: string | { [key: string]: any }, extraHeaders: { [key: string]: string } = {} ) => { const defaultHeaders = { - 'Content-Type': 'application/json', + 'Content-Type':'application/json', 'Access-Control-Allow-Headers': 'Origin,X-Requested-With,Content-Type,Accept', 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET', 'Access-Control-Allow-Credentials': true, diff --git a/source/lambda/use-case-management/utils/utils.ts b/source/lambda/use-case-management/utils/utils.ts index 64a3d6d1..36c521f4 100644 --- a/source/lambda/use-case-management/utils/utils.ts +++ b/source/lambda/use-case-management/utils/utils.ts @@ -1,14 +1,147 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { RETRY_CONFIG } from './constants'; +import { APIGatewayEvent } from 'aws-lambda'; +import { randomUUID } from 'crypto'; +import { APIGatewayClient, GetResourcesCommand } from '@aws-sdk/client-api-gateway'; +import { AWSClientManager } from 'aws-sdk-lib'; +import { logger, tracer } from '../power-tools-init'; +import { REQUIRED_ENV_VARS, ARN_RESOURCE_REGEX_MAP, RETRY_CONFIG, MAX_INPUT_PAYLOAD_SIZE } from './constants'; +import { formatError } from './http-response-formatters'; +import RequestValidationError from './error'; +import { parse, validate } from '@aws-sdk/util-arn-parser'; +/** + * Interface for retry settings + */ export interface RetrySettings { maxRetries: number; backOffRate: number; initialDelayMs: number; } +/** + * Validates that required environment variables are set + * @param requiredVars - Array of required environment variable names + */ +export const checkEnv = (requiredVars: string[] = REQUIRED_ENV_VARS) => { + let missingVars = []; + for (let envVar of requiredVars) { + if (!process.env[envVar]) { + missingVars.push(envVar); + } + } + if (missingVars.length > 0) { + const errMsg = `Missing required environment variables: ${missingVars.join( + ', ' + )}. This should not happen and indicates an issue with your deployment.`; + logger.error(errMsg); + throw new Error(errMsg); + } +}; + +/** + * Retrieves the root resource ID for an API Gateway REST API + * @param apiId - The API Gateway REST API ID + * @returns Promise - The root resource ID + */ +export async function getRootResourceId(apiId: string): Promise { + logger.debug(`Retrieving root resource ID for apiId: ${apiId}`); + const client = AWSClientManager.getServiceClient('apigateway', tracer); + + try { + const response = await client.send( + new GetResourcesCommand({ + restApiId: apiId + }) + ); + + const rootResource = response.items?.find((resource) => resource.path === '/'); + + if (!rootResource?.id) { + const error = 'Could not find root resource'; + logger.error(`${error} for apiId: ${apiId}`); + throw new Error(error); + } + + logger.debug(`Successfully retrieved root resource ID: ${rootResource.id} for apiId: ${apiId}`); + + return rootResource.id; + } catch (error) { + logger.error(`Error retrieving root resource ID for apiId: ${apiId}, error: ${(error as Error).message}`); + throw error; + } +} + +/** + * Generic error handler for Lambda operations + * @param error - The error that occurred + * @param action - The action that was being performed + * @param context - Optional context for error messages (e.g., 'MCP', 'UseCase') + * @returns Formatted error response + */ +export const handleLambdaError = (error: unknown, action: string, context: string = ''): any => { + const rootTraceId = tracer.getRootXrayTraceId(); + let errorMessage; + const contextPrefix = context ? `${context} ` : ''; + + if (error instanceof RequestValidationError) { + logger.error(`Validation of ${contextPrefix} request failed with error: ${error}`); + logger.error( + `Error while validating ${contextPrefix} request for action: ${action}, root trace id: ${rootTraceId}` + ); + errorMessage = `Request Validation Error - Please contact support and quote the following trace id: ${rootTraceId}`; + } else { + logger.error(`${contextPrefix} Management Error: ${error}`); + logger.error(`Error while executing ${contextPrefix} action: ${action}, root trace id: ${rootTraceId}`); + errorMessage = `Internal Error - Please contact support and quote the following trace id: ${rootTraceId}`; + } + + return formatError({ + message: errorMessage, + extraHeaders: { '_X_AMZN_TRACE_ID': rootTraceId as string } + }); +}; + +/** + * Safely parses API Gateway event body with basic validations + * @param event - API Gateway event + * @returns Parsed and validated event body + * @throws RequestValidationError if validation fails + */ +export const parseEventBody = (event: APIGatewayEvent): any => { + const body = event.body || '{}'; + + if (body.length > MAX_INPUT_PAYLOAD_SIZE) { + logger.error(`Request body too large: ${body.length} bytes (max: ${MAX_INPUT_PAYLOAD_SIZE})`); + throw new RequestValidationError('Request body exceeds maximum allowed size'); + } + + if (typeof body !== 'string') { + logger.error('Request body must be a string'); + throw new RequestValidationError('Invalid request body format'); + } + + let parsed: any; + try { + parsed = JSON.parse(body); + } catch (error) { + logger.error(`Failed to parse JSON: ${(error as Error).message}`); + throw new RequestValidationError('Invalid JSON in request body'); + } + + if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) { + // Validate it's an object (not primitive or array) + throw new RequestValidationError('Invalid request format'); + } + + return parsed; +}; + +/** + * Gets retry settings for DynamoDB operations + * @returns RetrySettings object with default values + */ export function getRetrySettings(): RetrySettings { return { maxRetries: RETRY_CONFIG.maxRetries, @@ -17,10 +150,84 @@ export function getRetrySettings(): RetrySettings { }; } +/** + * Delays execution for the specified number of milliseconds + * @param delayMillis - Number of milliseconds to delay + * @returns Promise that resolves after the delay + */ export function delay(delayMillis: number): Promise { - return new Promise((resolve) => { - setTimeout(() => { - resolve(); - }, delayMillis); - }); + return new Promise((resolve) => setTimeout(resolve, delayMillis)); +} + +/** + * Extracts user ID from API Gateway event context + * @param event - The API Gateway event + * @returns The user ID from the authorizer context + * @throws Error if authorizer context or UserId is missing + */ +export function extractUserId(event: APIGatewayEvent): string { + if (!event.requestContext?.authorizer) { + throw new Error('Missing authorizer context in API Gateway event'); + } + + const userId = event.requestContext.authorizer.UserId; + if (!userId) { + throw new Error('Missing UserId in authorizer context'); + } + + return userId; +} + +/** + * Generates a UUID v4 string using the native crypto.randomUUID() method + * @param shortUUID - Optional flag to return only the first segment of the UUID + * @returns A UUID v4 string (full or shortened) + */ +export function generateUUID(shortUUID: boolean = false): string { + const generatedUuid = randomUUID(); + if (shortUUID) { + return generatedUuid.split('-')[0]; + } + return generatedUuid; +} + +const is12DigitAccount = (x?: string) => typeof x === 'string' && /^\d{12}$/.test(x); + +const hasRegion = (x?: string) => typeof x === 'string' && x.length > 0; + +export function isValidArnWithRegexKey(arn: string, service: string, regexKey: string): boolean { + if (!validate(arn)) return false; + + let parsed; + try { + parsed = parse(arn); + } catch { + return false; + } + + // Validate Arn based on Service + if (parsed.service !== service) return false; + // Validate Arn has region and account Id values + if (!hasRegion(parsed.region) || !is12DigitAccount(parsed.accountId)) return false; + + const resourceRe = ARN_RESOURCE_REGEX_MAP[regexKey]; + if (!resourceRe) return false; // unknown regex key + return resourceRe.test(parsed.resource); } + +export const getStackAction = (event: APIGatewayEvent, routeMap: Map): string => { + const routeKey = `${event.httpMethod}:${event.resource}`; + const baseAction = routeMap.get(routeKey); + + if (!baseAction) { + logger.error(`Invalid HTTP method: ${event.httpMethod}, at resource: ${event.resource}`); + throw new Error(`Invalid HTTP method: ${event.httpMethod}, at resource: ${event.resource}`); + } + + // Special case for permanent delete + if (baseAction === 'delete' && event.queryStringParameters?.permanent === 'true') { + return 'permanentlyDelete'; + } + + return baseAction; +}; diff --git a/source/lambda/use-case-management/workflows-handler.ts b/source/lambda/use-case-management/workflows-handler.ts new file mode 100644 index 00000000..b178cf9f --- /dev/null +++ b/source/lambda/use-case-management/workflows-handler.ts @@ -0,0 +1,121 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { injectLambdaContext } from '@aws-lambda-powertools/logger/middleware'; +import { logMetrics } from '@aws-lambda-powertools/metrics/middleware'; +import { captureLambdaHandler } from '@aws-lambda-powertools/tracer/middleware'; +import middy from '@middy/core'; +import { APIGatewayEvent } from 'aws-lambda'; +import { + CreateUseCaseCommand, + DeleteUseCaseCommand, + PermanentlyDeleteUseCaseCommand, + UpdateUseCaseCommand, + GetUseCaseCommand +} from './model/commands/use-case-command'; +import { ListUseCasesAdapter } from './model/list-use-cases'; +import { UseCase } from './model/use-case'; +import { logger, metrics, tracer } from './power-tools-init'; +import { getRootResourceId, parseEventBody, checkEnv, handleLambdaError } from './utils/utils'; +import { formatResponse } from './utils/http-response-formatters'; +import { + WorkflowUseCaseDeploymentAdapter, + WorkflowUseCaseInfoAdapter +} from './model/adapters/workflow-use-case-adapter'; +import { AGENT_CORE_DEPLOYMENT_REQUIRED_ENV_VARS, Status, UseCaseTypeFromApiEvent } from './utils/constants'; +import { GetUseCaseAdapter } from './model/get-use-case'; +import { CaseCommand } from './model/commands/case-command'; +import { ListWorkflowCommand } from './model/commands/workflow-command'; + +const commands: Map = new Map(); +commands.set('create', new CreateUseCaseCommand()); +commands.set('update', new UpdateUseCaseCommand()); +commands.set('delete', new DeleteUseCaseCommand()); +commands.set('permanentlyDelete', new PermanentlyDeleteUseCaseCommand()); +commands.set('list', new ListWorkflowCommand()); +commands.set('get', new GetUseCaseCommand()); + +const routeMap = new Map([ + ['GET:/deployments/workflows', 'list'], + ['POST:/deployments/workflows', 'create'], + ['GET:/deployments/workflows/{useCaseId}', 'get'], + ['PATCH:/deployments/workflows/{useCaseId}', 'update'], + ['DELETE:/deployments/workflows/{useCaseId}', 'delete'] +]); + +const getStackAction = (event: APIGatewayEvent): string => { + const routeKey = `${event.httpMethod}:${event.resource}`; + const baseAction = routeMap.get(routeKey); + + if (!baseAction) { + logger.error(`Invalid HTTP method: ${event.httpMethod}, at resource: ${event.resource}`); + throw new Error(`Invalid HTTP method: ${event.httpMethod}, at resource: ${event.resource}`); + } + + // Special case for permanent delete + if (baseAction === 'delete' && event.queryStringParameters?.permanent === 'true') { + return 'permanentlyDelete'; + } + + return baseAction; +}; + +export const workflowsLambdaHandler = async (event: APIGatewayEvent) => { + checkEnv(AGENT_CORE_DEPLOYMENT_REQUIRED_ENV_VARS); + + const stackAction = getStackAction(event); + const command = commands.get(stackAction); + + if (!command) { + logger.error(`Invalid action: ${stackAction}`); + throw new Error(`Invalid action: ${stackAction}`); + } + try { + const response = await command.execute(await adaptEvent(event, stackAction)); + + // as create stack and update stack failures don't throw error, but returns a Failure response + // to render a 500 request in the UI the following error is + if (response === Status.FAILED) { + throw new Error('Command execution failed'); + } + return formatResponse(response); + } catch (error: unknown) { + const workflowAction = event.httpMethod && event.resource ? `${event.httpMethod}:${event.resource}` : 'unknown'; + return handleLambdaError(error, workflowAction, 'Workflow'); + } +}; + +export const adaptEvent = async ( + event: APIGatewayEvent, + stackAction: string +): Promise => { + if (stackAction === 'list') { + return new ListUseCasesAdapter(event); + } else if (stackAction === 'delete' || stackAction === 'permanentlyDelete') { + return new WorkflowUseCaseInfoAdapter(event); + } else if (stackAction === 'get') { + return new GetUseCaseAdapter(event); + } + + // Parse the event body + const eventBody = parseEventBody(event); + const useCaseType = eventBody.UseCaseType; + + // Only get root resource ID when ExistingRestApiId is provided + let rootResourceId; + if (eventBody.ExistingRestApiId) { + rootResourceId = await getRootResourceId(eventBody.ExistingRestApiId); + } + + // Create the appropriate adapter based on UseCaseType + if (useCaseType !== UseCaseTypeFromApiEvent.WORKFLOW) { + throw new Error(`Unsupported UseCaseType: ${useCaseType}`); + } + return new WorkflowUseCaseDeploymentAdapter(event, rootResourceId); +}; + +export const workflowsHandler = middy(workflowsLambdaHandler).use([ + captureLambdaHandler(tracer), + injectLambdaContext(logger), + logMetrics(metrics) +]); \ No newline at end of file diff --git a/source/lambda/websocket-connectors/package-lock.json b/source/lambda/websocket-connectors/package-lock.json index e00dbb00..22594e28 100644 --- a/source/lambda/websocket-connectors/package-lock.json +++ b/source/lambda/websocket-connectors/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/websocket-connectors", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@amzn/websocket-connectors", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "devDependencies": { "@types/jest": "^29.5.14", @@ -16,7 +16,7 @@ "aws-sdk-client-mock": "^4.1.0", "eslint": "^9.16.0", "jest": "^29.7.0", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" @@ -785,10 +785,11 @@ } }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -3674,9 +3675,9 @@ "dev": true }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -4229,9 +4230,9 @@ } }, "node_modules/prettier": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", - "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, "bin": { "prettier": "bin/prettier.cjs" @@ -5583,9 +5584,9 @@ } }, "js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "requires": { "argparse": "^1.0.7", @@ -7748,9 +7749,9 @@ "dev": true }, "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "requires": { "argparse": "^2.0.1" @@ -8175,9 +8176,9 @@ "dev": true }, "prettier": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", - "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true }, "pretty-format": { diff --git a/source/lambda/websocket-connectors/package.json b/source/lambda/websocket-connectors/package.json index fcaa98a0..28bcda3c 100644 --- a/source/lambda/websocket-connectors/package.json +++ b/source/lambda/websocket-connectors/package.json @@ -1,6 +1,6 @@ { "name": "@amzn/websocket-connectors", - "version": "3.0.7", + "version": "4.0.0", "description": "This lambda function is used to handle connect and disconnect requests", "main": "connect-handler.js", "scripts": { @@ -12,7 +12,7 @@ "code-linter-js": "./node_modules/eslint/bin/eslint.js lambda --ext .js", "code-linter-ts": "./node_modules/eslint/bin/eslint.js bin lib --ext .ts", "code-linter": "npm run code-linter-ts && npm run code-linter-js", - "code-formatter": "./node_modules/prettier/bin-prettier.js --config .prettierrc.yml '**/*.ts' '**/*.js' --write" + "code-formatter": "prettier --config ../../../.prettierrc.yml --ignore-path ../../../.prettierignore --write '**/*.{js,ts,json,css,md}'" }, "author": { "name": "Amazon Web Services", @@ -27,7 +27,7 @@ "aws-sdk-client-mock": "^4.1.0", "eslint": "^9.16.0", "jest": "^29.7.0", - "prettier": "^3.4.2", + "prettier": "^3.6.2", "ts-jest": "^29.2.5", "ts-node": "^10.9.2", "typescript": "^5.7.2" diff --git a/source/model-info/chat-bedrock-ai21.jamba-1-5-large-v1.json b/source/model-info/chat-bedrock-ai21.jamba-1-5-large-v1.json deleted file mode 100644 index 435890cd..00000000 --- a/source/model-info/chat-bedrock-ai21.jamba-1-5-large-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "ai21.jamba-1-5-large-v1:0", - "DisplayName": "Jamba 1.5 Large", - "Description": "The Jamba 1.5 Model Family has a 256K token effective context window, one of the largest on the market. Jamba 1.5 models focus on speed and efficiency, delivering up to 2.5x faster inference than leading models of comparable size. Jamba supports function calling/ tool use, structured output (JSON) and documents API.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "1", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "AI", - "human_prefix": "Human", - "output": null - }, - "MaxPromptSize": 480000, - "MaxChatMessageSize": 480000 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-ai21.jamba-1-5-mini-v1.json b/source/model-info/chat-bedrock-ai21.jamba-1-5-mini-v1.json deleted file mode 100644 index 2fd4b698..00000000 --- a/source/model-info/chat-bedrock-ai21.jamba-1-5-mini-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "ai21.jamba-1-5-mini-v1:0", - "DisplayName": "Jamba 1.5 Mini", - "Description": "The Jamba 1.5 Model Family has a 256K token effective context window, one of the largest on the market. Jamba 1.5 models focus on speed and efficiency, delivering up to 2.5x faster inference than leading models of comparable size. Jamba supports function calling/ tool use, structured output (JSON) and documents API.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "1", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "AI", - "human_prefix": "Human", - "output": null - }, - "MaxPromptSize": 480000, - "MaxChatMessageSize": 480000 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-amazon-nova-lite-v1.json b/source/model-info/chat-bedrock-amazon-nova-lite-v1.json deleted file mode 100644 index db070490..00000000 --- a/source/model-info/chat-bedrock-amazon-nova-lite-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "amazon.nova-lite-v1:0", - "DisplayName": "Nova Lite", - "Description": "Nova Lite is a is a multimodal understanding foundation model. It is multilingual and can reason over text, images and videos.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "Bot", - "human_prefix": "User", - "output": null - }, - "MaxPromptSize": 562500, - "MaxChatMessageSize": 562500 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-amazon-nova-micro-v1.json b/source/model-info/chat-bedrock-amazon-nova-micro-v1.json deleted file mode 100644 index fe771500..00000000 --- a/source/model-info/chat-bedrock-amazon-nova-micro-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "amazon.nova-micro-v1:0", - "DisplayName": "Nova Micro", - "Description": "Nova Micro is a text - text understanding foundation model. It is multilingual and can reason over text.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "Bot", - "human_prefix": "User", - "output": null - }, - "MaxPromptSize": 240000, - "MaxChatMessageSize": 240000 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-amazon-nova-pro-v1.json b/source/model-info/chat-bedrock-amazon-nova-pro-v1.json deleted file mode 100644 index 0fb5803a..00000000 --- a/source/model-info/chat-bedrock-amazon-nova-pro-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "amazon.nova-pro-v1:0", - "DisplayName": "Nova Pro", - "Description": "Nova Pro is a multimodal understanding foundation model. It is multilingual and can reason over text, images and videos.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "Bot", - "human_prefix": "User", - "output": null - }, - "MaxPromptSize": 562500, - "MaxChatMessageSize": 562500 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-anthropic-claude-v3-haiku-v1.json b/source/model-info/chat-bedrock-anthropic-claude-v3-haiku-v1.json deleted file mode 100644 index 0be695f9..00000000 --- a/source/model-info/chat-bedrock-anthropic-claude-v3-haiku-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "anthropic.claude-3-haiku-20240307-v1:0", - "DisplayName": "Claude 3 Haiku", - "Description": "Claude 3 Haiku is Anthropic's fastest, most compact model for near-instant responsiveness. It answers simple queries and requests with speed. Customers will be able to build seamless AI experiences that mimic human interactions. Claude 3 Haiku can process images and return text outputs, and features a 200K context window.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "1", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "A", - "human_prefix": "H", - "output": null - }, - "MaxPromptSize": 375000, - "MaxChatMessageSize": 375000 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-anthropic-claude-v3.5-sonnet-v1.json b/source/model-info/chat-bedrock-anthropic-claude-v3.5-sonnet-v1.json deleted file mode 100644 index d813be2f..00000000 --- a/source/model-info/chat-bedrock-anthropic-claude-v3.5-sonnet-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "anthropic.claude-3-5-sonnet-20240620-v1:0", - "DisplayName": "Claude 3.5 Sonnet", - "Description": "Claude 3.5 Sonnet raises the industry bar for intelligence, outperforming competitor models and Claude 3 Opus on a wide range of evaluations, with the speed and cost of our mid-tier model, Claude 3 Sonnet.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "1", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "A", - "human_prefix": "H", - "output": null - }, - "MaxPromptSize": 375000, - "MaxChatMessageSize": 375000 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-cohere-command-r-plus-v1.json b/source/model-info/chat-bedrock-cohere-command-r-plus-v1.json deleted file mode 100644 index 4205c1bb..00000000 --- a/source/model-info/chat-bedrock-cohere-command-r-plus-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "cohere.command-r-plus-v1:0", - "DisplayName": "Command R+", - "Description": "Command R+ is a highly performant generative language model optimized for large scale production workloads.", - "AllowsStreaming": false, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "0.3", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "AI", - "human_prefix": "Human", - "output": null - }, - "MaxPromptSize": 240000, - "MaxChatMessageSize": 240000 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-cohere-command-r-v1.json b/source/model-info/chat-bedrock-cohere-command-r-v1.json deleted file mode 100644 index e6c463ba..00000000 --- a/source/model-info/chat-bedrock-cohere-command-r-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "cohere.command-r-v1:0", - "DisplayName": "Command R", - "Description": "Command R is a generative language model optimized for long-context tasks and large scale production workloads.", - "AllowsStreaming": false, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "0.3", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "AI", - "human_prefix": "Human", - "output": null - }, - "MaxPromptSize": 240000, - "MaxChatMessageSize": 240000 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-llama3-70b-instruct-v1.json b/source/model-info/chat-bedrock-llama3-70b-instruct-v1.json deleted file mode 100644 index 464b954e..00000000 --- a/source/model-info/chat-bedrock-llama3-70b-instruct-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "meta.llama3-70b-instruct-v1:0", - "DisplayName": "Llama 3 70B Instruct", - "Description": "Meta Llama 3 is an accessible, open large language model (LLM) designed for developers, researchers, and businesses to build, experiment, and responsibly scale their generative AI ideas. Part of a foundational system, it serves as a bedrock for innovation in the global community. Ideal for content creation, conversational AI, language understanding, R&D, and Enterprise applications.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "AI", - "human_prefix": "Human", - "output": null - }, - "MaxPromptSize": 15000, - "MaxChatMessageSize": 15000 -} \ No newline at end of file diff --git a/source/model-info/chat-bedrock-llama3-8b-instruct-v1.json b/source/model-info/chat-bedrock-llama3-8b-instruct-v1.json deleted file mode 100644 index 9c942dac..00000000 --- a/source/model-info/chat-bedrock-llama3-8b-instruct-v1.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "UseCase": "Chat", - "ModelProviderName": "Bedrock", - "ModelName": "meta.llama3-8b-instruct-v1:0", - "DisplayName": "Llama 3 8B Instruct", - "Description": "Meta Llama 3 is an accessible, open large language model (LLM) designed for developers, researchers, and businesses to build, experiment, and responsibly scale their generative AI ideas. Part of a foundational system, it serves as a bedrock for innovation in the global community. Ideal for limited computational power and resources, edge devices, and faster training times.", - "AllowsStreaming": true, - "Prompt": "You are a friendly AI assistant that is helpful, honest, and harmless.", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": null, - "ai_prefix": "AI", - "human_prefix": "Human", - "output": null - }, - "MaxPromptSize": 15000, - "MaxChatMessageSize": 15000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-ai21.jamba-1-5-large-v1.json b/source/model-info/ragchat-bedrock-ai21.jamba-1-5-large-v1.json deleted file mode 100644 index c3f6f076..00000000 --- a/source/model-info/ragchat-bedrock-ai21.jamba-1-5-large-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "ai21.jamba-1-5-large-v1:0", - "DisplayName": "Jamba 1.5 Large", - "Description": "The Jamba 1.5 Model Family has a 256K token effective context window, one of the largest on the market. Jamba 1.5 models focus on speed and efficiency, delivering up to 2.5x faster inference than leading models of comparable size. Jamba supports function calling/ tool use, structured output (JSON) and documents API.", - "AllowsStreaming": true, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "1", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "AI", - "human_prefix": "Human", - "output": null - }, - "MaxPromptSize": 480000, - "MaxChatMessageSize": 480000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-ai21.jamba-1-5-mini-v1.json b/source/model-info/ragchat-bedrock-ai21.jamba-1-5-mini-v1.json deleted file mode 100644 index 034856e7..00000000 --- a/source/model-info/ragchat-bedrock-ai21.jamba-1-5-mini-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "ai21.jamba-1-5-mini-v1:0", - "DisplayName": "Jamba 1.5 Mini", - "Description": "The Jamba 1.5 Model Family has a 256K token effective context window, one of the largest on the market. Jamba 1.5 models focus on speed and efficiency, delivering up to 2.5x faster inference than leading models of comparable size. Jamba supports function calling/ tool use, structured output (JSON) and documents API.", - "AllowsStreaming": true, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "1", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "AI", - "human_prefix": "Human", - "output": null - }, - "MaxPromptSize": 480000, - "MaxChatMessageSize": 480000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-amazon-nova-lite-v1.json b/source/model-info/ragchat-bedrock-amazon-nova-lite-v1.json deleted file mode 100644 index 3c82a05d..00000000 --- a/source/model-info/ragchat-bedrock-amazon-nova-lite-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "amazon.nova-lite-v1:0", - "DisplayName": "Nova Lite", - "Description": "Nova Lite is a is a multimodal understanding foundation model. It is multilingual and can reason over text, images and videos.", - "AllowsStreaming": true, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "Bot", - "human_prefix": "User", - "output": "answer" - }, - "MaxPromptSize": 562500, - "MaxChatMessageSize": 562500 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-amazon-nova-micro-v1.json b/source/model-info/ragchat-bedrock-amazon-nova-micro-v1.json deleted file mode 100644 index f4af540e..00000000 --- a/source/model-info/ragchat-bedrock-amazon-nova-micro-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "amazon.nova-micro-v1:0", - "DisplayName": "Nova Micro", - "Description": "Nova Micro is a text - text understanding foundation model. It is multilingual and can reason over text.", - "AllowsStreaming": true, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "Bot", - "human_prefix": "User", - "output": "answer" - }, - "MaxPromptSize": 240000, - "MaxChatMessageSize": 240000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-amazon-nova-pro-v1.json b/source/model-info/ragchat-bedrock-amazon-nova-pro-v1.json deleted file mode 100644 index 0340afa7..00000000 --- a/source/model-info/ragchat-bedrock-amazon-nova-pro-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "amazon.nova-pro-v1:0", - "DisplayName": "Nova Pro", - "Description": "Nova Pro is a multimodal understanding foundation model. It is multilingual and can reason over text, images and videos.", - "AllowsStreaming": true, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "Bot", - "human_prefix": "User", - "output": "answer" - }, - "MaxPromptSize": 562500, - "MaxChatMessageSize": 562500 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-anthropic-claude-v3-haiku-v1.json b/source/model-info/ragchat-bedrock-anthropic-claude-v3-haiku-v1.json deleted file mode 100644 index 17c9df10..00000000 --- a/source/model-info/ragchat-bedrock-anthropic-claude-v3-haiku-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "anthropic.claude-3-haiku-20240307-v1:0", - "DisplayName": "Claude 3 Haiku", - "Description": "Claude 3 Haiku is Anthropic's fastest, most compact model for near-instant responsiveness. It answers simple queries and requests with speed. Customers will be able to build seamless AI experiences that mimic human interactions. Claude 3 Haiku can process images and return text outputs, and features a 200K context window.", - "AllowsStreaming": true, - "Prompt": "\n\nYou are a friendly AI assistant. You provide answers only based on the provided reference passages.\n\nHere are reference passages in tags:\n\n{context}\n\n\nCarefully read the references above and thoughtfully answer the question below. If the answer can not be extracted from the references, then respond with \"Sorry I don't know\". It is very important that you only use information found within the references to answer. Try to be brief in your response.", - "DisambiguationPrompt": "\n\nHuman: Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat history:\n{history}\n\nFollow up question: {input}\n\nAssistant: Standalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "1", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "A", - "human_prefix": "H", - "output": "answer" - }, - "MaxPromptSize": 375000, - "MaxChatMessageSize": 375000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-anthropic-claude-v3.5-sonnet-v1.json b/source/model-info/ragchat-bedrock-anthropic-claude-v3.5-sonnet-v1.json deleted file mode 100644 index 3b285b7e..00000000 --- a/source/model-info/ragchat-bedrock-anthropic-claude-v3.5-sonnet-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "anthropic.claude-3-5-sonnet-20240620-v1:0", - "DisplayName": "Claude 3.5 Sonnet", - "Description": "Claude 3.5 Sonnet raises the industry bar for intelligence, outperforming competitor models and Claude 3 Opus on a wide range of evaluations, with the speed and cost of our mid-tier model, Claude 3 Sonnet.", - "AllowsStreaming": true, - "Prompt": "\n\nYou are a friendly AI assistant. You provide answers only based on the provided reference passages.\n\nHere are reference passages in tags:\n\n{context}\n\n\nCarefully read the references above and thoughtfully answer the question below. If the answer can not be extracted from the references, then respond with \"Sorry I don't know\". It is very important that you only use information found within the references to answer. Try to be brief in your response.", - "DisambiguationPrompt": "\n\nHuman: Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat history:\n{history}\n\nFollow up question: {input}\n\nAssistant: Standalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "1", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "A", - "human_prefix": "H", - "output": "answer" - }, - "MaxPromptSize": 375000, - "MaxChatMessageSize": 375000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-cohere-command-r-plus-v1.json b/source/model-info/ragchat-bedrock-cohere-command-r-plus-v1.json deleted file mode 100644 index b4582cd8..00000000 --- a/source/model-info/ragchat-bedrock-cohere-command-r-plus-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "cohere.command-r-plus-v1:0", - "DisplayName": "Command R+", - "Description": "Command R+ is a highly performant generative language model optimized for large scale production workloads.", - "AllowsStreaming": false, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "0.3", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "AI", - "human_prefix": "Human", - "output": "answer" - }, - "MaxPromptSize": 240000, - "MaxChatMessageSize": 240000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-cohere-command-r-v1.json b/source/model-info/ragchat-bedrock-cohere-command-r-v1.json deleted file mode 100644 index 314911b6..00000000 --- a/source/model-info/ragchat-bedrock-cohere-command-r-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "cohere.command-r-v1:0", - "DisplayName": "Command R", - "Description": "Command R is a generative language model optimized for long-context tasks and large scale production workloads.", - "AllowsStreaming": false, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "0.3", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "AI", - "human_prefix": "Human", - "output": "answer" - }, - "MaxPromptSize": 240000, - "MaxChatMessageSize": 240000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-llama3-70b-instruct-v1.json b/source/model-info/ragchat-bedrock-llama3-70b-instruct-v1.json deleted file mode 100644 index d583a8e2..00000000 --- a/source/model-info/ragchat-bedrock-llama3-70b-instruct-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "meta.llama3-70b-instruct-v1:0", - "DisplayName": "Llama 3 70B Instruct", - "Description": "Meta Llama 3 is an accessible, open large language model (LLM) designed for developers, researchers, and businesses to build, experiment, and responsibly scale their generative AI ideas. Part of a foundational system, it serves as a bedrock for innovation in the global community. Ideal for content creation, conversational AI, language understanding, R&D, and Enterprise applications.", - "AllowsStreaming": true, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "AI", - "human_prefix": "Human", - "output": "answer" - }, - "MaxPromptSize": 15000, - "MaxChatMessageSize": 15000 -} \ No newline at end of file diff --git a/source/model-info/ragchat-bedrock-llama3-8b-instruct-v1.json b/source/model-info/ragchat-bedrock-llama3-8b-instruct-v1.json deleted file mode 100644 index c0aa34f0..00000000 --- a/source/model-info/ragchat-bedrock-llama3-8b-instruct-v1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "UseCase": "RAGChat", - "ModelProviderName": "Bedrock", - "ModelName": "meta.llama3-8b-instruct-v1:0", - "DisplayName": "Llama 3 8B Instruct", - "Description": "Meta Llama 3 is an accessible, open large language model (LLM) designed for developers, researchers, and businesses to build, experiment, and responsibly scale their generative AI ideas. Part of a foundational system, it serves as a bedrock for innovation in the global community. Ideal for limited computational power and resources, edge devices, and faster training times.", - "AllowsStreaming": true, - "Prompt": "References:\n{context}\n\nCarefully read the reference passages above and try to truthfully answer the Human's question. If the answer is not explicitly contained within the references, respond with \"Sorry I don't know\". It is very important that you respond \"Sorry I don't know\" if the answer is not found within the references above. Do not make use of any information outside of the references. Try to be brief and write a response in no more than 5 complete sentences.", - "DisambiguationPrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{history}\nFollow Up Input: {input}\nStandalone question:", - "MaxTemperature": "1", - "DefaultTemperature": "0.5", - "MinTemperature": "0", - "DefaultStopSequences": [], - "MemoryConfig": { - "history": "history", - "input": "input", - "context": "context", - "ai_prefix": "AI", - "human_prefix": "Human", - "output": "answer" - }, - "MaxPromptSize": 15000, - "MaxChatMessageSize": 15000 -} \ No newline at end of file diff --git a/source/pre-build-ecr-images.sh b/source/pre-build-ecr-images.sh new file mode 100755 index 00000000..7e6b7b24 --- /dev/null +++ b/source/pre-build-ecr-images.sh @@ -0,0 +1,504 @@ +#!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Pre-build ECR images for local development +# This script builds ECR images locally when DIST_OUTPUT_BUCKET is not set (local deployments) +# Images are pushed to ECR during the asset staging phase (stage-assets.sh) + +[ "$DEBUG" == 'true' ] && set -x +set -e + +echo "=== Pre-building ECR Images ===" + +# Function to check prerequisites +check_prerequisites() { + local missing_tools=() + + # Check for required tools + if ! command -v docker >/dev/null 2>&1; then + missing_tools+=("docker") + fi + + if ! command -v aws >/dev/null 2>&1; then + missing_tools+=("aws") + fi + + if ! command -v yq >/dev/null 2>&1; then + missing_tools+=("yq") + fi + + if [ ${#missing_tools[@]} -gt 0 ]; then + echo "❌ Missing required tools:" + for tool in "${missing_tools[@]}"; do + echo " - $tool" + done + echo "" + echo "Please install the missing tools and try again." + echo "Installation instructions:" + echo " - Docker: https://docs.docker.com/get-docker/" + echo " - AWS CLI: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html" + echo " - yq: brew install yq (macOS) or https://github.com/mikefarah/yq#install" + exit 1 + fi + + echo "✅ All required tools are available (docker, aws, yq)" +} + +# Function to check AWS credentials +check_aws_credentials() { + echo "Checking AWS credentials..." + + # First check if AWS CLI is configured at all + if ! aws configure list >/dev/null 2>&1; then + echo "" + echo "❌ AWS CLI is not configured" + echo "" + echo "Please configure AWS credentials using one of the following methods:" + echo "" + echo "1. AWS CLI configure:" + echo " aws configure" + echo "" + echo "2. Environment variables:" + echo " export AWS_ACCESS_KEY_ID=your-access-key" + echo " export AWS_SECRET_ACCESS_KEY=your-secret-key" + echo " export AWS_SESSION_TOKEN=your-session-token # (if using temporary credentials)" + echo "" + echo "3. AWS SSO login:" + echo " aws sso login --profile your-profile" + echo "" + echo "4. AWS profiles:" + echo " export AWS_PROFILE=your-profile-name" + echo "" + echo "For more information, see:" + echo "https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html" + echo "" + return 1 + fi + + # Try to get AWS account ID to verify credentials are active + local test_output + test_output=$(aws sts get-caller-identity --query Account --output text 2>&1) + local aws_exit_code=$? + + if [ $aws_exit_code -ne 0 ] || [ -z "$test_output" ] || [[ "$test_output" == *"error"* ]] || [[ "$test_output" == *"Unable to locate credentials"* ]]; then + echo "" + echo "❌ AWS credentials are not active or valid" + echo "" + echo "Error details:" + echo "$test_output" + echo "" + echo "Common solutions:" + echo "" + echo "1. If using AWS SSO, ensure you're logged in:" + echo " aws sso login --profile your-profile" + echo "" + echo "2. If using temporary credentials, ensure they haven't expired:" + echo " aws sts get-caller-identity" + echo "" + echo "3. If using AWS profiles, ensure the correct profile is set:" + echo " export AWS_PROFILE=your-profile-name" + echo "" + echo "4. If using environment variables, ensure they are set and valid:" + echo " echo \$AWS_ACCESS_KEY_ID" + echo " echo \$AWS_SECRET_ACCESS_KEY" + echo "" + echo "5. Test your credentials:" + echo " aws sts get-caller-identity" + echo "" + echo "For more troubleshooting, see:" + echo "https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-troubleshooting.html" + echo "" + return 1 + fi + + echo "✅ AWS credentials are active" + echo "AWS Account ID: $test_output" + + # Set the account ID for later use + AWS_ACCOUNT_ID="$test_output" + return 0 +} + + +# Check if this is a local deployment (no DIST_OUTPUT_BUCKET) +if [ -z "$DIST_OUTPUT_BUCKET" ]; then + # Check prerequisites first + check_prerequisites + + # Check AWS credentials early to provide clear error messages + if ! check_aws_credentials; then + exit 1 + fi + + echo "Local deployment detected - building ECR images locally" + + # Store current directory and determine script location + execution_dir="$PWD" + script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + echo "Execution directory: $execution_dir" + echo "Script location: $script_dir" + + # Define paths relative to script location (source/) + project_root="$(dirname "$script_dir")" # One level up from source/ + cdk_json_path="$script_dir/infrastructure/cdk.json" # source/infrastructure/cdk.json + solution_manifest_path="$project_root/solution-manifest.yaml" # project_root/solution-manifest.yaml + deployment_dir="$project_root/deployment" # project_root/deployment + + # Get version from environment or cdk.json + if [ -n "$VERSION" ]; then + IMAGE_TAG="$VERSION" + echo "Using VERSION environment variable: $IMAGE_TAG" + else + # Use script-relative path to cdk.json + if [ -f "$cdk_json_path" ]; then + IMAGE_TAG=$(node -p "require('$cdk_json_path').context.solution_version" 2>/dev/null || echo "v4.0.0") + echo "Using version from $cdk_json_path: $IMAGE_TAG" + else + IMAGE_TAG="v4.0.0" + echo "Using fallback version (cdk.json not found at $cdk_json_path): $IMAGE_TAG" + fi + fi + + # Sanitize version tag (remove double 'v' prefix if present) + ORIGINAL_TAG="$IMAGE_TAG" + IMAGE_TAG=$(echo "$IMAGE_TAG" | sed 's/^vv/v/') + if [ "$ORIGINAL_TAG" != "$IMAGE_TAG" ]; then + echo "Sanitized version tag from '$ORIGINAL_TAG' to '$IMAGE_TAG'" + fi + + # Add local suffix for local deployments if not already present + if [[ "$IMAGE_TAG" != *"-local" ]]; then + IMAGE_TAG="${IMAGE_TAG}-local" + echo "Added local suffix to version tag: $IMAGE_TAG" + fi + + echo "Final image tag: $IMAGE_TAG" + + # AWS credentials already checked and AWS_ACCOUNT_ID is set + + # Get AWS region from environment or AWS CLI config + AWS_REGION="${AWS_REGION:-$(aws configure get region 2>/dev/null || echo 'us-east-1')}" + echo "AWS Region: $AWS_REGION" + + # Function to validate requirements.txt before build + validate_requirements() { + local image_dir="$1" + local image_name="$2" + + echo "Validating requirements.txt for $image_name..." + + # Check if pyproject.toml exists (indicates UV workflow) + if [ -f "$image_dir/pyproject.toml" ]; then + echo "✅ Found pyproject.toml - UV workflow available" + + # Check if requirements.txt exists + if [ ! -f "$image_dir/requirements.txt" ]; then + echo "⚠️ requirements.txt not found - will be generated during build" + return 0 + fi + + # Check if requirements.txt is newer than pyproject.toml + if [ "$image_dir/pyproject.toml" -nt "$image_dir/requirements.txt" ]; then + echo "⚠️ pyproject.toml is newer than requirements.txt - will be regenerated during build" + return 0 + fi + + echo "✅ requirements.txt is current" + else + # Traditional pip workflow - requirements.txt must exist + if [ ! -f "$image_dir/requirements.txt" ]; then + echo "❌ requirements.txt not found and no pyproject.toml available" + return 1 + fi + echo "✅ Found requirements.txt for traditional pip workflow" + fi + + return 0 + } + + # List of common directories to copy into each image directory before build + # Add more common directories here as needed + COMMON_DIRS=( + "gaab-strands-common" + ) + + # Function to copy common directories into image directory + copy_common_dirs() { + local image_dir="$1" + local image_name="$2" + + echo "Copying common directories into $image_name..." + + for common_dir in "${COMMON_DIRS[@]}"; do + local source_path="$deployment_dir/ecr/$common_dir" + local dest_path="$image_dir/$common_dir" + + # Check if common directory exists + if [ ! -d "$source_path" ]; then + echo "⚠️ Common directory not found: $source_path - skipping" + continue + fi + + # Remove existing common directory if present + if [ -d "$dest_path" ]; then + echo " Removing existing $common_dir directory..." + rm -rf "$dest_path" + fi + + # Copy common directory + echo " Copying $common_dir..." + cp -r "$source_path" "$dest_path" + if [ $? -ne 0 ]; then + echo "❌ Failed to copy $common_dir to $image_dir" + return 1 + fi + + echo " ✅ Copied $common_dir" + done + + return 0 + } + + # Function to build ECR image locally (no push) + build_ecr_image_local() { + local image_dir="$1" + local image_name="$2" + + echo "" + echo "=== Building $image_name locally ===" + + # Check if image directory exists + if [ ! -d "$image_dir" ]; then + echo "⚠️ Image directory not found: $image_dir - skipping $image_name" + return 0 + fi + + # Copy common directories before build + if ! copy_common_dirs "$image_dir" "$image_name"; then + echo "❌ Failed to copy common directories for $image_name" + return 1 + fi + + # Navigate to image directory + cd "$image_dir" + + # Validate requirements before build + if ! validate_requirements "$image_dir" "$image_name"; then + echo "❌ Requirements validation failed for $image_name" + cd "$execution_dir" + return 1 + fi + + # Check if build script exists + if [ ! -f "scripts/build-container.sh" ]; then + echo "❌ Build script not found: $image_dir/scripts/build-container.sh" + cd "$execution_dir" + return 1 + fi + + # Build the container locally only + echo "Building container locally..." + ./scripts/build-container.sh + if [ $? -ne 0 ]; then + echo "❌ Failed to build $image_name container" + cd "$execution_dir" + return 1 + fi + + echo "✅ Successfully built $image_name:latest locally" + echo "ℹ️ Image will be pushed to ECR during asset staging phase" + + # Return to execution directory + cd "$execution_dir" + return 0 + } + + # Function to get container images from solution manifest + get_container_images() { + # Check if yq is available for YAML parsing + if ! command -v yq >/dev/null 2>&1; then + echo "❌ Error: yq is not installed or not in PATH" + echo "" + echo "yq is required to parse the solution-manifest.yaml file." + echo "Please install yq using one of the following methods:" + echo "" + echo " macOS (Homebrew):" + echo " brew install yq" + echo "" + echo " macOS (MacPorts):" + echo " sudo port install yq" + echo "" + echo " Ubuntu/Debian:" + echo " sudo apt install yq" + echo "" + echo " CentOS/RHEL/Fedora:" + echo " sudo yum install yq" + echo "" + echo " Manual installation:" + echo " https://github.com/mikefarah/yq#install" + echo "" + echo "After installation, please run this script again." + exit 1 + fi + + # Use script-relative path to solution manifest + local manifest_path="$solution_manifest_path" + if [ ! -f "$manifest_path" ]; then + echo "❌ Error: solution-manifest.yaml not found at $manifest_path" + echo "Script directory: $script_dir" + echo "Project root: $project_root" + echo "Please ensure the solution-manifest.yaml file exists at the project root" + exit 1 + fi + if [ ! -f "$manifest_path" ]; then + echo "❌ Error: solution-manifest.yaml not found at $manifest_path" + echo "Current directory: $(pwd)" + echo "Expected location: $(realpath $manifest_path 2>/dev/null || echo $manifest_path)" + echo "Please ensure the solution-manifest.yaml file exists at the project root" + exit 1 + fi + + + + # Use yq to parse YAML and extract container images with timeout + local images + local yq_exit_code + + # Add timeout to prevent hanging + if command -v timeout >/dev/null 2>&1; then + images=$(timeout 10 yq eval '.container_images[]' "$manifest_path" 2>&1) + yq_exit_code=$? + else + images=$(yq eval '.container_images[]' "$manifest_path" 2>&1) + yq_exit_code=$? + fi + + + + if [ $yq_exit_code -ne 0 ]; then + echo "❌ Error: Failed to parse solution-manifest.yaml with yq" + echo "yq command: yq eval '.container_images[]' $manifest_path" + echo "yq output: $images" + echo "Please check that the file is valid YAML and contains a 'container_images' section" + + # Show the relevant section of the YAML file for debugging + echo "" + echo "Relevant section of solution-manifest.yaml:" + grep -A 5 -B 2 "container_images" "$manifest_path" || echo "container_images section not found" + exit 1 + fi + + + echo "$images" + } + + # Get container images from solution manifest + echo "Parsing container images from solution-manifest.yaml..." + container_images=$(get_container_images) + + if [ -z "$container_images" ]; then + echo "❌ No container images found in solution manifest" + echo "Expected to find container_images section in solution-manifest.yaml" + exit 1 + fi + + echo "" + echo "Container images to build (from solution-manifest.yaml):" + echo "$container_images" | while read -r image; do + if [ -n "$image" ]; then + echo " - $image" + fi + done + + # Build and push each container image + echo "" + echo "=== Starting Container Image Builds ===" + + overall_success=true + built_images=() + skipped_images=() + + while IFS= read -r image_name; do + if [ -n "$image_name" ]; then + echo "" + echo "Processing $image_name image..." + + # Use script-relative path to deployment directory + image_dir="$deployment_dir/ecr/$image_name" + + # Check if image directory exists + if [ ! -d "$image_dir" ]; then + echo "⚠️ Image directory not found: $image_dir - skipping $image_name" + skipped_images+=("$image_name") + continue + fi + + # Check if image directory exists + if [ ! -d "$image_dir" ]; then + echo "⚠️ Image directory not found: $image_dir - skipping $image_name" + skipped_images+=("$image_name") + continue + fi + + # Build the image locally + if build_ecr_image_local "$image_dir" "$image_name"; then + built_images+=("$image_name") + echo "✅ Successfully processed $image_name" + else + echo "❌ Failed to process $image_name" + overall_success=false + fi + fi + done <<< "$container_images" + + # Summary + echo "" + echo "=== ECR Image Build Summary ===" + + # Display built images + if [ ${#built_images[@]} -gt 0 ]; then + echo "Successfully built images locally:" + for image_name in "${built_images[@]}"; do + echo "✅ $image_name: BUILT LOCALLY" + echo " Local image: $image_name:latest" + echo " Will be pushed during asset staging to: $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$image_name:$IMAGE_TAG" + done + fi + + # Display skipped images + if [ ${#skipped_images[@]} -gt 0 ]; then + echo "" + echo "Skipped images (directories not found):" + for image_name in "${skipped_images[@]}"; do + echo "⚠️ $image_name: SKIPPED ($deployment_dir/ecr/$image_name not found)" + done + fi + + # Check if at least one image was built successfully + if [ ${#built_images[@]} -eq 0 ]; then + echo "" + echo "❌ No ECR images were built successfully - CDK deployment cannot proceed" + echo "Please check the error messages above and ensure:" + echo " 1. Docker is running and accessible" + echo " 2. Container image directories exist in $deployment_dir/ecr/" + echo " 3. Build scripts are executable and working" + exit 1 + fi + + echo "" + if [ "$overall_success" = true ]; then + echo "✅ ECR images built locally successfully" + echo "ℹ️ Images will be pushed to ECR during the asset staging phase" + echo "ℹ️ Run './stage-assets.sh' to push images to ECR and stage CloudFormation assets" + else + echo "⚠️ Some ECR images failed to build, but at least one succeeded" + echo "ℹ️ CDK synthesis can proceed with available images" + fi + +else + echo "Pipeline deployment detected (DIST_OUTPUT_BUCKET is set) - skipping local ECR image build" + echo "ECR images will be resolved via pull-through cache from aws-solutions public ECR repository" +fi + +echo "=== Pre-build ECR Images Complete ===" \ No newline at end of file diff --git a/source/scripts/v2_migration/pyproject.toml b/source/scripts/v2_migration/pyproject.toml index 5b1bd22c..dd8bfa70 100644 --- a/source/scripts/v2_migration/pyproject.toml +++ b/source/scripts/v2_migration/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gaab-v2-migration" -version = "3.0.7" +version = "4.0.0" authors = [ "Amazon Web Services" ] description = "Migration script to convert v1.X use cases to v2.X" packages = [ diff --git a/source/stage-assets.sh b/source/stage-assets.sh index 686b0d64..2df5f035 100755 --- a/source/stage-assets.sh +++ b/source/stage-assets.sh @@ -2,88 +2,651 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 +# CDK Asset Staging Script +# +# This script stages CDK assets (zip files and CloudFormation templates) to S3 for deployment. +# +# New Features: +# - Skip existing assets: Use --skip-existing to avoid re-uploading assets that already exist +# - Check old assets: Use --check-old-assets to analyze bucket for old duplicate assets +# - Templates are always uploaded and overwritten regardless of skip mode +# +# Usage Examples: +# ./stage-assets.sh # Normal staging (default) +# ./stage-assets.sh --skip-existing # Skip existing assets for faster staging +# ./stage-assets.sh --check-old-assets # Check for old duplicate assets only + [ "$DEBUG" == 'true' ] && set -x set -e echo "This script should be run from the 'source' folder" -root_folder=infrastructure/cdk.out -exclude_stack="DeploymentPlatformStack.assets.json" +# Configuration flags +SKIP_EXISTING_ASSETS=${SKIP_EXISTING_ASSETS:-false} +CHECK_OLD_ASSETS=${CHECK_OLD_ASSETS:-false} +ECR_ONLY=${ECR_ONLY:-false} -# CDK staging bucket -bucket_prefix="cdk-hnb659fds-assets-" -default_region=`aws configure get region` - -# Confirm the region -echo "The region to upload CDK artifacts to (default:$default_region)?" -read region -region="${region:=$default_region}" - -# Get the account id -aws_account_id=$(aws sts get-caller-identity --query "Account" --output text) - -bucket_name="${bucket_prefix}${aws_account_id}-${region}" -echo "All assets will be uploaded to ${bucket_name}" - -while true; do - read -p "Do you want to proceed? (y/n) " yn - case $yn in - [yY] ) echo -e "Proceeding to upload\n" - break;; - [nN] ) echo exiting; - exit;; - * ) echo invalid response;; +# Function to show usage +show_usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Stage CDK assets to S3 bucket for deployment" + echo "" + echo "Options:" + echo " --skip-existing Skip uploading assets that already exist in S3 (faster staging)" + echo " --check-old-assets Check for old duplicate assets (over 30 days old)" + echo " --ecr-only Skip CDK asset uploads and only push ECR images" + echo " --help, -h Show this help message" + echo "" + echo "Environment Variables:" + echo " SKIP_EXISTING_ASSETS=true Same as --skip-existing" + echo " CHECK_OLD_ASSETS=true Same as --check-old-assets" + echo " ECR_ONLY=true Same as --ecr-only" + echo "" + echo "Examples:" + echo " $0 # Normal staging (default behavior)" + echo " $0 --skip-existing # Skip existing assets for faster staging" + echo " $0 --check-old-assets # Check for old duplicate assets only" + echo " $0 --ecr-only # Skip CDK assets, only push ECR images" + echo "" +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --skip-existing) + SKIP_EXISTING_ASSETS=true + shift + ;; + --check-old-assets) + CHECK_OLD_ASSETS=true + shift + ;; + --ecr-only) + ECR_ONLY=true + shift + ;; + --help|-h) + show_usage + exit 0 + ;; + *) + echo "Unknown option: $1" + show_usage + exit 1 + ;; esac done +# Show current configuration +echo "" +echo "Configuration:" +echo " Skip existing assets: $SKIP_EXISTING_ASSETS" +echo " Check old assets only: $CHECK_OLD_ASSETS" +echo " ECR images only: $ECR_ONLY" +echo "" + +# Function to check if an S3 object exists +check_s3_object_exists() { + local bucket="$1" + local key="$2" + local region="$3" + + aws s3api head-object --bucket "$bucket" --key "$key" --region "$region" >/dev/null 2>&1 +} + +# Function to get S3 object last modified date +get_s3_object_age_days() { + local bucket="$1" + local key="$2" + local region="$3" + + local last_modified + last_modified=$(aws s3api head-object --bucket "$bucket" --key "$key" --region "$region" --query 'LastModified' --output text 2>/dev/null) + + if [ -n "$last_modified" ]; then + # Convert to epoch and calculate days difference + local last_modified_epoch + last_modified_epoch=$(date -d "$last_modified" +%s 2>/dev/null || date -j -f "%Y-%m-%dT%H:%M:%S" "${last_modified%.*}" +%s 2>/dev/null) + local current_epoch + current_epoch=$(date +%s) + local age_seconds=$((current_epoch - last_modified_epoch)) + local age_days=$((age_seconds / 86400)) + echo "$age_days" + else + echo "-1" + fi +} + +# Function to get container images from solution manifest +get_container_images() { + # Check if yq is available for YAML parsing + if ! command -v yq >/dev/null 2>&1; then + echo "❌ Error: yq is not installed or not in PATH" + echo "Please install yq to parse the solution manifest file:" + echo " - macOS: brew install yq" + echo " - Ubuntu/Debian: sudo apt install yq" + echo " - Other: https://github.com/mikefarah/yq#install" + exit 1 + fi + + # Use yq to parse YAML + if [ ! -f "../solution-manifest.yaml" ]; then + echo "❌ Error: solution-manifest.yaml not found" + exit 1 + fi + + yq eval '.container_images[]' ../solution-manifest.yaml 2>/dev/null +} + +# Global configuration +root_folder=infrastructure/cdk.out +exclude_stack="DeploymentPlatformStack.assets.json" +bucket_prefix="cdk-hnb659fds-assets-" +MAX_PARALLEL=${MAX_PARALLEL:-8} + + + +# Function to upload zip artifacts upload_zip_artifacts() { - asset_file=$1 + local asset_file="$1" echo -e "\nZipping and uploading assets for packaging type zip..." - jq '[.files | to_entries | .[].value | {packaging: .source.packaging, filename:.destinations."current_account-current_region".objectKey}] | map(select(.packaging == "zip")) | .[].filename' $asset_file | while read object; do - zip_filename=$(echo $object | sed 's/"//g') # string off quotes (") - folder_name=$(echo $zip_filename | sed 's/.zip//g' | awk '{print "asset."$1}') # remove ".zip" to ass prepend "asset" - cd $root_folder/$folder_name - echo "$folder_name" - zip -rq - . | aws s3 cp --region $region - s3://$bucket_name/$zip_filename - cd ~- + + # Get zip artifacts, handling different destination key formats + local zip_files + zip_files=$(jq -r '[.files | to_entries[] | .value | select(.source.packaging == "zip") | (.destinations | to_entries[0].value.objectKey)] | .[]' "$asset_file" 2>/dev/null) + + if [ -z "$zip_files" ] || [ "$zip_files" = "null" ]; then + echo "No zip artifacts found in $asset_file" + return 0 + fi + + echo "$zip_files" | while read -r zip_filename; do + if [ -n "$zip_filename" ] && [ "$zip_filename" != "null" ]; then + folder_name=$(echo "$zip_filename" | sed 's/.zip//g' | awk '{print "asset."$1}') # remove ".zip" and prepend "asset" + + # Check if we should skip existing assets + if [ "$SKIP_EXISTING_ASSETS" = "true" ]; then + if check_s3_object_exists "$bucket_name" "$zip_filename" "$region"; then + echo "Skipping existing zip asset: $zip_filename" + continue + fi + fi + + echo "Uploading zip: $zip_filename from $folder_name" + + if [ -d "$root_folder/$folder_name" ]; then + cd "$root_folder/$folder_name" + zip -rq - . | aws s3 cp --region "$region" - "s3://$bucket_name/$zip_filename" + cd - > /dev/null + echo "Completed zip: $zip_filename" + else + echo "Warning: Asset folder not found: $root_folder/$folder_name" + fi + fi done } +# Function to upload root templates upload_root_template() { - asset_file=$1 + local asset_file="$1" - jq '[.files | to_entries | .[].value | {packaging: .source.packaging, filename:.source.path}] | map(select(.packaging == "file")) | map(select(.filename | contains("nested") | not)) | .[].filename' $asset_file | while read object; do - template_name=$(echo $object | sed 's/"//g') - echo "$template_name" - aws s3 cp --quiet --region $region $root_folder/$template_name s3://$bucket_name/$template_name + echo -e "\nUploading root templates..." + + # Get root template files (non-nested) + local root_templates + root_templates=$(jq -r '[.files | to_entries[] | .value | select(.source.packaging == "file") | select(.source.path | contains("nested") | not) | .source.path] | .[]' "$asset_file" 2>/dev/null) + + if [ -z "$root_templates" ] || [ "$root_templates" = "null" ]; then + echo "No root templates found in $asset_file" + return 0 + fi + + echo "$root_templates" | while read -r template_name; do + if [ -n "$template_name" ] && [ "$template_name" != "null" ]; then + # Templates are always uploaded and overwritten + echo "Uploading root template: $template_name (always overwrite)" + if [ -f "$root_folder/$template_name" ]; then + aws s3 cp --quiet --region "$region" "$root_folder/$template_name" "s3://$bucket_name/$template_name" + echo "Completed root template: $template_name" + else + echo "Warning: Template file not found: $root_folder/$template_name" + fi + fi done } +# Function to upload nested templates upload_nested_templates() { - asset_file=$1 + local asset_file="$1" - jq '[.files | to_entries | .[].value | {packaging: .source.packaging, filename:.source.path, objectKey:.destinations."current_account-current_region".objectKey}] | map(select(.packaging == "file")) | map(select(.filename | contains("nested")))' $asset_file | jq -c '.[]' | while read object; do - template_name=$(echo $object | jq '.filename' | sed 's/"//g') - hash_file_name=$(echo $object | jq '.objectKey' | sed 's/"//g') - echo "$template_name" - aws s3 cp --quiet --region $region $root_folder/$template_name s3://$bucket_name/$hash_file_name + echo -e "\nUploading nested templates..." + + # Get nested template files with their hash names + local nested_templates + nested_templates=$(jq -c '[.files | to_entries[] | .value | select(.source.packaging == "file") | select(.source.path | contains("nested")) | {filename: .source.path, objectKey: (.destinations | to_entries[0].value.objectKey)}] | .[]' "$asset_file" 2>/dev/null) + + if [ -z "$nested_templates" ] || [ "$nested_templates" = "null" ]; then + echo "No nested templates found in $asset_file" + return 0 + fi + + echo "$nested_templates" | while read -r template_obj; do + if [ -n "$template_obj" ] && [ "$template_obj" != "null" ]; then + template_name=$(echo "$template_obj" | jq -r '.filename') + hash_file_name=$(echo "$template_obj" | jq -r '.objectKey') + + if [ -n "$template_name" ] && [ "$template_name" != "null" ] && [ -n "$hash_file_name" ] && [ "$hash_file_name" != "null" ]; then + # Templates are always uploaded and overwritten + echo "Uploading nested template: $template_name as $hash_file_name (always overwrite)" + if [ -f "$root_folder/$template_name" ]; then + aws s3 cp --quiet --region "$region" "$root_folder/$template_name" "s3://$bucket_name/$hash_file_name" + echo "Completed nested template: $template_name" + else + echo "Warning: Nested template file not found: $root_folder/$template_name" + fi + fi + fi done } +# Function to check for old duplicate assets +check_old_assets() { + local region="$1" + local bucket_name="$2" + local age_threshold_days=${3:-30} # Default to 30 days + + echo "" + echo "##################################################" + echo "Checking for old duplicate assets (older than $age_threshold_days days)" + echo "##################################################" + + # List all objects in the bucket + local all_objects + all_objects=$(aws s3api list-objects-v2 --bucket "$bucket_name" --region "$region" --query 'Contents[].{Key:Key,LastModified:LastModified}' --output json 2>/dev/null) + + if [ -z "$all_objects" ] || [ "$all_objects" = "null" ]; then + echo "No objects found in bucket or bucket doesn't exist" + return 0 + fi + + # Group objects by base name (without hash) and check for duplicates + local temp_file="/tmp/asset_analysis.json" + echo "$all_objects" > "$temp_file" + + # Find potential duplicates (objects with similar names but different hashes) + local old_assets_found=false + + echo "Analyzing assets for potential duplicates..." + + # Extract unique base patterns (remove hash suffixes) + jq -r '.[].Key' "$temp_file" | while read -r key; do + # Skip template files as they're always overwritten + if [[ "$key" == *.template.json ]] || [[ "$key" == *.template.yaml ]]; then + continue + fi + + # For zip files, extract base name without hash + if [[ "$key" == *.zip ]]; then + base_name=$(echo "$key" | sed -E 's/^[a-f0-9]{64}\.zip$/asset.zip/' | sed -E 's/^asset\.([a-f0-9]{64})\.zip$/asset.zip/') + + # Find all objects with similar pattern + similar_objects=$(jq -r --arg pattern ".*\.zip$" '.[] | select(.Key | test($pattern)) | .Key' "$temp_file") + + if [ $(echo "$similar_objects" | wc -l) -gt 1 ]; then + echo "" + echo "Found multiple zip assets (potential duplicates):" + echo "$similar_objects" | while read -r similar_key; do + age_days=$(get_s3_object_age_days "$bucket_name" "$similar_key" "$region") + if [ "$age_days" -gt "$age_threshold_days" ]; then + echo " 🔴 OLD: $similar_key (${age_days} days old)" + old_assets_found=true + else + echo " 🟢 NEW: $similar_key (${age_days} days old)" + fi + done + fi + fi + done + + rm -f "$temp_file" + + if [ "$old_assets_found" = "true" ]; then + echo "" + echo "⚠️ Found old assets that may be duplicates." + echo "Consider cleaning up assets older than $age_threshold_days days to save storage costs." + echo "" + echo "To clean up old assets, you can run:" + echo "aws s3api list-objects-v2 --bucket $bucket_name --region $region --query 'Contents[?LastModified<\`$(date -d \"$age_threshold_days days ago\" -Iseconds)\`].Key' --output text | xargs -I {} aws s3 rm s3://$bucket_name/{}" + else + echo "" + echo "✅ No old duplicate assets found." + fi +} -for y in `find $root_folder/* -name "*.assets.json" ! -name $exclude_stack`; do +# Process all asset files +upload_all_assets() { + echo "" echo "##################################################" - echo "Parsing asset file $y" + echo "Uploading CDK Assets" echo "##################################################" - upload_zip_artifacts $y # upload zip artifacts + + for asset_file in $(find "$root_folder" -name "*.assets.json" ! -name "$exclude_stack"); do + echo "" + echo "##################################################" + echo "Parsing asset file $asset_file" + echo "##################################################" + + upload_zip_artifacts "$asset_file" + + echo -e "\nUpload of zip assets complete. Now uploading templates\n" + + upload_root_template "$asset_file" + upload_nested_templates "$asset_file" + + echo -e "\nUploading templates complete for $asset_file\n" + done + + echo "All CDK assets uploaded successfully!" +} + +# Helper functions for main execution +get_user_confirmation() { + local bucket_name="$1" + + if [ "$ECR_ONLY" = "true" ]; then + echo "ECR images will be pushed to account ${aws_account_id} in region ${region}" + echo "Mode: ECR images only (skipping CDK assets)" + else + echo "All assets will be uploaded to ${bucket_name}" + if [ "$SKIP_EXISTING_ASSETS" = "true" ]; then + echo "Mode: Skip existing assets (faster staging)" + else + echo "Mode: Overwrite all assets (default behavior)" + fi + echo "Parallel uploads: ${MAX_PARALLEL} (set MAX_PARALLEL env var to change)" + fi + + while true; do + read -p "Do you want to proceed? (y/n) " yn + case $yn in + [yY] ) echo -e "Proceeding to upload\n" + return 0;; + [nN] ) echo "Exiting" + return 1;; + * ) echo "Invalid response";; + esac + done +} - echo -e "\nUpload of zip assets complete. Now uploading templates\n" +get_region_and_account() { + local default_region + default_region=$(aws configure get region) + + echo "The region to upload CDK artifacts to (default:$default_region)?" + read region + region="${region:=$default_region}" + + aws_account_id=$(aws sts get-caller-identity --query "Account" --output text) + bucket_name="${bucket_prefix}${aws_account_id}-${region}" +} - upload_root_template $y # upload root template - upload_nested_templates $y # upload nested template - - echo -e "\nUploading templates complete\n" -done +# CDK Asset Upload Functions (called from main) + +# ECR Image Staging Functions +get_solution_version() { + # Get version from VERSION environment variable or cdk.json + if [ -n "$VERSION" ]; then + echo "$VERSION" + else + # Extract version from cdk.json + node -p "require('./infrastructure/cdk.json').context.solution_version" 2>/dev/null || echo "v4.0.0" + fi +} + +sanitize_version_tag() { + local version="$1" + local deployment_mode="$2" + + # Remove double 'v' prefix if present (e.g., vv4.0.0 -> v4.0.0) + local clean_version=$(echo "$version" | sed 's/^vv/v/') + + # Add local suffix for local deployments if not already present + if [[ "$deployment_mode" == "local" && "$clean_version" != *"-local" ]]; then + clean_version="${clean_version}-local" + fi + + echo "$clean_version" +} + +check_ecr_repository() { + local repo_name="$1" + local region="$2" + + echo "Checking ECR repository: $repo_name" + + if aws ecr describe-repositories --repository-names "$repo_name" --region "$region" >/dev/null 2>&1; then + echo "✅ ECR repository exists: $repo_name" + return 0 + else + echo "Creating ECR repository: $repo_name" + if aws ecr create-repository --repository-name "$repo_name" --region "$region" >/dev/null 2>&1; then + echo "✅ ECR repository created: $repo_name" + return 0 + else + echo "❌ Failed to create ECR repository: $repo_name" + return 1 + fi + fi +} + +build_ecr_image() { + local image_name="$1" + local ecr_dir="../deployment/ecr/$image_name" + + echo "Building ECR image: $image_name" + + if [ ! -d "$ecr_dir" ]; then + echo "❌ ECR directory not found: $ecr_dir" + return 1 + fi + + # Navigate to ECR directory and build + cd "$ecr_dir" + + if [ -f "scripts/build-container.sh" ]; then + echo "Running build script for $image_name..." + if ./scripts/build-container.sh; then + echo "✅ Build completed for $image_name" + cd - >/dev/null + return 0 + else + echo "❌ Build failed for $image_name" + cd - >/dev/null + return 1 + fi + else + echo "❌ Build script not found: $ecr_dir/scripts/build-container.sh" + cd - >/dev/null + return 1 + fi +} + +push_ecr_image() { + local image_name="$1" + local version_tag="$2" + local region="$3" + local account_id="$4" + + echo "Pushing ECR image: $image_name:$version_tag" + + # ECR login + echo "Logging into ECR..." + if ! aws ecr get-login-password --region "$region" | docker login --username AWS --password-stdin "$account_id.dkr.ecr.$region.amazonaws.com"; then + echo "❌ ECR login failed" + return 1 + fi + + # Check if ECR repository exists, create if not + if ! check_ecr_repository "$image_name" "$region"; then + return 1 + fi + + # Tag and push image + local ecr_uri="$account_id.dkr.ecr.$region.amazonaws.com/$image_name" + + echo "Tagging image: $image_name:latest -> $ecr_uri:$version_tag" + if ! docker tag "$image_name:latest" "$ecr_uri:$version_tag"; then + echo "❌ Failed to tag image" + return 1 + fi + + echo "Pushing image to ECR: $ecr_uri:$version_tag" + if docker push "$ecr_uri:$version_tag"; then + echo "✅ Successfully pushed: $ecr_uri:$version_tag" + return 0 + else + echo "❌ Failed to push image to ECR" + return 1 + fi +} + +stage_ecr_images() { + local region="$1" + local account_id="$2" + + echo "" + echo "##################################################" + echo "Staging ECR Images" + echo "##################################################" + + # Determine deployment mode (local if DIST_OUTPUT_BUCKET is not set) + local deployment_mode="local" + if [ -n "$DIST_OUTPUT_BUCKET" ]; then + deployment_mode="pipeline" + fi + + # Get and sanitize version + local raw_version + raw_version=$(get_solution_version) + local version_tag + version_tag=$(sanitize_version_tag "$raw_version" "$deployment_mode") + + echo "Deployment mode: $deployment_mode" + echo "Using version tag: $version_tag" + echo "Target region: $region" + echo "Target account: $account_id" + + # Get container images from solution manifest + local container_images + container_images=$(get_container_images) + + if [ -z "$container_images" ]; then + echo "❌ No container images found in solution manifest" + return 1 + fi + + echo "" + echo "Container images to process:" + echo "$container_images" | while read -r image; do + echo " - $image" + done + + local success=true + local staged_images=() + + # Process each container image + echo "$container_images" | while read -r image_name; do + if [ -n "$image_name" ]; then + echo "" + echo "--- Processing Image: $image_name ---" + + local ecr_dir="../deployment/ecr/$image_name" + if [ -d "$ecr_dir" ]; then + if build_ecr_image "$image_name"; then + if push_ecr_image "$image_name" "$version_tag" "$region" "$account_id"; then + staged_images+=("$account_id.dkr.ecr.$region.amazonaws.com/$image_name:$version_tag") + else + success=false + fi + else + success=false + fi + else + echo "⚠️ ECR directory not found: $ecr_dir" + echo "Skipping $image_name image staging" + fi + fi + done + + # Check final success status + if [ "$success" = true ]; then + echo "" + echo "✅ ECR image staging completed successfully" + echo "" + echo "Staged Image URIs:" + echo "$container_images" | while read -r image_name; do + if [ -n "$image_name" ] && [ -d "../deployment/ecr/$image_name" ]; then + echo " $image_name: $account_id.dkr.ecr.$region.amazonaws.com/$image_name:$version_tag" + fi + done + echo "" + return 0 + else + echo "" + echo "❌ ECR image staging failed" + return 1 + fi +} + +# Main function +main() { + # Get region and account information + get_region_and_account + + # Handle check-old-assets mode + if [ "$CHECK_OLD_ASSETS" = "true" ]; then + echo "Checking for old duplicate assets in ${bucket_name}" + check_old_assets "$region" "$bucket_name" 30 + return 0 + fi + + # Handle ECR-only mode + if [ "$ECR_ONLY" = "true" ]; then + # Get user confirmation to proceed + if ! get_user_confirmation "$bucket_name"; then + return 0 + fi + + # Stage ECR images only + if ! stage_ecr_images "$region" "$aws_account_id"; then + echo "❌ ECR image staging failed" + return 1 + fi + + echo "ECR image staging complete" + return 0 + fi + + # Get user confirmation to proceed + if ! get_user_confirmation "$bucket_name"; then + return 0 + fi + + # Upload CDK assets + upload_all_assets + echo "CDK assets staging complete" + + # Stage ECR images + if ! stage_ecr_images "$region" "$aws_account_id"; then + echo "❌ ECR image staging failed, aborting asset staging" + return 1 + fi + + echo "All stacks complete" + return 0 +} -echo "All stacks complete" +# Execute main function +main "$@" diff --git a/source/ui-chat/package-lock.json b/source/ui-chat/package-lock.json index 2796febb..66966477 100644 --- a/source/ui-chat/package-lock.json +++ b/source/ui-chat/package-lock.json @@ -1,12 +1,12 @@ { "name": "@amzn/gen-ai-app-builder-on-aws-ui-chat", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/gen-ai-app-builder-on-aws-ui-chat", - "version": "3.0.7", + "version": "4.0.0", "dependencies": { "@aws-amplify/core": "^6.10.0", "@aws-amplify/ui-react": "^6.9.1", @@ -46,7 +46,7 @@ "@typescript-eslint/eslint-plugin": "^8.24.1", "@typescript-eslint/parser": "^8.24.1", "@vitejs/plugin-react-swc": "^3.7.2", - "@vitest/coverage-v8": "^3.0.3", + "@vitest/coverage-v8": "^4.0.10", "eslint": "^9.20.1", "eslint-config-prettier": "^10.0.1", "eslint-plugin-header": "^3.1.1", @@ -61,10 +61,10 @@ "jsdom": "^26.0.0", "msw": "^2.7.0", "node-fetch": "^3.3.2", - "prettier": "^3.5.1", + "prettier": "^3.6.2", "typescript": "^5.7.3", "vite": "^6.0.11", - "vitest": "^3.0.3" + "vitest": "^4.0.10" } }, "node_modules/@adobe/css-tools": { @@ -74,20 +74,6 @@ "dev": true, "license": "MIT" }, - "node_modules/@ampproject/remapping": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", - "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/@asamuzakjp/css-color": { "version": "2.8.3", "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-2.8.3.tgz", @@ -1942,9 +1928,9 @@ } }, "node_modules/@babel/helper-string-parser": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", - "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", "dev": true, "license": "MIT", "engines": { @@ -1952,9 +1938,9 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", - "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", "dev": true, "license": "MIT", "engines": { @@ -1962,13 +1948,13 @@ } }, "node_modules/@babel/parser": { - "version": "7.26.7", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.7.tgz", - "integrity": "sha512-kEvgGGgEjRUutvdVvZhbn/BxVt+5VSpwXz1j3WYXQbXDo8KzFOPNG2GQbdAiNq8g6wn1yKk7C/qrke03a84V+w==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", "dev": true, "license": "MIT", "dependencies": { - "@babel/types": "^7.26.7" + "@babel/types": "^7.28.5" }, "bin": { "parser": "bin/babel-parser.js" @@ -1990,14 +1976,14 @@ } }, "node_modules/@babel/types": { - "version": "7.26.7", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.7.tgz", - "integrity": "sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg==", + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", "dev": true, "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9" + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" }, "engines": { "node": ">=6.9.0" @@ -3149,99 +3135,12 @@ "@types/node": ">=18" } }, - "node_modules/@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "dev": true, - "license": "ISC", - "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@isaacs/cliui/node_modules/ansi-regex": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", - "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/@isaacs/cliui/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/@isaacs/cliui/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/@istanbuljs/schema": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", - "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.8", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", "license": "MIT", + "peer": true, "dependencies": { "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", @@ -3265,6 +3164,7 @@ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "license": "MIT", + "peer": true, "engines": { "node": ">=6.0.0" } @@ -3281,15 +3181,15 @@ } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", @@ -3679,17 +3579,6 @@ "url": "https://opencollective.com/parcel" } }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "dev": true, - "license": "MIT", - "optional": true, - "engines": { - "node": ">=14" - } - }, "node_modules/@pkgr/core": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.1.1.tgz", @@ -5899,6 +5788,13 @@ "node": ">=16.0.0" } }, + "node_modules/@standard-schema/spec": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz", + "integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==", + "dev": true, + "license": "MIT" + }, "node_modules/@swc/core": { "version": "1.10.18", "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.10.18.tgz", @@ -6272,6 +6168,17 @@ "integrity": "sha512-nD0Z9fNIZcxYX5Mai2CTmFD7wX7UldCkW2ezCF8D1T5hdiLsnTWDGRpfRYntU6VjTdLQjOvyszru7I1c1oCQew==", "license": "MIT" }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, "node_modules/@types/cookie": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", @@ -6288,6 +6195,13 @@ "@types/ms": "*" } }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/eslint": { "version": "9.6.1", "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", @@ -6659,31 +6573,30 @@ } }, "node_modules/@vitest/coverage-v8": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-3.0.6.tgz", - "integrity": "sha512-JRTlR8Bw+4BcmVTICa7tJsxqphAktakiLsAmibVLAWbu1lauFddY/tXeM6sAyl1cgkPuXtpnUgaCPhTdz1Qapg==", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-4.0.10.tgz", + "integrity": "sha512-g+brmtoKa/sAeIohNJnnWhnHtU6GuqqVOSQ4SxDIPcgZWZyhJs5RmF5LpqXs8Kq64lANP+vnbn5JLzhLj/G56g==", "dev": true, "license": "MIT", "dependencies": { - "@ampproject/remapping": "^2.3.0", "@bcoe/v8-coverage": "^1.0.2", - "debug": "^4.4.0", + "@vitest/utils": "4.0.10", + "ast-v8-to-istanbul": "^0.3.8", + "debug": "^4.4.3", "istanbul-lib-coverage": "^3.2.2", "istanbul-lib-report": "^3.0.1", "istanbul-lib-source-maps": "^5.0.6", - "istanbul-reports": "^3.1.7", - "magic-string": "^0.30.17", - "magicast": "^0.3.5", - "std-env": "^3.8.0", - "test-exclude": "^7.0.1", - "tinyrainbow": "^2.0.0" + "istanbul-reports": "^3.2.0", + "magicast": "^0.5.1", + "std-env": "^3.10.0", + "tinyrainbow": "^3.0.3" }, "funding": { "url": "https://opencollective.com/vitest" }, "peerDependencies": { - "@vitest/browser": "3.0.6", - "vitest": "3.0.6" + "@vitest/browser": "4.0.10", + "vitest": "4.0.10" }, "peerDependenciesMeta": { "@vitest/browser": { @@ -6692,38 +6605,40 @@ } }, "node_modules/@vitest/expect": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.0.6.tgz", - "integrity": "sha512-zBduHf/ja7/QRX4HdP1DSq5XrPgdN+jzLOwaTq/0qZjYfgETNFCKf9nOAp2j3hmom3oTbczuUzrzg9Hafh7hNg==", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.10.tgz", + "integrity": "sha512-3QkTX/lK39FBNwARCQRSQr0TP9+ywSdxSX+LgbJ2M1WmveXP72anTbnp2yl5fH+dU6SUmBzNMrDHs80G8G2DZg==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/spy": "3.0.6", - "@vitest/utils": "3.0.6", - "chai": "^5.2.0", - "tinyrainbow": "^2.0.0" + "@standard-schema/spec": "^1.0.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.0.10", + "@vitest/utils": "4.0.10", + "chai": "^6.2.1", + "tinyrainbow": "^3.0.3" }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/@vitest/mocker": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.0.6.tgz", - "integrity": "sha512-KPztr4/tn7qDGZfqlSPQoF2VgJcKxnDNhmfR3VgZ6Fy1bO8T9Fc1stUiTXtqz0yG24VpD00pZP5f8EOFknjNuQ==", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.10.tgz", + "integrity": "sha512-e2OfdexYkjkg8Hh3L9NVEfbwGXq5IZbDovkf30qW2tOh7Rh9sVtmSr2ztEXOFbymNxS4qjzLXUQIvATvN4B+lg==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/spy": "3.0.6", + "@vitest/spy": "4.0.10", "estree-walker": "^3.0.3", - "magic-string": "^0.30.17" + "magic-string": "^0.30.21" }, "funding": { "url": "https://opencollective.com/vitest" }, "peerDependencies": { "msw": "^2.4.9", - "vite": "^5.0.0 || ^6.0.0" + "vite": "^6.0.0 || ^7.0.0-0" }, "peerDependenciesMeta": { "msw": { @@ -6735,26 +6650,26 @@ } }, "node_modules/@vitest/pretty-format": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.0.6.tgz", - "integrity": "sha512-Zyctv3dbNL+67qtHfRnUE/k8qxduOamRfAL1BurEIQSyOEFffoMvx2pnDSSbKAAVxY0Ej2J/GH2dQKI0W2JyVg==", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.10.tgz", + "integrity": "sha512-99EQbpa/zuDnvVjthwz5bH9o8iPefoQZ63WV8+bsRJZNw3qQSvSltfut8yu1Jc9mqOYi7pEbsKxYTi/rjaq6PA==", "dev": true, "license": "MIT", "dependencies": { - "tinyrainbow": "^2.0.0" + "tinyrainbow": "^3.0.3" }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/@vitest/runner": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.0.6.tgz", - "integrity": "sha512-JopP4m/jGoaG1+CBqubV/5VMbi7L+NQCJTu1J1Pf6YaUbk7bZtaq5CX7p+8sY64Sjn1UQ1XJparHfcvTTdu9cA==", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.10.tgz", + "integrity": "sha512-EXU2iSkKvNwtlL8L8doCpkyclw0mc/t4t9SeOnfOFPyqLmQwuceMPA4zJBa6jw0MKsZYbw7kAn+gl7HxrlB8UQ==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/utils": "3.0.6", + "@vitest/utils": "4.0.10", "pathe": "^2.0.3" }, "funding": { @@ -6762,14 +6677,14 @@ } }, "node_modules/@vitest/snapshot": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.0.6.tgz", - "integrity": "sha512-qKSmxNQwT60kNwwJHMVwavvZsMGXWmngD023OHSgn873pV0lylK7dwBTfYP7e4URy5NiBCHHiQGA9DHkYkqRqg==", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.10.tgz", + "integrity": "sha512-2N4X2ZZl7kZw0qeGdQ41H0KND96L3qX1RgwuCfy6oUsF2ISGD/HpSbmms+CkIOsQmg2kulwfhJ4CI0asnZlvkg==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "3.0.6", - "magic-string": "^0.30.17", + "@vitest/pretty-format": "4.0.10", + "magic-string": "^0.30.21", "pathe": "^2.0.3" }, "funding": { @@ -6777,28 +6692,24 @@ } }, "node_modules/@vitest/spy": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.0.6.tgz", - "integrity": "sha512-HfOGx/bXtjy24fDlTOpgiAEJbRfFxoX3zIGagCqACkFKKZ/TTOE6gYMKXlqecvxEndKFuNHcHqP081ggZ2yM0Q==", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.10.tgz", + "integrity": "sha512-AsY6sVS8OLb96GV5RoG8B6I35GAbNrC49AO+jNRF9YVGb/g9t+hzNm1H6kD0NDp8tt7VJLs6hb7YMkDXqu03iw==", "dev": true, "license": "MIT", - "dependencies": { - "tinyspy": "^3.0.2" - }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/@vitest/utils": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.0.6.tgz", - "integrity": "sha512-18ktZpf4GQFTbf9jK543uspU03Q2qya7ZGya5yiZ0Gx0nnnalBvd5ZBislbl2EhLjM8A8rt4OilqKG7QwcGkvQ==", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.10.tgz", + "integrity": "sha512-kOuqWnEwZNtQxMKg3WmPK1vmhZu9WcoX69iwWjVz+jvKTsF1emzsv3eoPcDr6ykA3qP2bsCQE7CwqfNtAVzsmg==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "3.0.6", - "loupe": "^3.1.3", - "tinyrainbow": "^2.0.0" + "@vitest/pretty-format": "4.0.10", + "tinyrainbow": "^3.0.3" }, "funding": { "url": "https://opencollective.com/vitest" @@ -7338,6 +7249,25 @@ "node": ">=12" } }, + "node_modules/ast-v8-to-istanbul": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/ast-v8-to-istanbul/-/ast-v8-to-istanbul-0.3.8.tgz", + "integrity": "sha512-szgSZqUxI5T8mLKvS7WTjF9is+MVbOeLADU73IseOcrqhxr/VAvy6wfoVE39KnKzA7JRhjF5eUagNlHwvZPlKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.31", + "estree-walker": "^3.0.3", + "js-tokens": "^9.0.1" + } + }, + "node_modules/ast-v8-to-istanbul/node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, "node_modules/async-function": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", @@ -7502,16 +7432,6 @@ "license": "MIT", "peer": true }, - "node_modules/cac": { - "version": "6.7.14", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", - "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/call-bind": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", @@ -7613,20 +7533,13 @@ } }, "node_modules/chai": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.0.tgz", - "integrity": "sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.1.tgz", + "integrity": "sha512-p4Z49OGG5W/WBCPSS/dH3jQ73kD6tiMmUM+bckNK6Jr5JHMG3k9bg/BvKR8lKmtVBKmOiuVaV2ws8s9oSbwysg==", "dev": true, "license": "MIT", - "dependencies": { - "assertion-error": "^2.0.1", - "check-error": "^2.1.1", - "deep-eql": "^5.0.1", - "loupe": "^3.1.0", - "pathval": "^2.0.0" - }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/chalk": { @@ -7686,16 +7599,6 @@ "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/check-error": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", - "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 16" - } - }, "node_modules/chokidar": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", @@ -8054,9 +7957,9 @@ } }, "node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -8098,16 +8001,6 @@ "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/deep-eql": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", - "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -8254,13 +8147,6 @@ "node": ">= 0.4" } }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "dev": true, - "license": "MIT" - }, "node_modules/electron-to-chromium": { "version": "1.5.88", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.88.tgz", @@ -8268,13 +8154,6 @@ "license": "ISC", "peer": true }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true, - "license": "MIT" - }, "node_modules/encode-utf8": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/encode-utf8/-/encode-utf8-1.0.3.tgz", @@ -8394,9 +8273,9 @@ } }, "node_modules/es-module-lexer": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.6.0.tgz", - "integrity": "sha512-qqnD1yMU6tk/jnaMosogGySTZP8YtUgAffA9nMN+E/rjxcfRQ6IEk7IiozUjgxKoFHBGjTLnrHB/YC45r/59EQ==", + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", "license": "MIT" }, "node_modules/es-object-atoms": { @@ -9177,9 +9056,9 @@ } }, "node_modules/expect-type": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.1.0.tgz", - "integrity": "sha512-bFi65yM+xZgk+u/KRIpekdSYkTB5W1pEf0Lt8Q8Msh7b+eQ7LXVtIB1Bkm4fvclDEL1b2CZkMhv2mOeF8tMdkA==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", + "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -9403,23 +9282,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/foreground-child": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", - "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", - "dev": true, - "license": "ISC", - "dependencies": { - "cross-spawn": "^7.0.0", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/form-data": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", @@ -9594,27 +9456,6 @@ "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" } }, - "node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -10593,9 +10434,9 @@ } }, "node_modules/istanbul-reports": { - "version": "3.1.7", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", - "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", "dev": true, "license": "BSD-3-Clause", "dependencies": { @@ -10606,22 +10447,6 @@ "node": ">=8" } }, - "node_modules/jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, "node_modules/jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", @@ -10669,9 +10494,9 @@ "license": "MIT" }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -10882,13 +10707,6 @@ "loose-envify": "cli.js" } }, - "node_modules/loupe": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.3.tgz", - "integrity": "sha512-kkIp7XSkP78ZxJEsSxW3712C6teJVoeHHwgo9zJ380de7IYyJ2ISlxojcH2pC5OFLewESmnRi/+XCDIEEVyoug==", - "dev": true, - "license": "MIT" - }, "node_modules/lru-cache": { "version": "10.4.3", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", @@ -10908,25 +10726,25 @@ } }, "node_modules/magic-string": { - "version": "0.30.17", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", - "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", "dev": true, "license": "MIT", "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.0" + "@jridgewell/sourcemap-codec": "^1.5.5" } }, "node_modules/magicast": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz", - "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==", + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.5.1.tgz", + "integrity": "sha512-xrHS24IxaLrvuo613F719wvOIv9xPHFWQHuvGUBmPnCA/3MQxKI3b+r7n1jAoDHmsbC5bRhTZYR77invLAxVnw==", "dev": true, "license": "MIT", "dependencies": { - "@babel/parser": "^7.25.4", - "@babel/types": "^7.25.4", - "source-map-js": "^1.2.0" + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "source-map-js": "^1.2.1" } }, "node_modules/make-dir": { @@ -11936,16 +11754,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, "node_modules/mnth": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/mnth/-/mnth-2.0.0.tgz", @@ -12311,13 +12119,6 @@ "node": ">=6" } }, - "node_modules/package-json-from-dist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", - "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", - "dev": true, - "license": "BlueOak-1.0.0" - }, "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -12409,23 +12210,6 @@ "dev": true, "license": "MIT" }, - "node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/path-to-regexp": { "version": "6.3.0", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", @@ -12440,16 +12224,6 @@ "dev": true, "license": "MIT" }, - "node_modules/pathval": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", - "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.16" - } - }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", @@ -12528,11 +12302,10 @@ } }, "node_modules/prettier": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.1.tgz", - "integrity": "sha512-hPpFQvHwL3Qv5AdRvBFMhnKo4tYxp0ReXiPn2bxkiohEX6mBeBwEpBSQTkD458RaaDKQMYSp4hX4UtfUTA5wDw==", + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, - "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" }, @@ -13937,9 +13710,9 @@ } }, "node_modules/std-env": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.8.0.tgz", - "integrity": "sha512-Bc3YwwCB+OzldMxOXJIIvC6cPRWr/LxOp48CdQTOkPyk/t4JWWJbrilwBd7RJzKV8QW7tJkcgAmeuLLJugl5/w==", + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", "dev": true, "license": "MIT" }, @@ -13950,76 +13723,6 @@ "dev": true, "license": "MIT" }, - "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", - "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, "node_modules/string.prototype.trim": { "version": "1.2.10", "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", @@ -14105,20 +13808,6 @@ "node": ">=8" } }, - "node_modules/strip-ansi-cjs": { - "name": "strip-ansi", - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/strip-bom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", @@ -14340,21 +14029,6 @@ "url": "https://opencollective.com/webpack" } }, - "node_modules/test-exclude": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.1.tgz", - "integrity": "sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==", - "dev": true, - "license": "ISC", - "dependencies": { - "@istanbuljs/schema": "^0.1.2", - "glob": "^10.4.1", - "minimatch": "^9.0.4" - }, - "engines": { - "node": ">=18" - } - }, "node_modules/tinybench": { "version": "2.9.0", "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", @@ -14370,14 +14044,14 @@ "license": "MIT" }, "node_modules/tinyglobby": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz", - "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==", + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", "dev": true, "license": "MIT", "dependencies": { - "fdir": "^6.4.4", - "picomatch": "^4.0.2" + "fdir": "^6.5.0", + "picomatch": "^4.0.3" }, "engines": { "node": ">=12.0.0" @@ -14387,11 +14061,14 @@ } }, "node_modules/tinyglobby/node_modules/fdir": { - "version": "6.4.4", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz", - "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", "dev": true, "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, "peerDependencies": { "picomatch": "^3 || ^4" }, @@ -14402,9 +14079,9 @@ } }, "node_modules/tinyglobby/node_modules/picomatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", - "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", "engines": { @@ -14414,30 +14091,10 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, - "node_modules/tinypool": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.0.2.tgz", - "integrity": "sha512-al6n+QEANGFOMf/dmUMsuS5/r9B06uwlyNjZZql/zv8J7ybHCgoihBNORZCY2mzUuAnomQa2JdhyHKzZxPCrFA==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - } - }, "node_modules/tinyrainbow": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", - "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/tinyspy": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", - "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz", + "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==", "dev": true, "license": "MIT", "engines": { @@ -15057,29 +14714,6 @@ } } }, - "node_modules/vite-node": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.0.6.tgz", - "integrity": "sha512-s51RzrTkXKJrhNbUzQRsarjmAae7VmMPAsRT7lppVpIg6mK3zGthP9Hgz0YQQKuNcF+Ii7DfYk3Fxz40jRmePw==", - "dev": true, - "license": "MIT", - "dependencies": { - "cac": "^6.7.14", - "debug": "^4.4.0", - "es-module-lexer": "^1.6.0", - "pathe": "^2.0.3", - "vite": "^5.0.0 || ^6.0.0" - }, - "bin": { - "vite-node": "vite-node.mjs" - }, - "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, "node_modules/vite/node_modules/fdir": { "version": "6.4.4", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz", @@ -15109,38 +14743,38 @@ } }, "node_modules/vitest": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.0.6.tgz", - "integrity": "sha512-/iL1Sc5VeDZKPDe58oGK4HUFLhw6b5XdY1MYawjuSaDA4sEfYlY9HnS6aCEG26fX+MgUi7MwlduTBHHAI/OvMA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/expect": "3.0.6", - "@vitest/mocker": "3.0.6", - "@vitest/pretty-format": "^3.0.6", - "@vitest/runner": "3.0.6", - "@vitest/snapshot": "3.0.6", - "@vitest/spy": "3.0.6", - "@vitest/utils": "3.0.6", - "chai": "^5.2.0", - "debug": "^4.4.0", - "expect-type": "^1.1.0", - "magic-string": "^0.30.17", + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.10.tgz", + "integrity": "sha512-2Fqty3MM9CDwOVet/jaQalYlbcjATZwPYGcqpiYQqgQ/dLC7GuHdISKgTYIVF/kaishKxLzleKWWfbSDklyIKg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.0.10", + "@vitest/mocker": "4.0.10", + "@vitest/pretty-format": "4.0.10", + "@vitest/runner": "4.0.10", + "@vitest/snapshot": "4.0.10", + "@vitest/spy": "4.0.10", + "@vitest/utils": "4.0.10", + "debug": "^4.4.3", + "es-module-lexer": "^1.7.0", + "expect-type": "^1.2.2", + "magic-string": "^0.30.21", "pathe": "^2.0.3", - "std-env": "^3.8.0", + "picomatch": "^4.0.3", + "std-env": "^3.10.0", "tinybench": "^2.9.0", "tinyexec": "^0.3.2", - "tinypool": "^1.0.2", - "tinyrainbow": "^2.0.0", - "vite": "^5.0.0 || ^6.0.0", - "vite-node": "3.0.6", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.0.3", + "vite": "^6.0.0 || ^7.0.0", "why-is-node-running": "^2.3.0" }, "bin": { "vitest": "vitest.mjs" }, "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" }, "funding": { "url": "https://opencollective.com/vitest" @@ -15148,9 +14782,11 @@ "peerDependencies": { "@edge-runtime/vm": "*", "@types/debug": "^4.1.12", - "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", - "@vitest/browser": "3.0.6", - "@vitest/ui": "3.0.6", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.0.10", + "@vitest/browser-preview": "4.0.10", + "@vitest/browser-webdriverio": "4.0.10", + "@vitest/ui": "4.0.10", "happy-dom": "*", "jsdom": "*" }, @@ -15164,7 +14800,13 @@ "@types/node": { "optional": true }, - "@vitest/browser": { + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { "optional": true }, "@vitest/ui": { @@ -15178,6 +14820,19 @@ } } }, + "node_modules/vitest/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, "node_modules/void-elements": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", @@ -15516,47 +15171,6 @@ "node": ">=8" } }, - "node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/wrap-ansi-cjs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/wrap-ansi/node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", diff --git a/source/ui-chat/package.json b/source/ui-chat/package.json index b4781b29..6f0e4bc3 100644 --- a/source/ui-chat/package.json +++ b/source/ui-chat/package.json @@ -1,7 +1,7 @@ { "name": "@amzn/gen-ai-app-builder-on-aws-ui-chat", "private": true, - "version": "3.0.7", + "version": "4.0.0", "type": "module", "author": { "name": "Amazon Web Services", @@ -11,7 +11,7 @@ "start": "vite", "build": "tsc && vite build", "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0", - "format": "prettier --ignore-path ../.gitignore --write \"**/*.+(js|ts|tsx|json)\"", + "code-formatter": "prettier --config ../../.prettierrc.yml --ignore-path ../../.prettierignore --write '**/*.{js,jsx,ts,tsx,json,css,md}'", "preview": "vite preview", "test": "vitest run --coverage --silent", "test:watch": "vitest", @@ -57,7 +57,7 @@ "@typescript-eslint/eslint-plugin": "^8.24.1", "@typescript-eslint/parser": "^8.24.1", "@vitejs/plugin-react-swc": "^3.7.2", - "@vitest/coverage-v8": "^3.0.3", + "@vitest/coverage-v8": "^4.0.10", "eslint": "^9.20.1", "eslint-config-prettier": "^10.0.1", "eslint-plugin-header": "^3.1.1", @@ -72,10 +72,10 @@ "jsdom": "^26.0.0", "msw": "^2.7.0", "node-fetch": "^3.3.2", - "prettier": "^3.5.1", + "prettier": "^3.6.2", "typescript": "^5.7.3", "vite": "^6.0.11", - "vitest": "^3.0.3" + "vitest": "^4.0.10" }, "msw": { "workerDirectory": "public" diff --git a/source/ui-chat/src/__tests__/components/common/common-components.test.tsx b/source/ui-chat/src/__tests__/components/common/common-components.test.tsx index de02887d..375bec4a 100644 --- a/source/ui-chat/src/__tests__/components/common/common-components.test.tsx +++ b/source/ui-chat/src/__tests__/components/common/common-components.test.tsx @@ -6,7 +6,7 @@ import { render, screen } from '@testing-library/react'; import { AUTHORS } from '../../../pages/chat/config'; import '@cloudscape-design/chat-components/test-utils/dom'; import createWrapper from '@cloudscape-design/components/test-utils/dom'; -import { ScrollableContainer, ChatBubbleAvatar, Actions } from '../../../components/common/common-components'; +import { ScrollableContainer, ChatBubbleAvatar, Actions, ExternalLinkWarningModal } from '../../../components/common/common-components'; describe('Common Components', () => { describe('ScrollableContainer', () => { @@ -87,4 +87,24 @@ describe('Common Components', () => { expect(popover).toBeDefined(); }); }); + + describe('ExternalLinkWarningModal Export', () => { + it('exports ExternalLinkWarningModal component', () => { + expect(ExternalLinkWarningModal).toBeDefined(); + expect(typeof ExternalLinkWarningModal).toBe('function'); + }); + + it('renders ExternalLinkWarningModal when imported from common-components', () => { + const { container } = render( + {}} + externalLink="https://example.com" + resourceType="test link" + /> + ); + + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + }); + }); }); diff --git a/source/ui-chat/src/__tests__/components/common/external-link-warning-modal.test.tsx b/source/ui-chat/src/__tests__/components/common/external-link-warning-modal.test.tsx new file mode 100644 index 00000000..2de87c1a --- /dev/null +++ b/source/ui-chat/src/__tests__/components/common/external-link-warning-modal.test.tsx @@ -0,0 +1,155 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import React from 'react'; +import { render, screen, fireEvent } from '@testing-library/react'; +import { describe, test, expect, vi } from 'vitest'; +import createWrapper from '@cloudscape-design/components/test-utils/dom'; +import { ExternalLinkWarningModal } from '../../../components/common/external-link-warning-modal'; + +describe('ExternalLinkWarningModal', () => { + const defaultProps = { + visible: true, + onDiscard: vi.fn(), + externalLink: 'https://example.com', + resourceType: 'external link' + }; + + beforeEach(() => { + vi.clearAllMocks(); + }); + + test('renders modal when visible is true', () => { + const { container } = render(); + + const wrapper = createWrapper(container); + const modal = wrapper.findModal(); + + expect(modal).toBeDefined(); + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + }); + + test('does not render modal when visible is false', () => { + const { container } = render(); + + expect(screen.queryByTestId('external-link-warning-modal')).not.toBeInTheDocument(); + }); + + test('displays correct header text', () => { + render(); + + expect(screen.getByText('Leave page')).toBeInTheDocument(); + }); + + test('displays warning alert with correct message', () => { + render(); + + expect(screen.getByText(/Are you sure that you want to leave the current page/)).toBeInTheDocument(); + expect(screen.getByText(/You will be redirected to an external website/)).toBeInTheDocument(); + }); + + test('displays cancel button and calls onDiscard when clicked', () => { + const { container } = render(); + + const cancelButton = screen.getByText('Cancel'); + expect(cancelButton).toBeInTheDocument(); + + fireEvent.click(cancelButton); + expect(defaultProps.onDiscard).toHaveBeenCalledTimes(1); + }); + + test('displays open button with correct text and attributes', () => { + const { container } = render(); + + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + + expect(openButton).toHaveTextContent('Open external link'); + expect(openButton).toHaveAttribute('href', 'https://example.com'); + expect(openButton).toHaveAttribute('target', '_blank'); + expect(openButton).toHaveAttribute('aria-label', 'Open external link (opens new tab)'); + }); + + test('displays custom resource type in button text', () => { + render(); + + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + + expect(openButton).toHaveTextContent('Open AWS Console'); + expect(openButton).toHaveAttribute('aria-label', 'Open AWS Console (opens new tab)'); + }); + + test('uses default resource type when not provided', () => { + const propsWithoutResourceType = { + visible: true, + onDiscard: vi.fn(), + externalLink: 'https://example.com' + }; + + render(); + + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + + expect(openButton).toHaveTextContent('Open external link'); + expect(openButton).toHaveAttribute('aria-label', 'Open external link (opens new tab)'); + }); + + test('modal has onDismiss handler configured', () => { + const { container } = render(); + + const wrapper = createWrapper(container); + const modal = wrapper.findModal(); + + // Verify modal exists and has the correct test id + expect(modal).toBeDefined(); + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + + // The onDiscard function should be properly passed to the modal + // We can't easily test the ESC key behavior in unit tests, but we can verify + // the component structure is correct + expect(typeof defaultProps.onDiscard).toBe('function'); + }); + + test('calls onDiscard when open button is clicked', () => { + render(); + + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + + fireEvent.click(openButton); + expect(defaultProps.onDiscard).toHaveBeenCalledTimes(1); + }); + + test('has correct modal structure and styling', () => { + const { container } = render(); + + const wrapper = createWrapper(container); + + expect(screen.getByText('Leave page')).toBeInTheDocument(); + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + + // Check that the alert is in the modal content + const alert = wrapper.findAlert(); + expect(alert).toBeDefined(); + }); + + test('handles empty external link gracefully', () => { + render(); + + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + + // The button should still be rendered even with empty href + expect(openButton).toBeInTheDocument(); + expect(openButton).toHaveTextContent('Open external link'); + }); + + test('modal footer has correct layout with SpaceBetween', () => { + render(); + + // Check that both buttons are present + const cancelButton = screen.getByText('Cancel'); + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + + expect(cancelButton).toBeInTheDocument(); + expect(openButton).toBeInTheDocument(); + expect(openButton).toHaveTextContent('Open external link'); + }); +}); \ No newline at end of file diff --git a/source/ui-chat/src/__tests__/components/markdown/MarkdownContent.test.tsx b/source/ui-chat/src/__tests__/components/markdown/MarkdownContent.test.tsx index a9f3da3b..67ac7c5d 100644 --- a/source/ui-chat/src/__tests__/components/markdown/MarkdownContent.test.tsx +++ b/source/ui-chat/src/__tests__/components/markdown/MarkdownContent.test.tsx @@ -2,10 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 import { describe, test, expect, vi } from 'vitest'; -import { render, screen } from '@testing-library/react'; +import { render, screen, fireEvent, waitFor } from '@testing-library/react'; import MarkdownContent from '../../../components/markdown/MarkdownContent'; import '@cloudscape-design/code-view/test-utils/dom'; import { createWrapper } from '@cloudscape-design/test-utils-core/dom'; +import createComponentWrapper from '@cloudscape-design/components/test-utils/dom'; describe('MarkdownContent', () => { test('renders plain text content correctly', () => { @@ -151,4 +152,160 @@ describe('MarkdownContent', () => { expect(firstRender).toBe(secondRender); }); + + describe('External Link Handling', () => { + test('renders external HTTP links with click handler', () => { + const content = '[External Link](http://example.com)'; + const { container } = render(); + + const link = container.querySelector('a'); + expect(link).toBeInTheDocument(); + expect(link).toHaveAttribute('href', '#'); + expect(link).toHaveTextContent('External Link'); + }); + + test('renders external HTTPS links with click handler', () => { + const content = '[External Link](https://example.com)'; + const { container } = render(); + + const link = container.querySelector('a'); + expect(link).toBeInTheDocument(); + expect(link).toHaveAttribute('href', '#'); + expect(link).toHaveTextContent('External Link'); + }); + + test('shows external link warning modal when external link is clicked', async () => { + const content = '[External Link](https://example.com)'; + const { container } = render(); + + const link = container.querySelector('a'); + expect(link).toBeInTheDocument(); + + // Click the external link + fireEvent.click(link!); + + // Wait for modal to appear + await waitFor(() => { + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + }); + + // Check modal content + expect(screen.getByText('Leave page')).toBeInTheDocument(); + expect(screen.getByText(/Are you sure that you want to leave the current page/)).toBeInTheDocument(); + }); + + test('modal displays correct external link URL', async () => { + const content = '[Test Link](https://test-example.com)'; + const { container } = render(); + + const link = container.querySelector('a'); + fireEvent.click(link!); + + await waitFor(() => { + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + expect(openButton).toHaveAttribute('href', 'https://test-example.com'); + }); + }); + + test('modal can be cancelled', async () => { + const content = '[External Link](https://example.com)'; + const { container } = render(); + + const link = container.querySelector('a'); + fireEvent.click(link!); + + await waitFor(() => { + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + }); + + // Click cancel button + const cancelButton = screen.getByText('Cancel'); + fireEvent.click(cancelButton); + + // Modal should be hidden + await waitFor(() => { + expect(screen.queryByTestId('external-link-warning-modal')).not.toBeInTheDocument(); + }); + }); + + test('modal can be dismissed and reopened', async () => { + const content = '[External Link](https://example.com)'; + const { container, rerender } = render(); + + const link = container.querySelector('a'); + + // First click - show modal + fireEvent.click(link!); + await waitFor(() => { + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + }); + + // Cancel modal + const cancelButton = screen.getByText('Cancel'); + fireEvent.click(cancelButton); + + await waitFor(() => { + expect(screen.queryByTestId('external-link-warning-modal')).not.toBeInTheDocument(); + }); + + // Force a re-render to ensure clean state + rerender(); + + // Second click - show modal again + const linkAfterRerender = container.querySelector('a'); + fireEvent.click(linkAfterRerender!); + await waitFor(() => { + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + }); + }); + + test('handles multiple external links independently', async () => { + const content = '[First Link](https://first.com) and [Second Link](https://second.com)'; + const { container, rerender } = render(); + + const links = container.querySelectorAll('a'); + expect(links).toHaveLength(2); + + // Click first link + fireEvent.click(links[0]); + await waitFor(() => { + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + expect(openButton).toHaveAttribute('href', 'https://first.com'); + }); + + // Cancel modal + const cancelButton = screen.getByText('Cancel'); + fireEvent.click(cancelButton); + + await waitFor(() => { + expect(screen.queryByTestId('external-link-warning-modal')).not.toBeInTheDocument(); + }); + + // Force a re-render to ensure clean state + rerender(); + + // Click second link + const linksAfterRerender = container.querySelectorAll('a'); + fireEvent.click(linksAfterRerender[1]); + await waitFor(() => { + expect(screen.getByTestId('external-link-warning-modal')).toBeInTheDocument(); + const openButton = screen.getByTestId('external-link-warning-modal-open-button'); + expect(openButton).toHaveAttribute('href', 'https://second.com'); + }); + }); + + test('prevents default behavior on external link clicks', () => { + const content = '[External Link](https://example.com)'; + const { container } = render(); + + const link = container.querySelector('a'); + const clickEvent = new MouseEvent('click', { bubbles: true, cancelable: true }); + const preventDefaultSpy = vi.spyOn(clickEvent, 'preventDefault'); + + link?.dispatchEvent(clickEvent); + + expect(preventDefaultSpy).toHaveBeenCalled(); + }); + }); }); diff --git a/source/ui-chat/src/__tests__/components/thinking/ThinkingIndicator.test.tsx b/source/ui-chat/src/__tests__/components/thinking/ThinkingIndicator.test.tsx new file mode 100644 index 00000000..c5b76ffb --- /dev/null +++ b/source/ui-chat/src/__tests__/components/thinking/ThinkingIndicator.test.tsx @@ -0,0 +1,74 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { render, screen } from '@testing-library/react'; +import { describe, it, expect } from 'vitest'; +import { ThinkingIndicator } from '../../../components/thinking/ThinkingIndicator'; +import { ThinkingMetadata } from '../../../pages/chat/types'; + +describe('ThinkingIndicator', () => { + describe('Basic Rendering', () => { + it('should render with completed thinking metadata', () => { + const thinking: ThinkingMetadata = { + duration: 5, + startTime: new Date().toISOString(), + endTime: new Date().toISOString() + }; + + render(); + + expect(screen.getByTestId('thinking-indicator')).toBeInTheDocument(); + }); + + it('should render with stripped content', () => { + const thinking: ThinkingMetadata = { + duration: 3, + startTime: new Date().toISOString(), + endTime: new Date().toISOString(), + strippedContent: 'Some thinking content' + }; + + render(); + + expect(screen.getByTestId('thinking-indicator')).toBeInTheDocument(); + }); + + it('should accept custom data-testid', () => { + const thinking: ThinkingMetadata = { + duration: 2, + startTime: new Date().toISOString(), + endTime: new Date().toISOString() + }; + + render(); + + expect(screen.getByTestId('custom-thinking')).toBeInTheDocument(); + }); + }); + + describe('Duration Display', () => { + it('should display duration in seconds', () => { + const thinking: ThinkingMetadata = { + duration: 5, + startTime: new Date().toISOString(), + endTime: new Date().toISOString() + }; + + render(); + + expect(screen.getByText(/5s/)).toBeInTheDocument(); + }); + + it('should display duration in minutes and seconds', () => { + const thinking: ThinkingMetadata = { + duration: 75, + startTime: new Date().toISOString(), + endTime: new Date().toISOString() + }; + + render(); + + expect(screen.getByText(/1m 15s/)).toBeInTheDocument(); + }); + }); +}); diff --git a/source/ui-chat/src/__tests__/hooks/use-chat-messages.test.tsx b/source/ui-chat/src/__tests__/hooks/use-chat-messages.test.tsx index 75dd3008..904fefc5 100644 --- a/source/ui-chat/src/__tests__/hooks/use-chat-messages.test.tsx +++ b/source/ui-chat/src/__tests__/hooks/use-chat-messages.test.tsx @@ -25,10 +25,15 @@ describe('useChatMessages', () => { isGenAiResponseLoading: false, sourceDocuments: [], conversationId: '', + isStreaming: false, + streamingMessageId: undefined, + thinking: undefined, + toolUsage: [], handleMessage: expect.any(Function), addUserMessage: expect.any(Function), resetChat: expect.any(Function), setMessages: expect.any(Function), + setConversationId: expect.any(Function), setIsGenAiResponseLoading: expect.any(Function) }); }); @@ -258,7 +263,7 @@ describe('useChatMessages', () => { ); expect(result.current.messages[0]).not.toHaveProperty('rephrasedQuery'); }); -}); + it('should handle AI response with messageId', () => { const { result } = renderHook(() => useChatMessages(), { wrapper: createTestWrapper() @@ -314,3 +319,337 @@ describe('useChatMessages', () => { }) ); }); + + it('should handle streaming response initialization', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const streamingResponse: ChatResponse = { + data: 'Hello', + isStreaming: true, + messageId: 'stream-msg-123' + }; + + act(() => { + result.current.handleMessage(streamingResponse); + }); + + expect(result.current.isStreaming).toBe(true); + expect(result.current.streamingMessageId).toBe('stream-msg-123'); + expect(result.current.messages[0]).toEqual( + expect.objectContaining({ + content: 'Hello', + messageId: 'stream-msg-123' + }) + ); + }); + + it('should accumulate streaming chunks', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const responses: ChatResponse[] = [ + { data: 'Hello', isStreaming: true, messageId: 'stream-msg-123' }, + { data: ' world', isStreaming: true, messageId: 'stream-msg-123' }, + { data: '!', isStreaming: true, messageId: 'stream-msg-123' } + ]; + + act(() => { + responses.forEach(response => result.current.handleMessage(response)); + }); + + expect(result.current.isStreaming).toBe(true); + expect(result.current.messages[0]).toEqual( + expect.objectContaining({ + content: 'Hello world!', + messageId: 'stream-msg-123' + }) + ); + }); + + it('should complete streaming with END_CONVERSATION_TOKEN', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + act(() => { + result.current.handleMessage({ data: 'Hello', isStreaming: true, messageId: 'stream-msg-123' }); + }); + + act(() => { + result.current.handleMessage({ data: ' world', isStreaming: true, messageId: 'stream-msg-123' }); + }); + + act(() => { + result.current.handleMessage({ data: '##END_CONVERSATION##' }); + }); + + expect(result.current.isStreaming).toBe(false); + expect(result.current.streamingMessageId).toBeUndefined(); + expect(result.current.isGenAiResponseLoading).toBe(false); + expect(result.current.messages[0]).toEqual( + expect.objectContaining({ + content: 'Hello world', + avatarLoading: false + }) + ); + }); + + it('should complete streaming with streamComplete flag', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const responses: ChatResponse[] = [ + { data: 'Hello', isStreaming: true, messageId: 'stream-msg-123' }, + { data: ' world', isStreaming: true, messageId: 'stream-msg-123' }, + { streamComplete: true } + ]; + + act(() => { + responses.forEach(response => result.current.handleMessage(response)); + }); + + expect(result.current.isStreaming).toBe(false); + expect(result.current.streamingMessageId).toBeUndefined(); + expect(result.current.isGenAiResponseLoading).toBe(false); + }); + + it('should handle streaming errors gracefully', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const responses: ChatResponse[] = [ + { data: 'Hello', isStreaming: true, messageId: 'stream-msg-123' }, + { errorMessage: 'Streaming error occurred' } + ]; + + act(() => { + responses.forEach(response => result.current.handleMessage(response)); + }); + + expect(result.current.isStreaming).toBe(false); + expect(result.current.messages[result.current.messages.length - 1]).toEqual({ + type: 'alert', + header: 'Error', + content: 'Streaming error occurred' + }); + }); + + it('should handle non-streaming responses normally', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: 'Hello world', + conversationId: 'conv-123', + messageId: 'msg-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.isStreaming).toBe(false); + expect(result.current.messages[0]).toEqual( + expect.objectContaining({ + content: 'Hello world', + messageId: 'msg-123' + }) + ); + }); + +it('should filter out PROCESSING messages', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: 'PROCESSING', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.messages).toHaveLength(0); + expect(result.current.currentResponse).toBe(''); + }); + + it('should filter out KEEP ALIVE messages', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: 'KEEP ALIVE', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.messages).toHaveLength(0); + expect(result.current.currentResponse).toBe(''); + }); + + it('should filter out KEEP_ALIVE messages', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: 'KEEP_ALIVE', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.messages).toHaveLength(0); + expect(result.current.currentResponse).toBe(''); + }); + + it('should filter out KEEPALIVE messages', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: 'KEEPALIVE', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.messages).toHaveLength(0); + expect(result.current.currentResponse).toBe(''); + }); + + it('should filter system messages case-insensitively', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const testCases = ['processing', 'Processing', 'keep alive', 'Keep Alive', 'keep_alive']; + + testCases.forEach((testCase) => { + const response: ChatResponse = { + data: testCase, + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.messages).toHaveLength(0); + expect(result.current.currentResponse).toBe(''); + }); + }); + + it('should filter system messages with whitespace', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: ' PROCESSING ', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.messages).toHaveLength(0); + expect(result.current.currentResponse).toBe(''); + }); + + it('should not filter normal messages', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: 'Hello, this is a normal message', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.currentResponse).toBe('Hello, this is a normal message'); + expect(result.current.messages).toHaveLength(1); + }); + + it('should not filter messages containing system keywords as part of larger text', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: 'The system is PROCESSING your request', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + expect(result.current.currentResponse).toBe('The system is PROCESSING your request'); + expect(result.current.messages).toHaveLength(1); + }); + + it('should handle empty message data', () => { + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: '', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + // Empty messages should not be filtered, but also won't add content + expect(result.current.currentResponse).toBe(''); + }); + + it('should return early when filtering system messages without processing other fields', () => { + const consoleSpy = vi.spyOn(console, 'debug').mockImplementation(() => {}); + const { result } = renderHook(() => useChatMessages(), { + wrapper: createTestWrapper() + }); + + const response: ChatResponse = { + data: 'PROCESSING', + conversationId: 'conv-123' + }; + + act(() => { + result.current.handleMessage(response); + }); + + // Message should be filtered, and we return early so conversationId is not set + expect(result.current.conversationId).toBe(''); + expect(result.current.messages).toHaveLength(0); + consoleSpy.mockRestore(); + }); + + // Thinking state is now managed as message metadata, not as separate hook state + // See IncomingMessage component for thinking indicator implementation +}); diff --git a/source/ui-chat/src/__tests__/models/response.test.ts b/source/ui-chat/src/__tests__/models/response.test.ts new file mode 100644 index 00000000..8ea0a8c8 --- /dev/null +++ b/source/ui-chat/src/__tests__/models/response.test.ts @@ -0,0 +1,154 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect } from 'vitest'; +import { + ChatResponse, + AgentBuilderChatResponse, + ToolUsageInfo, + ThinkingState +} from '../../models/api/response'; + +describe('ChatResponse Extended Types', () => { + describe('ToolUsageInfo', () => { + it('should create a valid ToolUsageInfo object', () => { + const toolUsage: ToolUsageInfo = { + toolName: 'test-tool', + status: 'completed', + startTime: '2025-10-06T00:00:00Z', + endTime: '2025-10-06T00:00:05Z', + toolInput: { param: 'value' }, + toolOutput: 'result', + mcpServerName: 'test-server' + }; + + expect(toolUsage.toolName).toBe('test-tool'); + expect(toolUsage.status).toBe('completed'); + expect(toolUsage.mcpServerName).toBe('test-server'); + }); + + it('should support all status types', () => { + const statuses: ToolUsageInfo['status'][] = ['started', 'in_progress', 'completed', 'failed']; + statuses.forEach((status) => { + const toolUsage: ToolUsageInfo = { + toolName: 'test', + status, + startTime: '2025-10-06T00:00:00Z' + }; + expect(toolUsage.status).toBe(status); + }); + }); + }); + + describe('ThinkingState', () => { + it('should create a valid ThinkingState object', () => { + const thinking: ThinkingState = { + isThinking: true, + thinkingMessage: 'Analyzing your request...', + startTime: '2025-10-06T00:00:00Z' + }; + + expect(thinking.isThinking).toBe(true); + expect(thinking.startTime).toBe('2025-10-06T00:00:00Z'); + }); + }); + + describe('ChatResponse with extended fields', () => { + it('should create a ChatResponse with streaming fields', () => { + const response: ChatResponse = { + data: 'partial response', + conversationId: 'conv-123', + messageId: 'msg-123', + isStreaming: true, + streamComplete: false + }; + + expect(response.isStreaming).toBe(true); + expect(response.streamComplete).toBe(false); + }); + + it('should create a ChatResponse with toolUsage', () => { + const response: ChatResponse = { + data: 'response', + conversationId: 'conv-123', + toolUsage: { + toolName: 'search', + status: 'completed', + startTime: '2025-10-06T00:00:00Z' + } + }; + + expect(response.toolUsage?.toolName).toBe('search'); + expect(response.toolUsage?.status).toBe('completed'); + }); + + it('should create a ChatResponse with thinking state', () => { + const response: ChatResponse = { + conversationId: 'conv-123', + thinking: { + isThinking: true, + startTime: '2025-10-06T00:00:00Z' + } + }; + + expect(response.thinking?.isThinking).toBe(true); + expect(response.thinking?.startTime).toBe('2025-10-06T00:00:00Z'); + }); + + it('should create a ChatResponse with all advanced features', () => { + const response: ChatResponse = { + data: 'comprehensive response', + conversationId: 'conv-123', + messageId: 'msg-123', + isStreaming: false, + streamComplete: true, + toolUsage: { + toolName: 'calculator', + status: 'completed', + startTime: '2025-10-06T00:00:00Z', + endTime: '2025-10-06T00:00:02Z' + }, + thinking: { + isThinking: false, + startTime: '2025-10-06T00:00:00Z' + } + }; + + expect(response.data).toBe('comprehensive response'); + expect(response.streamComplete).toBe(true); + expect(response.toolUsage?.status).toBe('completed'); + expect(response.thinking?.isThinking).toBe(false); + }); + }); + + describe('AgentBuilderChatResponse', () => { + it('should create a valid AgentBuilderChatResponse', () => { + const response: AgentBuilderChatResponse = { + data: 'agent response', + conversationId: 'conv-123', + messageId: 'msg-123', + isStreaming: true, + toolUsage: { + toolName: 'mcp-tool', + status: 'in_progress', + startTime: '2025-10-06T00:00:00Z', + mcpServerName: 'my-mcp-server' + } + }; + + expect(response.data).toBe('agent response'); + expect(response.isStreaming).toBe(true); + expect(response.toolUsage?.mcpServerName).toBe('my-mcp-server'); + }); + + it('should be compatible with ChatResponse', () => { + const agentResponse: AgentBuilderChatResponse = { + data: 'test', + conversationId: 'conv-123' + }; + + const chatResponse: ChatResponse = agentResponse; + expect(chatResponse.data).toBe('test'); + }); + }); +}); diff --git a/source/ui-chat/src/__tests__/pages/chat/ChatPage.test.tsx b/source/ui-chat/src/__tests__/pages/chat/ChatPage.test.tsx index 899009f7..163029ee 100644 --- a/source/ui-chat/src/__tests__/pages/chat/ChatPage.test.tsx +++ b/source/ui-chat/src/__tests__/pages/chat/ChatPage.test.tsx @@ -185,7 +185,7 @@ describe('ChatPage', () => { // Verify the message was sent await waitFor(() => { - expect(addUserMessageMock).toHaveBeenCalledWith('Hello, this is a test message'); + expect(addUserMessageMock).toHaveBeenCalledWith('Hello, this is a test message', undefined); expect(sendJsonMessageMock).toHaveBeenCalled(); }); }); @@ -352,16 +352,18 @@ describe('ChatPage', () => { userContextValue: errorUserContext }); - expect(store.getState().notifications.notifications).toEqual( - expect.arrayContaining([ - expect.objectContaining({ - 'content': 'Failed to fetch data. Please try again or contact a system administrator.', - 'header': 'Error', - 'id': 'data-fetch-error', - 'type': 'error' - }) - ]) - ); + await waitFor(() => { + expect(store.getState().notifications.notifications).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + 'content': 'Failed to load deployment', + 'header': 'Error', + 'id': 'data-fetch-error', + 'type': 'error' + }) + ]) + ); + }); expect(screen.queryByTestId('chat-content-layout')).not.toBeInTheDocument(); }); diff --git a/source/ui-chat/src/__tests__/pages/chat/components/input/ChatInput.test.tsx b/source/ui-chat/src/__tests__/pages/chat/components/input/ChatInput.test.tsx index 320313ed..b9d45e6e 100644 --- a/source/ui-chat/src/__tests__/pages/chat/components/input/ChatInput.test.tsx +++ b/source/ui-chat/src/__tests__/pages/chat/components/input/ChatInput.test.tsx @@ -1,13 +1,123 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { describe, expect, vi } from 'vitest'; +import { describe, expect, vi, beforeEach, test } from 'vitest'; +import { waitFor } from '@testing-library/react'; +import * as React from 'react'; import createWrapper from '@cloudscape-design/components/test-utils/dom'; import { TestStoreFactory, testStoreFactory } from '@/__tests__/utils/test-redux-store-factory'; import { ChatInput } from '@/pages/chat/components/input/ChatInput'; import { CONSTRAINT_TEXT_ERROR_COLOR, DOCS_LINKS } from '@/utils/constants'; import { DEFAULT_AGENT_CONFIG, DEFAULT_TEXT_CONFIG } from '@/__tests__/utils/test-configs'; +vi.mock('@cloudscape-design/components', async () => { + const actual = await vi.importActual('@cloudscape-design/components'); + return { + ...actual, + FileInput: vi.fn(({ onChange, ...props }: any) => { + const handleChange = (files: File[]) => { + if (onChange) { + onChange({ detail: { value: files } }); + } + }; + + (global as any).__mockFileInputChange = handleChange; + + return React.createElement( + 'div', + { + className: 'awsui-file-input', + 'data-testid': 'file-input-wrapper' + }, + [ + React.createElement('input', { + key: 'native-input', + ...props, + type: 'file', + 'data-testid': 'mock-file-input', + className: 'awsui-file-input-native', + onChange: () => {} + }) + ] + ); + }) + }; +}); + +vi.mock('@/services/fileUploadService', () => ({ + uploadFiles: vi.fn().mockImplementation((files: File[]) => + Promise.resolve({ + results: files.map((file) => ({ + success: true, + fileName: file.name, + fileKey: `test-key-${file.name}`, + error: null, + attempts: 1 + })), + allSuccessful: true, + successCount: files.length, + failureCount: 0, + uploadedFiles: files.map((file) => ({ + key: `test-key-${file.name}`, + fileName: file.name, + fileContentType: file.type, + fileExtension: file.name.split('.').pop() || '', + fileSize: file.size, + messageId: 'test-message-id', + conversationId: 'test-conversation-id' + })), + messageId: 'test-message-id' + }) + ), + deleteFiles: vi.fn().mockResolvedValue({ + results: [], + allSuccessful: true, + successCount: 0, + failureCount: 0 + }) +})); + +// Mock the file upload utilities +vi.mock('@/utils/file-upload', () => ({ + validateFile: vi.fn(() => null), // Return null for valid files + validateFiles: vi.fn(() => []), // Return empty array for no errors + generateConversationId: vi.fn(() => 'test-conversation-id'), + isFileCountExceeded: vi.fn(() => ({ exceeded: false })) // Return no count exceeded by default +})); + +// Mock the useFileUpload hook +vi.mock('@/hooks/use-file-upload', () => ({ + useFileUpload: vi.fn(() => ({ + files: [], + uploadedFiles: [], + isUploading: false, + isDeleting: false, + uploadProgress: {}, + uploadErrors: {}, + deleteErrors: {}, + addFiles: vi.fn(), + removeFile: vi.fn(), + clearFiles: vi.fn(), + uploadFiles: vi.fn(), + deleteUploadedFiles: vi.fn(), + generateConversationId: vi.fn(() => 'test-conversation-id'), + generateMessageId: vi.fn(() => 'test-message-id') + })) +})); + +vi.mock('@/contexts/UserContext', () => ({ + useUser: vi.fn(() => ({ + getAccessToken: vi.fn(() => Promise.resolve('mock-token')) + })) +})); + +const simulateFileSelection = (fileInput: any, files: File[]) => { + const changeHandler = (global as any).__mockFileInputChange; + if (changeHandler) { + changeHandler(files); + } +}; + describe('ChatInput', () => { test('renders prompt input with correct default props', () => { const onSend = vi.fn(); @@ -220,15 +330,406 @@ describe('ChatInput', () => { expect(constraint?.getElement().textContent).toContain('0/10k characters.'); }); - test('calls onSend with input value when action button is clicked', () => { - const onSend = vi.fn(); - const { container } = testStoreFactory.renderWithStore(); - const wrapper = createWrapper(container); + describe('Multimodal File Upload', () => { + const multimodalConfig = { + ...DEFAULT_AGENT_CONFIG, + UseCaseConfig: { + UseCaseName: 'test-agent-builder-use-case', + UseCaseType: 'AgentBuilder' as const, + LlmParams: { + RAGEnabled: false, + MultimodalParams: { + MultimodalEnabled: true + } + }, + FeedbackParams: { + FeedbackEnabled: true + } + } + }; - const promptInput = wrapper.findPromptInput(); - promptInput?.setTextareaValue('test message'); - promptInput?.findActionButton()?.click(); + beforeEach(() => { + vi.clearAllMocks(); + }); - expect(onSend).toHaveBeenCalledWith('test message'); + test('shows file input when multimodal is enabled', () => { + const onSend = vi.fn(); + const { container } = testStoreFactory.renderWithStore(, { + config: { runtimeConfig: multimodalConfig } + }); + const wrapper = createWrapper(container); + + const fileInput = wrapper.find('[data-testid="file-input-wrapper"]'); + expect(fileInput).toBeTruthy(); + const element = fileInput?.getElement(); + expect(element).toBeTruthy(); + }); + + test('does not show file input when multimodal is disabled', () => { + const onSend = vi.fn(); + const { container } = testStoreFactory.renderWithStore(); + const wrapper = createWrapper(container); + + const fileInput = wrapper.findFileInput(); + expect(fileInput).toBeFalsy(); + }); + + test('generates conversation ID when files are added', async () => { + const onSend = vi.fn(); + const onSetConversationId = vi.fn(); + const { uploadFiles } = await import('@/services/fileUploadService'); + const { validateFile } = await import('@/utils/file-upload'); + + vi.mocked(validateFile).mockReturnValue(null); + vi.mocked(uploadFiles).mockResolvedValue({ + results: [], + allSuccessful: true, + successCount: 1, + failureCount: 0, + uploadedFiles: [ + { + key: 'test-key', + fileName: 'test.jpg', + fileContentType: 'image/jpeg', + fileExtension: 'jpg', + fileSize: 1024 + } + ], + messageId: 'test-message-id' + }); + + const { container } = testStoreFactory.renderWithStore( + , + { config: { runtimeConfig: multimodalConfig } } + ); + const wrapper = createWrapper(container); + + const fileInput = wrapper.findFileInput(); + const testFile = new File(['test'], 'test.jpg', { type: 'image/jpeg' }); + + simulateFileSelection(fileInput, [testFile]); + + await waitFor(() => { + expect(onSetConversationId).toHaveBeenCalledWith('test-conversation-id'); + }); + }); + + test('displays file validation errors', async () => { + const onSend = vi.fn(); + const { validateFile } = await import('@/utils/file-upload'); + + vi.mocked(validateFile).mockReturnValue({ + fileName: 'invalid.txt', + error: new Error('File type not supported') + }); + + const { container } = testStoreFactory.renderWithStore(, { + config: { runtimeConfig: multimodalConfig } + }); + const wrapper = createWrapper(container); + + const fileInput = wrapper.findFileInput(); + const testFile = new File(['test'], 'invalid.txt', { type: 'text/plain' }); + + simulateFileSelection(fileInput, [testFile]); + + await waitFor(() => { + const fileTokenGroup = wrapper.find('[role="group"]'); + expect(fileTokenGroup).toBeTruthy(); + }); + }); + + test('shows dismiss button for uploaded files', async () => { + const onSend = vi.fn(); + const { validateFile } = await import('@/utils/file-upload'); + + vi.mocked(validateFile).mockReturnValue(null); + + const { container } = testStoreFactory.renderWithStore( + , + { config: { runtimeConfig: multimodalConfig } } + ); + const wrapper = createWrapper(container); + + const fileInput = wrapper.findFileInput(); + const testFile = new File(['test'], 'test.jpg', { type: 'image/jpeg' }); + + simulateFileSelection(fileInput, [testFile]); + + await waitFor(() => { + const fileTokens = container.querySelectorAll('[role="group"]'); + expect(fileTokens.length).toBeGreaterThan(0); + }); + + const dismissButton = wrapper.find('[aria-label*="Remove file"]'); + expect(dismissButton).toBeTruthy(); + }); + + test('shows file count in constraint text when files are present', () => { + const onSend = vi.fn(); + const { container } = testStoreFactory.renderWithStore(, { + config: { runtimeConfig: multimodalConfig } + }); + const wrapper = createWrapper(container); + + const formField = wrapper.findFormField(); + const constraint = formField?.findConstraint(); + expect(constraint).toBeTruthy(); + }); + + test('changes placeholder text when files are present', () => { + const onSend = vi.fn(); + const { container } = testStoreFactory.renderWithStore(, { + config: { runtimeConfig: multimodalConfig } + }); + const wrapper = createWrapper(container); + + const promptInput = wrapper.findPromptInput(); + expect(promptInput?.findNativeTextarea()?.getElement()).toHaveAttribute('placeholder', 'Ask a question'); + }); + + test('handles file selection and shows file tokens', async () => { + const onSend = vi.fn(); + const onSetConversationId = vi.fn(); + const validateFile = vi.mocked((await import('@/utils/file-upload')).validateFile); + const validateFiles = vi.mocked((await import('@/utils/file-upload')).validateFiles); + + validateFile.mockReturnValue(null); + validateFiles.mockReturnValue([]); + + const { container } = testStoreFactory.renderWithStore( + , + { config: { runtimeConfig: multimodalConfig } } + ); + const wrapper = createWrapper(container); + + const fileInput = wrapper.findFileInput(); + + // Upload file + const testFile = new File(['test content'], 'document.txt', { type: 'text/plain' }); + simulateFileSelection(fileInput, [testFile]); + + // Verify file is added to the UI + await waitFor(() => { + const fileTokens = container.querySelectorAll('[role="group"]'); + expect(fileTokens.length).toBeGreaterThan(0); + }); + + // Verify conversation ID is generated + expect(onSetConversationId).toHaveBeenCalledWith('test-conversation-id'); + }); + + test('shows dropzone when files are being dragged', async () => { + const onSend = vi.fn(); + const onSetConversationId = vi.fn(); + + const { container } = testStoreFactory.renderWithStore( + , + { config: { runtimeConfig: multimodalConfig } } + ); + + const fileInput = container.querySelector('[data-testid="file-input-wrapper"]'); + expect(fileInput).toBeTruthy(); + + const chatInput = container.querySelector('[data-testid="chat-input"]'); + expect(chatInput).toBeTruthy(); + }); + + test('shows error state for failed file uploads', async () => { + const onSend = vi.fn(); + const onSetConversationId = vi.fn(); + const { validateFile } = await import('@/utils/file-upload'); + + vi.mocked(validateFile).mockReturnValue({ + fileName: 'failed-file.txt', + error: new Error('Upload failed') + }); + + const { container } = testStoreFactory.renderWithStore( + , + { config: { runtimeConfig: multimodalConfig } } + ); + const wrapper = createWrapper(container); + + const fileInput = wrapper.findFileInput(); + const testFile = new File(['test content'], 'failed-file.txt', { type: 'text/plain' }); + + simulateFileSelection(fileInput, [testFile]); + + await waitFor(() => { + const fileTokens = container.querySelectorAll('[role="group"]'); + expect(fileTokens.length).toBeGreaterThan(0); + }); + + const errorElements = container.querySelectorAll('[class*="error"]'); + expect(errorElements.length).toBeGreaterThan(0); + }); + + test('replaces files with same name when uploading', async () => { + const onSend = vi.fn(); + const onSetConversationId = vi.fn(); + const { uploadFiles } = await import('@/services/fileUploadService'); + const { validateFile } = await import('@/utils/file-upload'); + + vi.mocked(validateFile).mockReturnValue(null); + + vi.mocked(uploadFiles).mockResolvedValueOnce({ + results: [{ success: true, fileName: 'document.txt', fileKey: 'test-key-1', error: null, attempts: 1 }], + allSuccessful: true, + successCount: 1, + failureCount: 0, + uploadedFiles: [ + { + key: 'test-key-1', + fileName: 'document.txt', + fileContentType: 'text/plain', + fileExtension: 'txt', + fileSize: 1024, + messageId: 'test-message-id-1' + } + ], + messageId: 'test-message-id-1' + }); + + // second upload (replacement) + vi.mocked(uploadFiles).mockResolvedValueOnce({ + results: [{ success: true, fileName: 'document.txt', fileKey: 'test-key-2', error: null, attempts: 1 }], + allSuccessful: true, + successCount: 1, + failureCount: 0, + uploadedFiles: [ + { + key: 'test-key-2', + fileName: 'document.txt', + fileContentType: 'text/plain', + fileExtension: 'txt', + fileSize: 2048, + messageId: 'test-message-id-2' + } + ], + messageId: 'test-message-id-2' + }); + + const { container } = testStoreFactory.renderWithStore( + , + { config: { runtimeConfig: multimodalConfig } } + ); + const wrapper = createWrapper(container); + + const fileInput = wrapper.findFileInput(); + + const originalFile = new File(['original content'], 'document.txt', { type: 'text/plain' }); + simulateFileSelection(fileInput, [originalFile]); + + await waitFor(() => { + const fileTokens = container.querySelectorAll('[role="group"]'); + expect(fileTokens.length).toBeGreaterThan(0); + }); + + // Upload replacement file with same name + const replacementFile = new File(['new content'], 'document.txt', { type: 'text/plain' }); + simulateFileSelection(fileInput, [replacementFile]); + + // File should still be present in UI + await waitFor(() => { + const fileTokens = container.querySelectorAll('[role="group"]'); + expect(fileTokens.length).toBeGreaterThan(0); + }); + }); + + test('does not show file input when multimodal is disabled', async () => { + const onSend = vi.fn(); + + const nonMultimodalConfig = { + ...DEFAULT_TEXT_CONFIG, + UseCaseConfig: { + ...DEFAULT_TEXT_CONFIG.UseCaseConfig, + LlmParams: { + ...DEFAULT_TEXT_CONFIG.UseCaseConfig.LlmParams, + MultimodalParams: { + MultimodalEnabled: false + } + } + } + }; + + const { container } = testStoreFactory.renderWithStore( + , + { config: { runtimeConfig: nonMultimodalConfig } } + ); + const wrapper = createWrapper(container); + + const fileInput = wrapper.find('[data-testid="file-input-wrapper"]'); + expect(fileInput).toBeNull(); + + const textarea = wrapper.find('textarea'); + expect(textarea).toBeTruthy(); + expect(textarea?.getElement().getAttribute('placeholder')).toBe('Ask a question'); + }); + + test('should sort files with errors first in the UI', async () => { + const onSend = vi.fn(); + const onSetConversationId = vi.fn(); + const { validateFile } = await import('@/utils/file-upload'); + + vi.mocked(validateFile) + .mockReturnValueOnce(null) + .mockReturnValueOnce({ + fileName: 'error-file.txt', + error: new Error('Invalid file type') + }) + .mockReturnValueOnce(null); + + const { container } = testStoreFactory.renderWithStore( + , + { config: { runtimeConfig: multimodalConfig } } + ); + const wrapper = createWrapper(container); + + const fileInput = wrapper.findFileInput(); + const testFiles = [ + new File(['content1'], 'valid-file.txt', { type: 'text/plain' }), + new File(['content2'], 'error-file.txt', { type: 'text/plain' }), + new File(['content3'], 'another-valid.txt', { type: 'text/plain' }) + ]; + + simulateFileSelection(fileInput, testFiles); + + await waitFor(() => { + const fileTokens = container.querySelectorAll('[role="group"]'); + expect(fileTokens.length).toBeGreaterThan(0); + }); + + const fileElements = container.querySelectorAll('[role="group"]'); + expect(fileElements.length).toBeGreaterThanOrEqual(3); + }); + + describe('Global Count Validation', () => { + test('should validate file count limits', async () => { + const { isFileCountExceeded } = await import('@/utils/file-upload'); + + const validFiles = [ + new File(['content1'], 'doc1.pdf', { type: 'application/pdf' }), + new File(['content2'], 'image1.jpg', { type: 'image/jpeg' }) + ]; + + vi.mocked(isFileCountExceeded).mockReturnValueOnce({ exceeded: false }); + const withinLimits = isFileCountExceeded(validFiles); + expect(withinLimits.exceeded).toBe(false); + + const tooManyFiles = Array.from( + { length: 10 }, + (_, i) => new File([`content${i}`], `image${i}.jpg`, { type: 'image/jpeg' }) + ); + + vi.mocked(isFileCountExceeded).mockReturnValueOnce({ + exceeded: true, + message: '10 images attached. Only 5 images allowed.' + }); + const exceedsLimits = isFileCountExceeded(tooManyFiles); + expect(exceedsLimits.exceeded).toBe(true); + expect(exceedsLimits.message).toBeDefined(); + }); + }); }); }); diff --git a/source/ui-chat/src/__tests__/pages/chat/components/messages/IncomingMessage.test.tsx b/source/ui-chat/src/__tests__/pages/chat/components/messages/IncomingMessage.test.tsx index cebef4ae..5adb7ec6 100644 --- a/source/ui-chat/src/__tests__/pages/chat/components/messages/IncomingMessage.test.tsx +++ b/source/ui-chat/src/__tests__/pages/chat/components/messages/IncomingMessage.test.tsx @@ -262,3 +262,234 @@ it('resets feedback form when messageId changes', () => { expect(customMockUseFeedback.setShowFeedbackForm).toHaveBeenCalledWith(false); }); + +describe('ThinkingIndicator rendering', () => { + const mockUseFeedback = { + showFeedbackForm: false, + setShowFeedbackForm: vi.fn(), + feedbackType: 'helpful' as const, + setFeedbackType: vi.fn(), + feedbackSubmitted: false, + feedbackError: null, + isSubmittingFeedback: false, + handleFeedbackButtonClick: vi.fn(), + handleFeedbackSubmit: vi.fn() + }; + + beforeEach(() => { + vi.spyOn(useFeedbackModule, 'useFeedback').mockReturnValue(mockUseFeedback); + }); + + it('renders thinking indicator for AgentBuilder messages with thinking metadata', () => { + const messageWithThinking = { + type: 'chat-bubble' as const, + authorId: 'assistant-1', + content: 'Response content', + timestamp: '2024-01-01T12:00:00Z', + thinking: { + duration: 5, + type: 'analyzing' as const, + startTime: '2024-01-01T12:00:00Z', + endTime: '2024-01-01T12:00:05Z', + strippedContent: 'Thinking content here' + } + }; + + const mockAuthor = { + type: 'assistant' as const, + name: 'AI Assistant' + }; + + const props = { + message: messageWithThinking, + author: mockAuthor, + showActions: true, + conversationId: 'test-id' + }; + + // Set use case type to AgentBuilder using proper config structure + testStoreFactory.renderWithStore(, { + config: { + runtimeConfig: { + IsInternalUser: 'false', + ModelProviderName: 'Bedrock', + UserPoolId: 'test-pool', + SocketRoutes: [], + UserPoolClientId: 'test-client', + CognitoRedirectUrl: 'http://localhost', + ApiEndpoint: 'http://localhost', + SocketURL: 'ws://localhost', + AwsRegion: 'us-east-1', + CognitoDomain: 'test-domain', + UseCaseConfigKey: 'test-key', + UseCaseId: 'test-id', + RestApiEndpoint: 'http://localhost', + UseCaseConfig: { + UseCaseType: 'AgentBuilder', + UseCaseName: 'test-agent' + } as any + } + } + }); + + expect(screen.getByTestId('message-thinking-indicator')).toBeInTheDocument(); + expect(screen.getByText(/Thought for/)).toBeInTheDocument(); + }); + + it('renders thinking indicator for Workflow messages with thinking metadata', () => { + const messageWithThinking = { + type: 'chat-bubble' as const, + authorId: 'assistant-1', + content: 'Response content', + timestamp: '2024-01-01T12:00:00Z', + thinking: { + duration: 5, + startTime: '2024-01-01T12:00:00Z', + endTime: '2024-01-01T12:00:05Z', + strippedContent: 'Thinking content here' + } + }; + + const mockAuthor = { + type: 'assistant' as const, + name: 'AI Assistant' + }; + + const props = { + message: messageWithThinking, + author: mockAuthor, + showActions: true, + conversationId: 'test-id' + }; + + // Set use case type to Workflow using proper config structure + testStoreFactory.renderWithStore(, { + config: { + runtimeConfig: { + IsInternalUser: 'false', + ModelProviderName: 'Bedrock', + UserPoolId: 'test-pool', + SocketRoutes: [], + UserPoolClientId: 'test-client', + CognitoRedirectUrl: 'http://localhost', + ApiEndpoint: 'http://localhost', + SocketURL: 'ws://localhost', + AwsRegion: 'us-east-1', + CognitoDomain: 'test-domain', + UseCaseConfigKey: 'test-key', + UseCaseId: 'test-id', + RestApiEndpoint: 'http://localhost', + UseCaseConfig: { + UseCaseType: 'Workflow', + UseCaseName: 'test-workflow' + } as any + } + } + }); + + expect(screen.getByTestId('message-thinking-indicator')).toBeInTheDocument(); + expect(screen.getByText(/Thought for/)).toBeInTheDocument(); + }); + + it('does not render thinking indicator for Text use case', () => { + const messageWithThinking = { + type: 'chat-bubble' as const, + authorId: 'assistant-1', + content: 'Response content', + timestamp: '2024-01-01T12:00:00Z', + thinking: { + duration: 5, + type: 'analyzing' as const, + startTime: '2024-01-01T12:00:00Z', + endTime: '2024-01-01T12:00:05Z' + } + }; + + const mockAuthor = { + type: 'assistant' as const, + name: 'AI Assistant' + }; + + const props = { + message: messageWithThinking, + author: mockAuthor, + showActions: true, + conversationId: 'test-id' + }; + + // Set use case type to Text using proper config structure + testStoreFactory.renderWithStore(, { + config: { + runtimeConfig: { + IsInternalUser: 'false', + ModelProviderName: 'Bedrock', + UserPoolId: 'test-pool', + SocketRoutes: [], + UserPoolClientId: 'test-client', + CognitoRedirectUrl: 'http://localhost', + ApiEndpoint: 'http://localhost', + SocketURL: 'ws://localhost', + AwsRegion: 'us-east-1', + CognitoDomain: 'test-domain', + UseCaseConfigKey: 'test-key', + UseCaseId: 'test-id', + RestApiEndpoint: 'http://localhost', + UseCaseConfig: { + UseCaseType: 'Text', + UseCaseName: 'test-text' + } as any + } + } + }); + + expect(screen.queryByTestId('message-thinking-indicator')).not.toBeInTheDocument(); + }); + + it('does not render thinking indicator when message has no thinking metadata', () => { + const messageWithoutThinking = { + type: 'chat-bubble' as const, + authorId: 'assistant-1', + content: 'Response content', + timestamp: '2024-01-01T12:00:00Z' + }; + + const mockAuthor = { + type: 'assistant' as const, + name: 'AI Assistant' + }; + + const props = { + message: messageWithoutThinking, + author: mockAuthor, + showActions: true, + conversationId: 'test-id' + }; + + // Set use case type to AgentBuilder using proper config structure + testStoreFactory.renderWithStore(, { + config: { + runtimeConfig: { + IsInternalUser: 'false', + ModelProviderName: 'Bedrock', + UserPoolId: 'test-pool', + SocketRoutes: [], + UserPoolClientId: 'test-client', + CognitoRedirectUrl: 'http://localhost', + ApiEndpoint: 'http://localhost', + SocketURL: 'ws://localhost', + AwsRegion: 'us-east-1', + CognitoDomain: 'test-domain', + UseCaseConfigKey: 'test-key', + UseCaseId: 'test-id', + RestApiEndpoint: 'http://localhost', + UseCaseConfig: { + UseCaseType: 'AgentBuilder', + UseCaseName: 'test-agent' + } as any + } + } + }); + + expect(screen.queryByTestId('message-thinking-indicator')).not.toBeInTheDocument(); + }); +}); diff --git a/source/ui-chat/src/__tests__/pages/chat/components/messages/OutgoingMessage.test.tsx b/source/ui-chat/src/__tests__/pages/chat/components/messages/OutgoingMessage.test.tsx index f3f191b1..c48ba4b9 100644 --- a/source/ui-chat/src/__tests__/pages/chat/components/messages/OutgoingMessage.test.tsx +++ b/source/ui-chat/src/__tests__/pages/chat/components/messages/OutgoingMessage.test.tsx @@ -3,13 +3,46 @@ import '@cloudscape-design/chat-components/test-utils/dom'; -import { describe, it, expect, vi } from 'vitest'; +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import { fireEvent, render, screen } from '@testing-library/react'; import createWrapper from '@cloudscape-design/components/test-utils/dom'; import { ChatBubbleMessage } from '../../../../../pages/chat/types'; import { OutgoingMessageProps } from '../../../../../pages/chat/components/messages/types'; import { OutgoingMessage } from '../../../../../pages/chat/components/messages/OutgoingMessage'; +import { UploadedFile } from '../../../../../types/file-upload'; import { act } from 'react'; +import { UserProvider } from '../../../../../contexts/UserContext'; + +vi.mock('../../../../../contexts/UserContext', async () => { + const actual = await vi.importActual('../../../../../contexts/UserContext'); + return { + ...actual, + useUser: vi.fn(() => ({ + getAccessToken: vi.fn(() => Promise.resolve('mock-token')) + })), + UserProvider: ({ children }: { children: React.ReactNode }) => children + }; +}); + +vi.mock('@reduxjs/toolkit/query', () => ({ + createApi: vi.fn(), + fetchBaseQuery: vi.fn() +})); + +vi.mock('../../../../../store/solutionApi', () => ({ + useGetDeploymentQuery: vi.fn(() => ({ data: null, error: null })), + useLazyGetFileDownloadUrlQuery: () => [vi.fn(), {}] +})); + +vi.mock('react-redux', () => ({ + useDispatch: vi.fn(() => vi.fn()), + useSelector: vi.fn(() => ({})) +})); + +vi.mock('@aws-amplify/auth', () => ({ + getCurrentUser: vi.fn(() => Promise.resolve({ userId: 'test-user', username: 'testuser' })), + fetchUserAttributes: vi.fn(() => Promise.resolve({ name: 'Test User', email: 'test@example.com' })) +})); const customScreen = { ...screen, @@ -245,4 +278,75 @@ describe('OutgoingMessage', () => { vi.restoreAllMocks(); }); }); + + describe('FileDisplay', () => { + const mockFiles: UploadedFile[] = [ + { + key: 'file-key-1', + fileName: 'document.pdf', + fileContentType: 'application/pdf', + fileExtension: 'pdf', + fileSize: 1024000 + } + ]; + + it('renders FileDisplay component when files are present', () => { + const messageWithFiles: ChatBubbleMessage = { + ...mockMessage, + files: mockFiles + }; + + const propsWithFiles: OutgoingMessageProps = { + ...mockProps, + message: messageWithFiles + }; + + const { container } = render( + + + + ); + const fileDisplay = container.querySelector('[data-testid="file-display"]'); + expect(fileDisplay).toBeInTheDocument(); + }); + + it('does not render FileDisplay when no files are present', () => { + const { container } = render( + + + + ); + const fileDisplay = container.querySelector('[data-testid="file-display"]'); + expect(fileDisplay).not.toBeInTheDocument(); + }); + + it('renders files above message content in correct order', () => { + const messageWithFiles: ChatBubbleMessage = { + ...mockMessage, + content: 'Message with files attached', + files: mockFiles + }; + + const propsWithFiles: OutgoingMessageProps = { + ...mockProps, + message: messageWithFiles + }; + + const { container } = render( + + + + ); + + const fileDisplay = container.querySelector('[data-testid="file-display"]'); + const messageContent = container.querySelector('.outgoing-message__content-wrapper'); + + expect(fileDisplay).toBeInTheDocument(); + expect(messageContent).toBeInTheDocument(); + + if (fileDisplay && messageContent) { + expect(fileDisplay.compareDocumentPosition(messageContent)).toBe(Node.DOCUMENT_POSITION_FOLLOWING); + } + }); + }); }); diff --git a/source/ui-chat/src/__tests__/pages/chat/types.test.ts b/source/ui-chat/src/__tests__/pages/chat/types.test.ts new file mode 100644 index 00000000..6243cf2b --- /dev/null +++ b/source/ui-chat/src/__tests__/pages/chat/types.test.ts @@ -0,0 +1,72 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ChatActionTypes, ChatActionType } from '../../../pages/chat/types'; + +describe('ChatActionTypes', () => { + it('should include all existing action types', () => { + expect(ChatActionTypes.ADD_USER_MESSAGE).toBe('ADD_USER_MESSAGE'); + expect(ChatActionTypes.UPDATE_AI_RESPONSE).toBe('UPDATE_AI_RESPONSE'); + expect(ChatActionTypes.COMPLETE_AI_RESPONSE).toBe('COMPLETE_AI_RESPONSE'); + expect(ChatActionTypes.SET_CONVERSATION_ID).toBe('SET_CONVERSATION_ID'); + expect(ChatActionTypes.ADD_SOURCE_DOCUMENT).toBe('ADD_SOURCE_DOCUMENT'); + expect(ChatActionTypes.SET_ERROR).toBe('SET_ERROR'); + expect(ChatActionTypes.SET_MESSAGES).toBe('SET_MESSAGES'); + expect(ChatActionTypes.ADD_REPHRASED_QUERY).toBe('ADD_REPHRASED_QUERY'); + expect(ChatActionTypes.RESET_CHAT).toBe('RESET_CHAT'); + }); + + it('should include streaming action types', () => { + expect(ChatActionTypes.START_STREAMING).toBe('START_STREAMING'); + expect(ChatActionTypes.UPDATE_STREAMING_CHUNK).toBe('UPDATE_STREAMING_CHUNK'); + expect(ChatActionTypes.COMPLETE_STREAMING).toBe('COMPLETE_STREAMING'); + }); + + it('should include tool usage action types', () => { + expect(ChatActionTypes.UPDATE_TOOL_USAGE).toBe('UPDATE_TOOL_USAGE'); + expect(ChatActionTypes.ADD_TOOL_USAGE).toBe('ADD_TOOL_USAGE'); + expect(ChatActionTypes.CLEAR_TOOL_USAGE).toBe('CLEAR_TOOL_USAGE'); + }); + + // Thinking is now managed as message metadata, not as separate actions + + it('should have ChatActionType union type that includes all action types', () => { + // This is a compile-time check - if this compiles, the union type is correct + const testActionType: ChatActionType = ChatActionTypes.START_STREAMING; + expect(testActionType).toBe('START_STREAMING'); + + const testActionType2: ChatActionType = ChatActionTypes.ADD_TOOL_USAGE; + expect(testActionType2).toBe('ADD_TOOL_USAGE'); + + const testActionType3: ChatActionType = ChatActionTypes.CLEAR_TOOL_USAGE; + expect(testActionType3).toBe('CLEAR_TOOL_USAGE'); + }); + + it('should have all action types as const values', () => { + // Verify that the object is readonly + const actionTypes = ChatActionTypes; + expect(Object.isFrozen(actionTypes)).toBe(false); // as const doesn't freeze, but makes readonly + + // Verify all keys exist + const expectedKeys = [ + 'ADD_USER_MESSAGE', + 'UPDATE_AI_RESPONSE', + 'COMPLETE_AI_RESPONSE', + 'SET_CONVERSATION_ID', + 'ADD_SOURCE_DOCUMENT', + 'SET_ERROR', + 'SET_MESSAGES', + 'ADD_REPHRASED_QUERY', + 'RESET_CHAT', + 'START_STREAMING', + 'UPDATE_STREAMING_CHUNK', + 'COMPLETE_STREAMING', + 'UPDATE_TOOL_USAGE', + 'ADD_TOOL_USAGE', + 'CLEAR_TOOL_USAGE' + ]; + + const actualKeys = Object.keys(actionTypes); + expect(actualKeys).toEqual(expectedKeys); + }); +}); diff --git a/source/ui-chat/src/__tests__/reducers/chat-reducer.test.ts b/source/ui-chat/src/__tests__/reducers/chat-reducer.test.ts index 90e7fafb..4b5c59b6 100644 --- a/source/ui-chat/src/__tests__/reducers/chat-reducer.test.ts +++ b/source/ui-chat/src/__tests__/reducers/chat-reducer.test.ts @@ -6,8 +6,9 @@ import { ChatState } from '../../hooks/use-chat-message'; import type { SourceDocument } from '../../models'; import { ChatAction, chatReducer } from '../../reducers/chat-reducer'; -import { AlertMessage, ChatBubbleMessage, Message } from '../../pages/chat/types'; +import { AlertMessage, ChatBubbleMessage, AgentBuilderChatBubbleMessage, Message } from '../../pages/chat/types'; import { CHAT_LOADING_DEFAULT_MESSAGE } from '../../utils/constants'; +import { UploadedFile } from '../../types/file-upload'; describe('chatReducer', () => { const initialState: ChatState = { @@ -15,9 +16,30 @@ describe('chatReducer', () => { currentResponse: '', isGenAiResponseLoading: false, sourceDocuments: [], - conversationId: '' + conversationId: '', + isStreaming: false, + streamingMessageId: undefined, + thinking: undefined, + toolUsage: [] }; + const mockFiles: UploadedFile[] = [ + { + key: 'file-key-1', + fileName: 'document.pdf', + fileContentType: 'application/pdf', + fileExtension: 'pdf', + fileSize: 1024000 + }, + { + key: 'file-key-2', + fileName: 'image.jpg', + fileContentType: 'image/jpeg', + fileExtension: 'jpg', + fileSize: 512000 + } + ]; + it('should handle UPDATE_AI_RESPONSE with messageId', () => { const state: ChatState = { ...initialState, @@ -30,7 +52,7 @@ describe('chatReducer', () => { }; const newState = chatReducer(state, action); - + const message = newState.messages[0] as ChatBubbleMessage; expect(message.content).toBe('Hello, how can I help?'); expect(message.authorId).toBe('assistant'); @@ -166,12 +188,18 @@ describe('chatReducer', () => { timestamp: expect.any(String) }; - const expectedLoadingMessage: ChatBubbleMessage = { + const expectedLoadingMessage: AgentBuilderChatBubbleMessage = { type: 'chat-bubble', authorId: 'assistant', - content: CHAT_LOADING_DEFAULT_MESSAGE, + content: '', timestamp: expect.any(String), - avatarLoading: true + avatarLoading: true, + thinking: { + duration: 0, + startTime: expect.any(String), + endTime: '', + strippedContent: undefined + } }; expect(newState.messages[0]).toEqual(expectedUserMessage); @@ -179,6 +207,76 @@ describe('chatReducer', () => { expect(newState.isGenAiResponseLoading).toBe(true); }); + it('should handle ADD_USER_MESSAGE with files', () => { + const action: ChatAction = { + type: 'ADD_USER_MESSAGE', + payload: { content: 'Hello with files', authorId: 'user-1', files: mockFiles } + }; + + const newState = chatReducer(initialState, action); + + const expectedUserMessage: ChatBubbleMessage = { + type: 'chat-bubble', + authorId: 'user-1', + content: 'Hello with files', + timestamp: expect.any(String), + files: mockFiles + }; + + const expectedLoadingMessage: AgentBuilderChatBubbleMessage = { + type: 'chat-bubble', + authorId: 'assistant', + content: '', + timestamp: expect.any(String), + avatarLoading: true, + thinking: { + duration: 0, + startTime: expect.any(String), + endTime: '', + strippedContent: undefined + } + }; + + expect(newState.messages[0]).toEqual(expectedUserMessage); + expect(newState.messages[1]).toEqual(expectedLoadingMessage); + expect(newState.isGenAiResponseLoading).toBe(true); + }); + + it('should handle ADD_USER_MESSAGE with empty files array', () => { + const action: ChatAction = { + type: 'ADD_USER_MESSAGE', + payload: { content: 'Hello', authorId: 'user-1', files: [] } + }; + + const newState = chatReducer(initialState, action); + + const expectedUserMessage: ChatBubbleMessage = { + type: 'chat-bubble', + authorId: 'user-1', + content: 'Hello', + timestamp: expect.any(String), + files: [] + }; + + expect(newState.messages[0]).toEqual(expectedUserMessage); + expect(newState.isGenAiResponseLoading).toBe(true); + }); + + it('should handle ADD_USER_MESSAGE with single file', () => { + const singleFile = [mockFiles[0]]; + const action: ChatAction = { + type: 'ADD_USER_MESSAGE', + payload: { content: 'Hello with one file', authorId: 'user-1', files: singleFile } + }; + + const newState = chatReducer(initialState, action); + + const userMessage = newState.messages[0] as ChatBubbleMessage; + expect(userMessage.files).toEqual(singleFile); + expect(userMessage.files).toHaveLength(1); + expect(userMessage.files![0].fileName).toBe('document.pdf'); + }); + it('should handle SET_MESSAGES', () => { const messages: Message[] = [ { @@ -199,6 +297,34 @@ describe('chatReducer', () => { expect(newState.messages).toEqual(messages); }); + it('should handle SET_MESSAGES with files', () => { + const messagesWithFiles: Message[] = [ + { + type: 'chat-bubble', + authorId: 'user-1', + content: 'Hello with files', + timestamp: new Date().toISOString(), + files: mockFiles + }, + { + type: 'chat-bubble', + authorId: 'assistant', + content: 'Response to files', + timestamp: new Date().toISOString() + } + ]; + + const action: ChatAction = { + type: 'SET_MESSAGES', + payload: messagesWithFiles + }; + + const newState = chatReducer(initialState, action); + + expect(newState.messages).toEqual(messagesWithFiles); + expect((newState.messages[0] as ChatBubbleMessage).files).toEqual(mockFiles); + }); + it('should handle RESET_CHAT', () => { const message: ChatBubbleMessage = { type: 'chat-bubble', @@ -212,7 +338,11 @@ describe('chatReducer', () => { currentResponse: 'Hello', isGenAiResponseLoading: true, sourceDocuments: [{ excerpt: 'Test content' }], - conversationId: 'conv-123' + conversationId: 'conv-123', + isStreaming: false, + streamingMessageId: undefined, + thinking: undefined, + toolUsage: [] }; const action: ChatAction = { @@ -370,16 +500,20 @@ describe('chatReducer', () => { expect(newState).toEqual(initialState); expect(newState.messages).toHaveLength(0); }); -}); + it('should update messageId when none exists in the AI message', () => { const initialChatState: ChatState = { messages: [], currentResponse: '', isGenAiResponseLoading: false, sourceDocuments: [], - conversationId: '' + conversationId: '', + isStreaming: false, + streamingMessageId: undefined, + thinking: undefined, + toolUsage: [] }; - + const message: ChatBubbleMessage = { type: 'chat-bubble', authorId: 'assistant', @@ -406,3 +540,85 @@ describe('chatReducer', () => { expect(updatedMessage.content).toBe('Hello, how can I help?'); expect(updatedMessage.messageId).toBe('new-message-id'); // Should add the messageId }); + + it('should handle SET_CONVERSATION_ID', () => { + const action: ChatAction = { + type: 'SET_CONVERSATION_ID', + payload: 'new-conversation-id' + }; + + const newState = chatReducer(initialState, action); + + expect(newState.conversationId).toBe('new-conversation-id'); + }); + + it('should preserve conversation ID when adding messages', () => { + const stateWithConversationId: ChatState = { + ...initialState, + conversationId: 'existing-conversation-id' + }; + + const action: ChatAction = { + type: 'ADD_USER_MESSAGE', + payload: { content: 'Hello', authorId: 'user-1' } + }; + + const newState = chatReducer(stateWithConversationId, action); + + expect(newState.conversationId).toBe('existing-conversation-id'); + }); + + // Edge cases and error handling + it('should handle unknown action types gracefully', () => { + const action = { + type: 'UNKNOWN_ACTION', + payload: 'test' + } as any; + + const newState = chatReducer(initialState, action); + + expect(newState).toEqual(initialState); + }); + + it('should handle COMPLETE_AI_RESPONSE with files in messages', () => { + const assistantMessage: ChatBubbleMessage = { + type: 'chat-bubble', + authorId: 'assistant', + content: 'Hello', + timestamp: new Date().toISOString(), + avatarLoading: true + }; + + const userMessage: ChatBubbleMessage = { + type: 'chat-bubble', + authorId: 'user', + content: 'Question with files', + timestamp: new Date().toISOString(), + files: mockFiles + }; + + const state: ChatState = { + ...initialState, + messages: [userMessage, assistantMessage], + isGenAiResponseLoading: true, + currentResponse: 'Hello' + }; + + const action: ChatAction = { + type: 'COMPLETE_AI_RESPONSE' + }; + + const newState = chatReducer(state, action); + + expect(newState.isGenAiResponseLoading).toBe(false); + expect(newState.currentResponse).toBe(''); + + // Verify files are preserved in user message + const preservedUserMessage = newState.messages[0] as ChatBubbleMessage; + expect(preservedUserMessage.files).toEqual(mockFiles); + + // Verify assistant message is updated + const updatedAssistantMessage = newState.messages[1] as ChatBubbleMessage; + expect(updatedAssistantMessage.avatarLoading).toBe(false); + }); +}); diff --git a/source/ui-chat/src/__tests__/utils/construct-api-payload.test.ts b/source/ui-chat/src/__tests__/utils/construct-api-payload.test.ts index 32f72c14..f5620f37 100644 --- a/source/ui-chat/src/__tests__/utils/construct-api-payload.test.ts +++ b/source/ui-chat/src/__tests__/utils/construct-api-payload.test.ts @@ -9,12 +9,30 @@ import { shouldIncludePromptTemplate } from '../../utils/construct-api-payload'; import { USE_CASE_TYPES, USE_CASE_TYPES_ROUTE } from '../../utils/constants'; -import { AgentUseCaseConfig, TextUseCaseConfig } from '../../models'; +import { AgentBuilderUseCaseConfig, AgentUseCaseConfig, TextUseCaseConfig, WorkflowUseCaseConfig } from '../../models'; +import { UploadedFile } from '../../types/file-upload'; describe('constructPayload', () => { const baseConversationId = 'test-conversation-123'; const baseMessage = 'Hello, how are you?'; + const mockFiles: UploadedFile[] = [ + { + key: 'usecase-11111111-1111-1111-1111-111111111111/user-11111111-1111-1111-1111-111111111111/conv-11111111-1111-1111-1111-111111111111/msg-11111111-1111-1111-1111-111111111111/11111111-1111-1111-1111-111111111111.pdf', + fileName: 'document.pdf', + fileContentType: 'application/pdf', + fileExtension: 'pdf', + fileSize: 1024000 + }, + { + key: 'usecase-11111111-1111-1111-1111-111111111111/user-11111111-1111-1111-1111-111111111111/conv-11111111-1111-1111-1111-111111111111/msg-11111111-1111-1111-1111-111111111111/22222222-2222-2222-2222-222222222222.jpg', + fileName: 'image.jpg', + fileContentType: 'image/jpeg', + fileExtension: 'jpg', + fileSize: 512000 + } + ]; + describe('AGENT use case', () => { const agentConfig = { UseCaseType: USE_CASE_TYPES.AGENT @@ -180,84 +198,350 @@ describe('constructPayload', () => { }); }); }); -}); -describe('Helper functions', () => { - describe('shouldIncludePromptTemplate', () => { - const baseConfig = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - PromptParams: { - UserPromptEditingEnabled: true, - PromptTemplate: 'default template' - }, - RAGEnabled: false - } - } as TextUseCaseConfig; + describe('AGENT_BUILDER use case', () => { + const baseAgentBuilderConfig = { + UseCaseType: USE_CASE_TYPES.AGENT_BUILDER + } as AgentBuilderUseCaseConfig; - it('should return true when all conditions are met', () => { - const result = shouldIncludePromptTemplate(baseConfig, 'custom template'); - expect(result).toBe(true); + it('should construct correct payload for AGENT_BUILDER use case', () => { + const result = constructPayload({ + useCaseConfig: baseAgentBuilderConfig, + message: baseMessage, + conversationId: baseConversationId + }); + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.AGENT_BUILDER, + inputText: baseMessage, + conversationId: baseConversationId + }); }); - it('should return false when UserPromptEditingEnabled is false', () => { - const config = { - ...baseConfig, - LlmParams: { - ...baseConfig.LlmParams, - PromptParams: { - ...baseConfig.LlmParams.PromptParams, - UserPromptEditingEnabled: false - } + it('should include files in AGENT_BUILDER payload when provided', () => { + const useCaseId = 'test-use-case-123'; + const result = constructPayload({ + useCaseConfig: baseAgentBuilderConfig, + message: baseMessage, + conversationId: baseConversationId, + files: mockFiles, + useCaseId + }); + + const expectedFiles = [ + { fileReference: '11111111-1111-1111-1111-111111111111.pdf', fileName: 'document.pdf' }, + { fileReference: '22222222-2222-2222-2222-222222222222.jpg', fileName: 'image.jpg' } + ]; + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.AGENT_BUILDER, + inputText: baseMessage, + conversationId: baseConversationId, + files: expectedFiles + }); + }); + + it('should throw error when useCaseId is not provided and files are present', () => { + expect(() => { + constructPayload({ + useCaseConfig: baseAgentBuilderConfig, + message: baseMessage, + conversationId: baseConversationId, + files: mockFiles + }); + }).toThrow(/useCaseId is required when files are present/); + }); + + it('should extract filename from file key structure', () => { + const realisticFiles: UploadedFile[] = [ + { + key: 'usecase-11111111-1111-1111-1111-111111111111/user-11111111-1111-1111-1111-111111111111/conv-11111111-1111-1111-1111-111111111111/msg-11111111-1111-1111-1111-111111111111/11111111-1111-1111-1111-111111111111.pdf', + fileName: 'document.pdf', + fileContentType: 'application/pdf', + fileExtension: 'pdf', + fileSize: 1024000 } - }; - const result = shouldIncludePromptTemplate(config, 'custom template'); - expect(result).toBe(false); + ]; + + const result = constructPayload({ + useCaseConfig: baseAgentBuilderConfig, + message: baseMessage, + conversationId: baseConversationId, + files: realisticFiles, + useCaseId: 'test-use-case-123' + }); + + const expectedFiles = [ + { fileReference: '11111111-1111-1111-1111-111111111111.pdf', fileName: 'document.pdf' } + ]; + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.AGENT_BUILDER, + inputText: baseMessage, + conversationId: baseConversationId, + files: expectedFiles + }); }); - it('should return false when promptTemplate matches default', () => { - const result = shouldIncludePromptTemplate(baseConfig, 'default template'); - expect(result).toBe(false); + it('should not include files in AGENT_BUILDER payload when empty array', () => { + const result = constructPayload({ + useCaseConfig: baseAgentBuilderConfig, + message: baseMessage, + conversationId: baseConversationId, + files: [] + }); + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.AGENT_BUILDER, + inputText: baseMessage, + conversationId: baseConversationId + }); + expect(result).not.toHaveProperty('files'); }); - it('should return false when promptTemplate is undefined', () => { - const result = shouldIncludePromptTemplate(baseConfig, undefined); - expect(result).toBe(false); + it('should include messageId in AGENT_BUILDER payload when provided', () => { + const messageId = 'message-uuid-123'; + const result = constructPayload({ + useCaseConfig: baseAgentBuilderConfig, + message: baseMessage, + conversationId: baseConversationId, + messageId + }); + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.AGENT_BUILDER, + inputText: baseMessage, + conversationId: baseConversationId, + messageId + }); }); }); - describe('shouldIncludeAuthToken', () => { - const baseConfig = { - UseCaseType: USE_CASE_TYPES.TEXT, - LlmParams: { - PromptParams: { - UserPromptEditingEnabled: false, - PromptTemplate: 'default template' - }, - RAGEnabled: true - } - } as TextUseCaseConfig; + describe('WORKFLOW use case', () => { + const baseWorkflowConfig = { + UseCaseType: USE_CASE_TYPES.WORKFLOW + } as WorkflowUseCaseConfig; - it('should return true when RAGEnabled and authToken are present', () => { - const result = shouldIncludeAuthToken(baseConfig, 'test-token'); - expect(result).toBe(true); + it('should construct correct payload for WORKFLOW use case', () => { + const result = constructPayload({ + useCaseConfig: baseWorkflowConfig, + message: baseMessage, + conversationId: baseConversationId + }); + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.WORKFLOW, + inputText: baseMessage, + conversationId: baseConversationId + }); }); - it('should return false when RAGEnabled is false', () => { - const config = { - ...baseConfig, + it('should include files in WORKFLOW payload when provided', () => { + const useCaseId = 'test-use-case-123'; + const result = constructPayload({ + useCaseConfig: baseWorkflowConfig, + message: baseMessage, + conversationId: baseConversationId, + files: mockFiles, + useCaseId + }); + + const expectedFiles = [ + { fileReference: '11111111-1111-1111-1111-111111111111.pdf', fileName: 'document.pdf' }, + { fileReference: '22222222-2222-2222-2222-222222222222.jpg', fileName: 'image.jpg' } + ]; + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.WORKFLOW, + inputText: baseMessage, + conversationId: baseConversationId, + files: expectedFiles + }); + }); + + it('should throw error when useCaseId is not provided and files are present', () => { + expect(() => { + constructPayload({ + useCaseConfig: baseWorkflowConfig, + message: baseMessage, + conversationId: baseConversationId, + files: mockFiles + }); + }).toThrow(/useCaseId is required when files are present/); + }); + + it('should not include files in WORKFLOW payload when empty array', () => { + const result = constructPayload({ + useCaseConfig: baseWorkflowConfig, + message: baseMessage, + conversationId: baseConversationId, + files: [] + }); + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.WORKFLOW, + inputText: baseMessage, + conversationId: baseConversationId + }); + expect(result).not.toHaveProperty('files'); + }); + + it('should include messageId in WORKFLOW payload when provided', () => { + const messageId = 'message-uuid-123'; + const result = constructPayload({ + useCaseConfig: baseWorkflowConfig, + message: baseMessage, + conversationId: baseConversationId, + messageId + }); + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.WORKFLOW, + inputText: baseMessage, + conversationId: baseConversationId, + messageId + }); + }); + + describe('Error handling', () => { + it('should throw error for invalid use case type', () => { + const invalidConfig = { + UseCaseType: 'INVALID_TYPE' + } as any; + + expect(() => { + constructPayload({ + useCaseConfig: invalidConfig, + message: baseMessage, + conversationId: baseConversationId + }); + }).toThrow('Invalid use case type.'); + }); + }); + + describe('File handling edge cases', () => { + const workflowConfig = { + UseCaseType: USE_CASE_TYPES.WORKFLOW + } as WorkflowUseCaseConfig; + + it('should handle undefined files parameter', () => { + const result = constructPayload({ + useCaseConfig: workflowConfig, + message: baseMessage, + conversationId: baseConversationId, + files: undefined + }); + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.WORKFLOW, + inputText: baseMessage, + conversationId: baseConversationId + }); + expect(result).not.toHaveProperty('files'); + }); + + it('should handle single file in array', () => { + const singleFile = [mockFiles[0]]; + const result = constructPayload({ + useCaseConfig: workflowConfig, + message: baseMessage, + conversationId: baseConversationId, + files: singleFile, + useCaseId: 'test-use-case-id' + }); + + expect(result).toEqual({ + action: USE_CASE_TYPES_ROUTE.WORKFLOW, + inputText: baseMessage, + conversationId: baseConversationId, + files: [ + { + fileReference: '11111111-1111-1111-1111-111111111111.pdf', + fileName: 'document.pdf' + } + ] + }); + }); + }); + }); + + describe('Helper functions', () => { + describe('shouldIncludePromptTemplate', () => { + const baseConfig = { + UseCaseType: USE_CASE_TYPES.TEXT, LlmParams: { - ...baseConfig.LlmParams, + PromptParams: { + UserPromptEditingEnabled: true, + PromptTemplate: 'default template' + }, RAGEnabled: false } - }; - const result = shouldIncludeAuthToken(config, 'test-token'); - expect(result).toBe(false); + } as TextUseCaseConfig; + + it('should return true when all conditions are met', () => { + const result = shouldIncludePromptTemplate(baseConfig, 'custom template'); + expect(result).toBe(true); + }); + + it('should return false when UserPromptEditingEnabled is false', () => { + const config = { + ...baseConfig, + LlmParams: { + ...baseConfig.LlmParams, + PromptParams: { + ...baseConfig.LlmParams.PromptParams, + UserPromptEditingEnabled: false + } + } + }; + const result = shouldIncludePromptTemplate(config, 'custom template'); + expect(result).toBe(false); + }); + + it('should return false when promptTemplate matches default', () => { + const result = shouldIncludePromptTemplate(baseConfig, 'default template'); + expect(result).toBe(false); + }); + + it('should return false when promptTemplate is undefined', () => { + const result = shouldIncludePromptTemplate(baseConfig, undefined); + expect(result).toBe(false); + }); }); - it('should return false when authToken is undefined', () => { - const result = shouldIncludeAuthToken(baseConfig, undefined); - expect(result).toBe(false); + describe('shouldIncludeAuthToken', () => { + const baseConfig = { + UseCaseType: USE_CASE_TYPES.TEXT, + LlmParams: { + PromptParams: { + UserPromptEditingEnabled: false, + PromptTemplate: 'default template' + }, + RAGEnabled: true + } + } as TextUseCaseConfig; + + it('should return true when RAGEnabled and authToken are present', () => { + const result = shouldIncludeAuthToken(baseConfig, 'test-token'); + expect(result).toBe(true); + }); + + it('should return false when RAGEnabled is false', () => { + const config = { + ...baseConfig, + LlmParams: { + ...baseConfig.LlmParams, + RAGEnabled: false + } + }; + const result = shouldIncludeAuthToken(config, 'test-token'); + expect(result).toBe(false); + }); + + it('should return false when authToken is undefined', () => { + const result = shouldIncludeAuthToken(baseConfig, undefined); + expect(result).toBe(false); + }); }); }); }); diff --git a/source/ui-chat/src/__tests__/utils/file-upload.test.ts b/source/ui-chat/src/__tests__/utils/file-upload.test.ts new file mode 100644 index 00000000..8e09d424 --- /dev/null +++ b/source/ui-chat/src/__tests__/utils/file-upload.test.ts @@ -0,0 +1,339 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, expect, vi, beforeEach, test } from 'vitest'; +import { + validateFile, + validateFiles, + isFileSizeError, + getFileCounts, + isFileCountExceeded +} from '../../utils/file-upload'; +import { + MULTIMODAL_MAX_FILENAME_LENGTH, + MULTIMODAL_MAX_IMAGES, + MULTIMODAL_MAX_DOCUMENTS, + MULTIMODAL_MAX_IMAGE_SIZE, + MULTIMODAL_MAX_DOCUMENT_SIZE, + MULTIMODAL_SUPPORTED_IMAGE_FORMATS, + MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS +} from '../../utils/constants'; + +describe('file-upload utilities', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + describe('validateFile - Valid Files', () => { + test('returns null for valid image files', () => { + const imageFiles = MULTIMODAL_SUPPORTED_IMAGE_FORMATS.map( + (format) => + new File(['test'], `test.${format}`, { type: `image/${format === 'jpg' ? 'jpeg' : format}` }) + ); + + imageFiles.forEach((file) => { + const result = validateFile(file); + expect(result).toBeNull(); + }); + }); + + test('returns null for valid document files', () => { + const mimeTypeMap: Record = { + 'pdf': 'application/pdf', + 'txt': 'text/plain', + 'doc': 'application/msword', + 'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + 'csv': 'text/csv', + 'html': 'text/html', + 'md': 'text/markdown', + 'xls': 'application/vnd.ms-excel', + 'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + }; + + const documentFiles = MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS.map( + (format) => + new File(['test'], `document.${format}`, { + type: mimeTypeMap[format] || 'application/octet-stream' + }) + ); + + documentFiles.forEach((file) => { + const result = validateFile(file); + expect(result).toBeNull(); + }); + }); + + test('allows valid file names with pattern', () => { + const validNames = [ + { name: 'simple document', fileName: 'document.pdf' }, + { name: 'hyphens and numbers', fileName: 'data-file-v2.csv' }, + { name: 'Spaces and numbers', fileName: 'report\x202024.xlsx' }, + { name: 'Spaces and hyphens', fileName: 'Hello\x20-\x20World.docx' }, + { name: 'underscores', fileName: 'my_file.png' }, + { name: 'file with mixed valid chars', fileName: 'Project_Report-v2_Final.docx' } + ]; + + validNames.forEach(({ name, fileName }) => { + const file = new File(['test'], fileName, { type: 'application/pdf' }); + const result = validateFile(file); + + expect(result).toBeNull(); + }); + }); + }); + + describe('validateFile - File Size Validation', () => { + test('returns error for files exceeding size limits', () => { + // Image exceeding maximum image size limit + const largeImageSize = MULTIMODAL_MAX_IMAGE_SIZE + 1024; // Slightly over limit + const largeImage = new File(['x'.repeat(largeImageSize)], 'large.jpg', { type: 'image/jpeg' }); + Object.defineProperty(largeImage, 'size', { value: largeImageSize }); + + const imageResult = validateFile(largeImage); + expect(imageResult).not.toBeNull(); + expect(isFileSizeError(imageResult!.error)).toBe(true); + expect(imageResult?.error.message).toContain('File size exceeds maximum limit'); + + // Document exceeding maximum document size limit + const largeDocSize = MULTIMODAL_MAX_DOCUMENT_SIZE + 1024; // Slightly over limit + const largeDoc = new File(['x'.repeat(largeDocSize)], 'large.pdf', { type: 'application/pdf' }); + Object.defineProperty(largeDoc, 'size', { value: largeDocSize }); + + const docResult = validateFile(largeDoc); + expect(docResult).not.toBeNull(); + expect(docResult?.error.message).toContain('File size exceeds maximum limit'); + }); + + test('returns error for empty files', () => { + const emptyFile = new File([''], 'empty.txt', { type: 'text/plain' }); + Object.defineProperty(emptyFile, 'size', { value: 0 }); + + const result = validateFile(emptyFile); + expect(result).not.toBeNull(); + expect(result?.error.message).toContain('File is empty'); + }); + }); + + describe('validateFile - Filename Pattern Validation', () => { + test('validates malicious file names for XSS prevention', () => { + const maliciousNames = [ + '../../../etc/passwd.txt', + '..\\..\\windows\\system32\\config.txt', + 'file\x00.jpg', + '.jpg', + 'file with spaces and special chars!@#$.jpg', + 'file\n\r\t.jpg', + 'file/path/traversal.jpg', + 'file\\path\\traversal.jpg' + ]; + + maliciousNames.forEach((fileName) => { + const file = new File(['test'], fileName, { type: 'text/plain' }); + const result = validateFile(file); + + expect(result).not.toBeNull(); + expect(result?.error.message).toBe('Invalid file name'); + }); + }); + + test('validates extremely long file names', () => { + const longFileName = 'a'.repeat(MULTIMODAL_MAX_FILENAME_LENGTH + 1) + '.jpg'; + const longFile = new File(['test'], longFileName, { type: 'image/jpeg' }); + const result = validateFile(longFile); + + expect(result).not.toBeNull(); + expect(result?.error.message).toBe('Invalid file name'); + }); + + test('rejects file names that do not match pattern', () => { + const invalidNames = [ + // Pattern structure violations + { name: 'dots in filename', fileName: 'file.name.jpg' }, + { name: 'ends with space before extension', fileName: 'file .jpg' }, + { name: 'multiple consecutive spaces', fileName: 'file name.jpg' }, + { name: 'starts with non-alphanumeric character', fileName: '-file.jpg' }, + { name: 'parentheses in filename', fileName: 'file(1).png' }, + { name: 'starting with parenthesis', fileName: '(file).png' }, + { name: 'ending with parenthesis', fileName: 'file(.jpg' }, + { name: 'parentheses with spaces', fileName: 'Report (Final).docx' }, + + // Invalid characters + { name: 'invalid characters brackets', fileName: 'file[name].jpg' }, + { name: 'invalid characters braces', fileName: 'file{name}.png' }, + { name: 'contains @ symbol', fileName: 'file@domain.jpg' }, + { name: 'contains # symbol', fileName: 'file#hash.jpg' }, + { name: 'contains % symbol', fileName: 'file%percent.jpg' }, + { name: 'contains & symbol', fileName: 'file&and.jpg' }, + + // Unicode whitespace + { name: 'non-breaking space (\\u00A0)', fileName: 'file\u00A0name.pdf' }, + { name: 'tab character (\\t)', fileName: 'file\tname.jpg' }, + { name: 'newline character (\\n)', fileName: 'file\nname.txt' }, + { name: 'zero-width space (\\u200B)', fileName: 'file\u200Bname.png' }, + { name: 'leading space', fileName: ' file.pdf' }, + + // Security violations + { name: 'XSS quote attack', fileName: 'file" onmouseover="alert(1)".pdf' }, + { name: 'single quote attack', fileName: "file' onclick='alert(1)'.jpg" }, + { name: 'script tag attack', fileName: 'file.png' }, + { name: 'angle bracket attack', fileName: 'file>script.txt' }, + { name: 'path traversal', fileName: '../file.png' }, + { name: 'command injection', fileName: 'file;rm.txt' } + ]; + + invalidNames.forEach(({ fileName }) => { + const file = new File(['test'], fileName, { type: 'image/jpeg' }); + const result = validateFile(file); + + expect(result).not.toBeNull(); + expect(result?.error.message).toBe('Invalid file name'); + }); + }); + }); + + describe('validateFile - Extension Validation', () => { + test('accepts supported image formats', () => { + const supportedImageFormats = ['png', 'jpeg', 'jpg', 'gif', 'webp']; + + supportedImageFormats.forEach((ext) => { + const file = new File(['test'], `image.${ext}`, { type: `image/${ext}` }); + const result = validateFile(file); + + expect(result).toBeNull(); + }); + }); + + test('accepts supported document formats', () => { + const supportedDocFormats = ['pdf', 'csv', 'doc', 'docx', 'xls', 'xlsx', 'html', 'txt', 'md']; + + supportedDocFormats.forEach((ext) => { + const file = new File(['test'], `document.${ext}`, { type: 'application/octet-stream' }); + const result = validateFile(file); + + expect(result).toBeNull(); + }); + }); + + test('rejects unsupported file extensions', () => { + const unsupportedFormats = ['exe', 'bat', 'sh', 'zip', 'rar', 'dmg', 'iso']; + + unsupportedFormats.forEach((ext) => { + const file = new File(['test'], `file.${ext}`, { type: 'application/octet-stream' }); + const result = validateFile(file); + + expect(result).not.toBeNull(); + expect(result?.error.message).toContain('Unsupported file type'); + }); + }); + + test('rejects uppercase extensions even for supported file types', () => { + const uppercaseFiles = [ + new File(['test'], 'image.JPG', { type: 'image/jpeg' }), + new File(['test'], 'document.PDF', { type: 'application/pdf' }), + new File(['test'], 'text.TXT', { type: 'text/plain' }) + ]; + + uppercaseFiles.forEach((file) => { + const result = validateFile(file); + expect(result).not.toBeNull(); + expect(result?.error.message).toContain('Unsupported file type'); + }); + }); + }); + + describe('validateFiles - Batch Validation', () => { + test('returns empty array for all valid files', () => { + const validFiles = [ + new File(['test1'], 'image1.jpg', { type: 'image/jpeg' }), + new File(['test2'], 'document.pdf', { type: 'application/pdf' }), + new File(['test3'], 'text.txt', { type: 'text/plain' }) + ]; + + const result = validateFiles(validFiles); + expect(result).toEqual([]); + }); + + test('returns errors for invalid files', () => { + const mixedFiles = [ + new File(['test1'], 'valid.jpg', { type: 'image/jpeg' }), + new File(['test2'], 'invalid.zip', { type: 'application/zip' }), + new File([''], 'empty.txt', { type: 'text/plain' }) + ]; + + Object.defineProperty(mixedFiles[2], 'size', { value: 0 }); + + const result = validateFiles(mixedFiles); + + expect(result).toHaveLength(2); + expect(result.some((r) => r.fileName === 'invalid.zip')).toBe(true); + expect(result.some((r) => r.fileName === 'empty.txt')).toBe(true); + }); + + test('handles empty file array', () => { + const result = validateFiles([]); + expect(result).toEqual([]); + }); + + test('handles duplicate file names', () => { + const duplicateFiles = [ + new File(['test1'], 'duplicate.jpg', { type: 'image/jpeg' }), + new File(['test2'], 'duplicate.jpg', { type: 'image/jpeg' }), + new File(['test3'], 'unique.jpg', { type: 'image/jpeg' }) + ]; + + const result = validateFiles(duplicateFiles); + + expect(result.some((r) => r.error.message.includes('Duplicate file name'))).toBe(true); + }); + }); + + describe('Global Count Validation', () => { + test('should count files correctly', () => { + const files = [ + new File(['content'], 'image1.jpg', { type: 'image/jpeg' }), + new File(['content'], 'image2.png', { type: 'image/png' }), + new File(['content'], 'doc1.pdf', { type: 'application/pdf' }), + new File(['content'], 'doc2.docx', { + type: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' + }) + ]; + + const { imageCount, documentCount } = getFileCounts(files); + expect(imageCount).toBe(2); + expect(documentCount).toBe(2); + }); + + test('should detect when image count is exceeded', () => { + const manyImages = Array.from( + { length: MULTIMODAL_MAX_IMAGES + 1 }, + (_, i) => new File(['test'], `image${i}.jpg`, { type: 'image/jpeg' }) + ); + + const result = isFileCountExceeded(manyImages); + expect(result.exceeded).toBe(true); + expect(result.message).toContain('images allowed'); + }); + + test('should detect when document count is exceeded', () => { + const manyDocuments = Array.from( + { length: MULTIMODAL_MAX_DOCUMENTS + 1 }, + (_, i) => new File(['test'], `doc${i}.pdf`, { type: 'application/pdf' }) + ); + + const result = isFileCountExceeded(manyDocuments); + expect(result.exceeded).toBe(true); + expect(result.message).toContain('documents allowed'); + }); + + test('should return no error when counts are within limits', () => { + const files = [ + new File(['content'], 'image.jpg', { type: 'image/jpeg' }), + new File(['content'], 'doc.pdf', { type: 'application/pdf' }) + ]; + + const result = isFileCountExceeded(files); + expect(result.exceeded).toBe(false); + expect(result.message).toBeUndefined(); + }); + }); +}); diff --git a/source/ui-chat/src/__tests__/utils/validation.test.ts b/source/ui-chat/src/__tests__/utils/validation.test.ts index e15c030a..ccecb453 100644 --- a/source/ui-chat/src/__tests__/utils/validation.test.ts +++ b/source/ui-chat/src/__tests__/utils/validation.test.ts @@ -262,4 +262,40 @@ describe('parseTraceId', () => { expect(result.rootId).toBe('1-2345'); expect(result.parentId).toBe('abcd'); }); + + it('should parse new AgentCore format with trace ID', () => { + const errorMessage = 'Chat service failed to respond. Please contact your administrator for support and quote the following trace id: 1-abc123-def456'; + const expected: TraceDetails = { + rootId: '1-abc123-def456', + parentId: '', + sampled: false, + lineage: '', + message: 'Chat service failed to respond.' + }; + expect(parseTraceId(errorMessage)).toEqual(expected); + }); + + it('should parse new AgentCore format with different error message', () => { + const errorMessage = 'AgentCore invocation failed. Please contact your administrator for support and quote the following trace id: trace-xyz789'; + const expected: TraceDetails = { + rootId: 'trace-xyz789', + parentId: '', + sampled: false, + lineage: '', + message: 'AgentCore invocation failed.' + }; + expect(parseTraceId(errorMessage)).toEqual(expected); + }); + + it('should handle case insensitive trace ID matching', () => { + const errorMessage = 'Error occurred. Please contact your administrator for support and Quote The Following Trace ID: test-123'; + const expected: TraceDetails = { + rootId: 'test-123', + parentId: '', + sampled: false, + lineage: '', + message: 'Error occurred.' + }; + expect(parseTraceId(errorMessage)).toEqual(expected); + }); }); diff --git a/source/ui-chat/src/components/common/common-components.tsx b/source/ui-chat/src/components/common/common-components.tsx index 18307f84..5c5c57de 100644 --- a/source/ui-chat/src/components/common/common-components.tsx +++ b/source/ui-chat/src/components/common/common-components.tsx @@ -5,6 +5,7 @@ import { forwardRef } from 'react'; import { Avatar } from '@cloudscape-design/chat-components'; import { ButtonGroup, StatusIndicator } from '@cloudscape-design/components'; import { AuthorAvatarProps, AUTHORS } from '../../pages/chat/config'; +export { ExternalLinkWarningModal } from './external-link-warning-modal'; /** * Base props interface for container components diff --git a/source/ui-chat/src/components/common/external-link-warning-modal.tsx b/source/ui-chat/src/components/common/external-link-warning-modal.tsx new file mode 100644 index 00000000..81bf49fb --- /dev/null +++ b/source/ui-chat/src/components/common/external-link-warning-modal.tsx @@ -0,0 +1,54 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import React from 'react'; +import { Box, Modal, Button, SpaceBetween, Alert } from '@cloudscape-design/components'; + +interface ExternalLinkWarningModalProps { + visible: boolean; + onDiscard: () => void; + externalLink: string; + resourceType?: string; +} + +export function ExternalLinkWarningModal({ + visible, + onDiscard, + externalLink, + resourceType = 'external link' +}: ExternalLinkWarningModalProps) { + return ( + visible && ( + + + + + + + } + data-testid="external-link-warning-modal" + > + + Are you sure that you want to leave the current page? You will be redirected to an external website. + + + ) + ); +} \ No newline at end of file diff --git a/source/ui-chat/src/components/markdown/MarkdownContent.tsx b/source/ui-chat/src/components/markdown/MarkdownContent.tsx index 8139fd64..0e6ea032 100644 --- a/source/ui-chat/src/components/markdown/MarkdownContent.tsx +++ b/source/ui-chat/src/components/markdown/MarkdownContent.tsx @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import React, { memo } from 'react'; +import React, { memo, useState } from 'react'; import ReactMarkdown, { Components } from 'react-markdown'; import remarkGfm from 'remark-gfm'; import remarkMath from 'remark-math'; @@ -13,6 +13,7 @@ import typescriptHighlight from '@cloudscape-design/code-view/highlight/typescri import javascriptHighlight from '@cloudscape-design/code-view/highlight/javascript'; import pythonHighlight from '@cloudscape-design/code-view/highlight/python'; import javaHighlight from '@cloudscape-design/code-view/highlight/java'; +import { ExternalLinkWarningModal } from '../common/common-components'; import './MarkdownContent.scss'; /** @@ -106,17 +107,76 @@ interface MarkdownContentProps { content: string; } +const stripThinkingTags = (text: string): string => { + // Remove thinking tags and everything between them (case-insensitive, global, multiline) + return text.replace(/[\s\S]*?<\/thinking>/gi, '').trim(); +}; + /** * Component that renders Markdown content with custom styling and components * Supports GitHub Flavored Markdown and math expressions * Memoized to prevent unnecessary re-renders */ const MarkdownContent = memo(({ content }: MarkdownContentProps) => { + const [showExternalLinkModal, setShowExternalLinkModal] = useState(false); + const [pendingExternalLink, setPendingExternalLink] = useState(''); + + const handleExternalLinkClick = (href: string) => { + setPendingExternalLink(href); + setShowExternalLinkModal(true); + }; + + const handleModalDiscard = () => { + setShowExternalLinkModal(false); + setPendingExternalLink(''); + }; + + // Strip thinking tags from content before rendering + const cleanedContent = stripThinkingTags(content); + + // Create components with access to modal state + const markdownComponents: Components = { + ...MARKDOWN_COMPONENTS, + /** + * Renders links with external link warning modal + */ + a({ href, children, ...props }) { + const isExternal = href && (href.startsWith('http://') || href.startsWith('https://')); + + if (isExternal) { + return ( + { + e.preventDefault(); + handleExternalLinkClick(href); + }} + {...props} + > + {children} + + ); + } + + return ( + + {children} + + ); + } + }; + return (
- - {content} + + {cleanedContent} +
); }); diff --git a/source/ui-chat/src/components/multimodal/FileDisplay.tsx b/source/ui-chat/src/components/multimodal/FileDisplay.tsx new file mode 100644 index 00000000..308fa3d2 --- /dev/null +++ b/source/ui-chat/src/components/multimodal/FileDisplay.tsx @@ -0,0 +1,121 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import ExpandableSection from '@cloudscape-design/components/expandable-section'; +import SpaceBetween from '@cloudscape-design/components/space-between'; +import { Icon } from '@cloudscape-design/components'; +import { UploadedFile } from '../../types/file-upload'; +import { formatFileNameForDisplay } from '../../utils/file-upload'; +import { useLazyGetFileDownloadUrlQuery } from '../../store/solutionApi'; +import { useSelector } from 'react-redux'; +import { RootState } from '../../store/store'; +import { useState } from 'react'; + +interface FileDisplayProps { + readonly files: UploadedFile[]; + readonly hasError?: boolean; +} + +interface FileTagProps { + file: UploadedFile; + hasError?: boolean; + showDownload?: boolean; +} + +const FileTag = ({ file, hasError, showDownload = false }: FileTagProps) => { + const displayName = formatFileNameForDisplay(file.fileName); + const isNameTruncated = displayName !== file.fileName; + const [getDownloadUrl] = useLazyGetFileDownloadUrlQuery(); + const useCaseId = useSelector((state: RootState) => state.config.runtimeConfig?.UseCaseId); + const [isHovered, setIsHovered] = useState(false); + + const handleDownload = async () => { + if (!useCaseId || !file.conversationId || !file.messageId) { + console.error('Missing required parameters for download:', { + useCaseId, + conversationId: file.conversationId, + messageId: file.messageId + }); + return; + } + + try { + const result = await getDownloadUrl({ + useCaseId, + conversationId: file.conversationId, + messageId: file.messageId, + fileName: file.fileName + }).unwrap(); + + window.open(result.downloadUrl, '_blank'); + } catch (error) { + console.error('Failed to get download URL:', error); + } + }; + + const tagStyle: React.CSSProperties = { + display: 'inline-flex', + alignItems: 'center', + gap: '4px', + padding: '2px 8px', + backgroundColor: '#f5f5f5', + borderRadius: '4px', + fontSize: '12px', + lineHeight: '16px', + color: '#333', + cursor: showDownload ? 'pointer' : 'default', + transition: 'all 0.2s ease', + maxWidth: '200px' + }; + + const tagHoverStyle: React.CSSProperties = { + ...tagStyle, + backgroundColor: showDownload && isHovered ? '#e8f4fd' : tagStyle.backgroundColor + }; + + if (hasError) { + return ( +
+ + + {displayName} + +
+ ); + } + + return ( +
setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + onClick={showDownload ? handleDownload : undefined} + title={isNameTruncated ? file.fileName : undefined} + > + + + {displayName} + + {showDownload && isHovered && } +
+ ); +}; + +export const FileDisplay = ({ files, hasError = false }: FileDisplayProps) => { + if (!files?.length) return null; + + return ( + + + {files.map((file) => ( + + ))} + + + ); +}; diff --git a/source/ui-chat/src/components/multimodal/FileTokenGroup.tsx b/source/ui-chat/src/components/multimodal/FileTokenGroup.tsx new file mode 100644 index 00000000..eb2bf91d --- /dev/null +++ b/source/ui-chat/src/components/multimodal/FileTokenGroup.tsx @@ -0,0 +1,126 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { useMemo } from 'react'; +import FileTokenGroup from '@cloudscape-design/components/file-token-group'; +import { FileTokenGroupProps } from '@cloudscape-design/components/file-token-group'; +import { Box, SpaceBetween, Icon } from '@cloudscape-design/components'; +import { UploadedFile } from '../../types/file-upload'; + +type FileWithLoading = File & { loading?: boolean }; +type UploadedFileWithLoading = UploadedFile & { loading?: boolean }; +type SupportedFile = File | UploadedFile | FileWithLoading | UploadedFileWithLoading; + +interface FileTokenListProps { + readonly files: SupportedFile[]; + readonly onDismiss?: (fileIndex: number) => void; + readonly readOnly?: boolean; + readonly alignment?: FileTokenGroupProps.Alignment; + readonly showFileSize?: boolean; + readonly uploadErrors?: Record; + readonly deleteErrors?: Record; + readonly limit?: number; + readonly label?: string; + readonly showLabel?: boolean; +} + +const convertToFileObject = (file: SupportedFile): File => { + if (file instanceof File) { + return file; + } + + const uploadedFile = file as UploadedFile; + const fileObj = new File([''], uploadedFile.fileName, { + type: uploadedFile.fileContentType, + lastModified: Date.now() + }); + + if (uploadedFile.fileSize !== undefined) { + Object.defineProperty(fileObj, 'size', { + value: uploadedFile.fileSize, + writable: false, + enumerable: true, + configurable: false + }); + } + + return fileObj; +}; + +export const FileTokenList = ({ + files, + onDismiss, + readOnly = false, + alignment = 'horizontal', + showFileSize = true, + uploadErrors = {}, + deleteErrors = {}, + limit = 5, + label = 'Attached files', + showLabel = false +}: FileTokenListProps) => { + if (!files?.length) { + return null; + } + + const items = useMemo(() => { + return files.map((file) => { + const fileName = file instanceof File ? file.name : (file as UploadedFile).fileName; + const fileObj = convertToFileObject(file); + const hasUploadError = uploadErrors[fileName]; + const hasDeleteError = deleteErrors[fileName]; + + const errorText = hasDeleteError?.message || hasUploadError?.message; + + return { + file: fileObj, + ...(errorText && { errorText }), + ...(!readOnly && (file as FileWithLoading).loading === true && { loading: true }) + }; + }); + }, [files, uploadErrors, deleteErrors, readOnly]); + + const handleDismiss = ({ detail }: { detail: { fileIndex: number } }) => { + if (onDismiss) { + onDismiss(detail.fileIndex); + } + }; + + const fileTokenGroup = ( + `Remove file ${fileIndex + 1}`, + limitShowFewer: 'Show fewer files', + limitShowMore: 'Show more files', + errorIconAriaLabel: 'Error', + warningIconAriaLabel: 'Warning' + }} + /> + ); + + if (showLabel) { + const labelContent = ( + + + + {label} + + + ); + + return ( + + {labelContent} + {fileTokenGroup} + + ); + } + + return fileTokenGroup; +}; diff --git a/source/ui-chat/src/components/multimodal/__tests__/FileDisplay.test.tsx b/source/ui-chat/src/components/multimodal/__tests__/FileDisplay.test.tsx new file mode 100644 index 00000000..ad67c568 --- /dev/null +++ b/source/ui-chat/src/components/multimodal/__tests__/FileDisplay.test.tsx @@ -0,0 +1,256 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, expect, test, vi, beforeEach, afterEach } from 'vitest'; +import { render, screen, fireEvent, waitFor } from '@testing-library/react'; +import { Provider } from 'react-redux'; +import { configureStore } from '@reduxjs/toolkit'; +import { FileDisplay } from '../FileDisplay'; +import { UploadedFile } from '../../../types/file-upload'; +import { solutionApi } from '../../../store/solutionApi'; + +const mockGetFileDownloadUrl = vi.fn(); +vi.mock('../../../store/solutionApi', () => ({ + solutionApi: { + reducer: (state = {}) => state, + reducerPath: 'solution-api', + middleware: vi.fn(() => (next: any) => (action: any) => next(action)) + }, + useLazyGetFileDownloadUrlQuery: () => [mockGetFileDownloadUrl, { isLoading: false }] +})); + +const mockUploadedFile: UploadedFile = { + key: 'test-key-1', + fileName: 'test-document.pdf', + fileContentType: 'application/pdf', + fileExtension: 'pdf', + fileSize: 1024, + messageId: 'test-message-id', + conversationId: 'test-conversation-id' +}; + +const createMockStore = (useCaseId?: string) => { + return configureStore({ + reducer: { + config: (state = { runtimeConfig: { UseCaseId: useCaseId } }) => state, + [solutionApi.reducerPath]: solutionApi.reducer + }, + middleware: (getDefaultMiddleware) => getDefaultMiddleware().concat(solutionApi.middleware) + }); +}; + +const renderWithProvider = (component: React.ReactElement, useCaseId?: string) => { + const store = createMockStore(useCaseId); + return render({component}); +}; + +describe('FileDisplay', () => { + beforeEach(() => { + vi.clearAllMocks(); + global.document.createElement = vi.fn((tagName) => { + if (tagName === 'a') { + return { + href: '', + download: '', + target: '', + click: vi.fn(), + style: {} + } as any; + } + return {} as any; + }); + global.document.body.appendChild = vi.fn(); + global.document.body.removeChild = vi.fn(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('Basic Rendering', () => { + test('renders nothing when no files provided', () => { + renderWithProvider(); + expect(screen.queryByTestId('file-display')).not.toBeInTheDocument(); + }); + + test('renders expandable section with correct header when files provided', () => { + renderWithProvider(); + + expect(screen.getByTestId('file-display')).toBeInTheDocument(); + expect(screen.getByText('Attached Files')).toBeInTheDocument(); + }); + + test('renders multiple files correctly', () => { + const secondFile: UploadedFile = { ...mockUploadedFile, key: 'test-key-2', fileName: 'second-file.pdf' }; + const multipleFiles = [mockUploadedFile, secondFile]; + renderWithProvider(); + + expect(screen.getByText('test-document.pdf')).toBeInTheDocument(); + expect(screen.getByText('second-file.pdf')).toBeInTheDocument(); + }); + }); + + describe('Error States', () => { + test('shows error status indicator when hasError is true', () => { + renderWithProvider(); + + const errorIndicator = screen.getByRole('img', { name: 'File processing failed' }); + expect(errorIndicator).toBeInTheDocument(); + expect(screen.getByText('test-document.pdf')).toBeInTheDocument(); + }); + + test('does not show download functionality when file has error', () => { + renderWithProvider(, 'test-use-case-id'); + + const fileTag = screen.getByText('test-document.pdf').closest('div'); + expect(fileTag).toHaveStyle('cursor: default'); + + // Hover should not show download icon for error state + fireEvent.mouseEnter(fileTag!); + expect(screen.queryByRole('img', { name: 'download' })).not.toBeInTheDocument(); + }); + }); + + describe('Normal File Display', () => { + test('shows normal file display when hasError is false', () => { + renderWithProvider(); + + expect(screen.queryByRole('img', { name: 'File processing failed' })).not.toBeInTheDocument(); + expect(screen.getByRole('img', { name: 'file' })).toBeInTheDocument(); + expect(screen.getByText('test-document.pdf')).toBeInTheDocument(); + }); + + test('shows download icon on hover', async () => { + renderWithProvider(, 'test-use-case-id'); + + const fileTag = screen.getByText('test-document.pdf').closest('div'); + expect(fileTag).toBeInTheDocument(); + expect(screen.queryByRole('img', { name: 'download' })).not.toBeInTheDocument(); + fireEvent.mouseEnter(fileTag!); + await waitFor(() => { + expect(screen.getByRole('img', { name: 'download' })).toBeInTheDocument(); + }); + fireEvent.mouseLeave(fileTag!); + + await waitFor(() => { + expect(screen.queryByRole('img', { name: 'download' })).not.toBeInTheDocument(); + }); + }); + + test('shows multiple file tags correctly', () => { + const secondFile: UploadedFile = { ...mockUploadedFile, key: 'test-key-2', fileName: 'second-file.pdf' }; + const multipleFiles = [mockUploadedFile, secondFile]; + renderWithProvider(, 'test-use-case-id'); + + expect(screen.getByText('test-document.pdf')).toBeInTheDocument(); + expect(screen.getByText('second-file.pdf')).toBeInTheDocument(); + + const firstTag = screen.getByText('test-document.pdf').closest('div'); + const secondTag = screen.getByText('second-file.pdf').closest('div'); + + expect(firstTag).toHaveStyle('cursor: pointer'); + expect(secondTag).toHaveStyle('cursor: pointer'); + }); + }); + + describe('File Name Display', () => { + test('shows file title tooltip for truncated file names', () => { + const longNameFile: UploadedFile = { + ...mockUploadedFile, + fileName: 'very-long-file-name-that-exceeds-display-limit.pdf' + }; + renderWithProvider(); + + const fileElement = screen.getByText(/very-long-file-name/).closest('div'); + expect(fileElement).toHaveAttribute('title', longNameFile.fileName); + }); + + test('does not show tooltip for non-truncated file names', () => { + renderWithProvider(); + + const fileElement = screen.getByText('test-document.pdf').closest('div'); + expect(fileElement).not.toHaveAttribute('title'); + }); + }); + + describe('Download Functionality', () => { + test('file tag is not clickable when useCaseId is not available', () => { + renderWithProvider(); // No useCaseId + + const fileTag = screen.getByText('test-document.pdf').closest('div'); + expect(fileTag).toHaveStyle('cursor: default'); + }); + + test('calls download API when file tag is clicked', async () => { + const mockDownloadResponse = { downloadUrl: 'https://example.com/download/test-file' }; + mockGetFileDownloadUrl.mockReturnValue({ + unwrap: vi.fn().mockResolvedValue(mockDownloadResponse) + }); + + renderWithProvider(, 'test-use-case-id'); + + const fileTag = screen.getByText('test-document.pdf').closest('div'); + fireEvent.click(fileTag!); + + await waitFor(() => { + expect(mockGetFileDownloadUrl).toHaveBeenCalledWith({ + useCaseId: 'test-use-case-id', + conversationId: 'test-conversation-id', + messageId: 'test-message-id', + fileName: 'test-document.pdf' + }); + }); + }); + + test('creates and clicks download link when API call succeeds', async () => { + const mockDownloadResponse = { downloadUrl: 'https://example.com/download/test-file' }; + const mockLink = { + href: '', + download: '', + target: '', + click: vi.fn(), + style: {} + }; + + mockGetFileDownloadUrl.mockReturnValue({ + unwrap: vi.fn().mockResolvedValue(mockDownloadResponse) + }); + + global.document.createElement = vi.fn().mockReturnValue(mockLink); + + renderWithProvider(, 'test-use-case-id'); + + const fileTag = screen.getByText('test-document.pdf').closest('div'); + fireEvent.click(fileTag!); + + await waitFor(() => { + expect(mockLink.href).toBe('https://example.com/download/test-file'); + expect(mockLink.download).toBe('test-document.pdf'); + expect(mockLink.target).toBe('_blank'); + expect(mockLink.click).toHaveBeenCalled(); + expect(global.document.body.appendChild).toHaveBeenCalledWith(mockLink); + expect(global.document.body.removeChild).toHaveBeenCalledWith(mockLink); + }); + }); + + test('logs error when download API call fails', async () => { + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + const mockError = new Error('Download failed'); + + mockGetFileDownloadUrl.mockReturnValue({ + unwrap: vi.fn().mockRejectedValue(mockError) + }); + + renderWithProvider(, 'test-use-case-id'); + + const fileTag = screen.getByText('test-document.pdf').closest('div'); + fireEvent.click(fileTag!); + + await waitFor(() => { + expect(consoleErrorSpy).toHaveBeenCalledWith('Failed to get download URL:', mockError); + }); + + consoleErrorSpy.mockRestore(); + }); + }); +}); diff --git a/source/ui-chat/src/components/thinking/ExpandableContent.scss b/source/ui-chat/src/components/thinking/ExpandableContent.scss new file mode 100644 index 00000000..066a6498 --- /dev/null +++ b/source/ui-chat/src/components/thinking/ExpandableContent.scss @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +.expandable-content { + display: inline; + + &__text { + word-break: break-word; + } + + &__toggle { + background: none; + border: none; + color: #0073bb; + cursor: pointer; + padding: 0 0 0 4px; + font-size: inherit; + text-decoration: underline; + + &:hover { + color: #005a9e; + } + + &:focus { + outline: 2px solid #0073bb; + outline-offset: 2px; + } + } +} \ No newline at end of file diff --git a/source/ui-chat/src/components/thinking/ExpandableContent.tsx b/source/ui-chat/src/components/thinking/ExpandableContent.tsx new file mode 100644 index 00000000..e9fbfe1b --- /dev/null +++ b/source/ui-chat/src/components/thinking/ExpandableContent.tsx @@ -0,0 +1,39 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import React, { useState } from 'react'; +import './ExpandableContent.scss'; + +interface ExpandableContentProps { + content: string; + maxLength?: number; + 'data-testid'?: string; +} + +export const ExpandableContent: React.FC = ({ + content, + maxLength = 200, + 'data-testid': dataTestId +}) => { + const [isExpanded, setIsExpanded] = useState(false); + const needsTruncation = content.length > maxLength; + const displayContent = needsTruncation && !isExpanded + ? content.substring(0, maxLength) + '...' + : content; + + return ( +
+ {displayContent} + {needsTruncation && ( + + )} +
+ ); +}; diff --git a/source/ui-chat/src/components/thinking/ThinkingIndicator.scss b/source/ui-chat/src/components/thinking/ThinkingIndicator.scss new file mode 100644 index 00000000..252bce22 --- /dev/null +++ b/source/ui-chat/src/components/thinking/ThinkingIndicator.scss @@ -0,0 +1,118 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +.thinking-indicator { + opacity: 1; + transform: translateY(0); + margin-bottom: 8px; + + &--completed { + opacity: 0.6; + } + + &__content { + display: inline-flex; + align-items: center; + gap: 6px; + } + + &__toggle { + display: inline-flex; + align-items: center; + gap: 4px; + padding: 2px 6px; + background: transparent; + border: none; + border-radius: 4px; + cursor: pointer; + transition: background 0.15s ease; + font-size: 13px; + color: var(--color-text-body-secondary, #5f6b7a); + + &:hover { + background: var(--color-background-container-header, #f9fafb); + } + + &:focus { + outline: 2px solid var(--color-border-item-focused, #0972d3); + outline-offset: 1px; + } + + &:active { + background: var(--color-background-control-default, #e9ebed); + } + } + + &__text { + display: inline-flex; + align-items: center; + gap: 6px; + font-weight: 400; + } + + &__duration { + font-size: 12px; + color: var(--color-text-status-info, #0972d3); + font-weight: 500; + } + + &__details { + display: flex; + flex-direction: column; + gap: 4px; + margin-top: 4px; + margin-left: 24px; + padding: 8px 12px; + background: var(--color-background-container-content, #ffffff); + border: 1px solid var(--color-border-divider-default, #e9ebed); + border-radius: 4px; + font-size: 12px; + animation: slideDown 0.15s ease-out; + } + + &__list { + list-style: disc; + margin: 0; + padding-left: 20px; + color: var(--color-text-body-default, #000716); + } + + &__list-item { + margin-bottom: 4px; + line-height: 1.5; + + &:last-child { + margin-bottom: 0; + } + } +} + +@keyframes slideDown { + from { + opacity: 0; + transform: translateY(-2px); + } + + to { + opacity: 1; + transform: translateY(0); + } +} + +// Responsive design for mobile devices +@media (max-width: 768px) { + .thinking-indicator { + &__details { + margin-left: 0; + font-size: 11px; + } + + &__toggle { + font-size: 12px; + } + + &__duration { + font-size: 11px; + } + } +} \ No newline at end of file diff --git a/source/ui-chat/src/components/thinking/ThinkingIndicator.tsx b/source/ui-chat/src/components/thinking/ThinkingIndicator.tsx new file mode 100644 index 00000000..902d3a56 --- /dev/null +++ b/source/ui-chat/src/components/thinking/ThinkingIndicator.tsx @@ -0,0 +1,114 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { useState, useEffect } from 'react'; +import { Icon } from '@cloudscape-design/components'; +import { ThinkingMetadata } from '../../pages/chat/types'; +import { ExpandableContent } from './ExpandableContent'; +import './ThinkingIndicator.scss'; + +export interface ThinkingIndicatorProps { + thinking: ThinkingMetadata; + 'data-testid'?: string; +} + +const formatDuration = (seconds: number): string => { + if (isNaN(seconds) || seconds < 0) { + return '0s'; + } + if (seconds < 60) { + return `${seconds}s`; + } + const minutes = Math.floor(seconds / 60); + const remainingSeconds = seconds % 60; + return `${minutes}m ${remainingSeconds}s`; +}; + +/** + * ThinkingIndicator component displays thinking status for a message + * + * @param thinking - Thinking metadata from the message + * @param data-testid - Optional test id for the component + */ +export function ThinkingIndicator({ thinking, 'data-testid': dataTestId = 'thinking-indicator' }: ThinkingIndicatorProps) { + const [isExpanded, setIsExpanded] = useState(false); + + const isCompleted = thinking.duration > 0; + + const calculateElapsed = () => { + if (!thinking.startTime) return 0; + const start = new Date(thinking.startTime).getTime(); + const now = Date.now(); + return Math.floor((now - start) / 1000); + }; + + const [liveElapsed, setLiveElapsed] = useState(() => calculateElapsed()); + + useEffect(() => { + if (!isCompleted && thinking.startTime) { + const updateElapsed = () => { + const start = new Date(thinking.startTime).getTime(); + const now = Date.now(); + const elapsed = Math.floor((now - start) / 1000); + setLiveElapsed(elapsed); + }; + + updateElapsed(); + + const interval = setInterval(updateElapsed, 1000); + + return () => clearInterval(interval); + } + }, [isCompleted, thinking.startTime]); + + const hasContent = thinking.strippedContent && thinking.strippedContent.length > 0; + const displayDuration = isCompleted ? thinking.duration : liveElapsed; + const formattedDuration = formatDuration(displayDuration); + const displayText = isCompleted ? 'Thought for' : 'Thinking for'; + + return ( +
+
+ +
+ {isExpanded && hasContent && thinking.strippedContent && ( +
+
    + {thinking.strippedContent.split('\n').filter(line => line.trim()).map((line, index) => ( +
  • + +
  • + ))} +
+
+ )} +
+ ); +} diff --git a/source/ui-chat/src/components/thinking/index.ts b/source/ui-chat/src/components/thinking/index.ts new file mode 100644 index 00000000..7ac65818 --- /dev/null +++ b/source/ui-chat/src/components/thinking/index.ts @@ -0,0 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { ThinkingIndicator } from './ThinkingIndicator'; +export type { ThinkingIndicatorProps } from './ThinkingIndicator'; diff --git a/source/ui-chat/src/components/tool-usage/ToolUsageList.scss b/source/ui-chat/src/components/tool-usage/ToolUsageList.scss new file mode 100644 index 00000000..48b07a7e --- /dev/null +++ b/source/ui-chat/src/components/tool-usage/ToolUsageList.scss @@ -0,0 +1,9 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +.tool-usage-list { + display: flex; + flex-direction: column; + gap: 4px; + margin: 0; +} \ No newline at end of file diff --git a/source/ui-chat/src/components/tool-usage/ToolUsageList.tsx b/source/ui-chat/src/components/tool-usage/ToolUsageList.tsx new file mode 100644 index 00000000..0d1e9750 --- /dev/null +++ b/source/ui-chat/src/components/tool-usage/ToolUsageList.tsx @@ -0,0 +1,43 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ToolUsageInfo } from '../../models/api/response'; +import { ToolUsageIndicator } from '../tools/ToolUsageIndicator'; +import './ToolUsageList.scss'; + +export interface ToolUsageListProps { + toolUsage: ToolUsageInfo[]; + 'data-testid'?: string; +} + +/** + * ToolUsageList component displays a list of tool usage indicators + * + * @param toolUsage - Array of tool usage information + * @param data-testid - Optional test id for the component + */ +export function ToolUsageList({ + toolUsage, + 'data-testid': dataTestId = 'tool-usage-list' +}: ToolUsageListProps) { + if (!toolUsage || toolUsage.length === 0) { + return null; + } + + return ( +
+ {toolUsage.map((tool, index) => ( + + ))} +
+ ); +} diff --git a/source/ui-chat/src/components/tool-usage/index.ts b/source/ui-chat/src/components/tool-usage/index.ts new file mode 100644 index 00000000..dcdc7918 --- /dev/null +++ b/source/ui-chat/src/components/tool-usage/index.ts @@ -0,0 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { ToolUsageList } from './ToolUsageList'; +export type { ToolUsageListProps } from './ToolUsageList'; diff --git a/source/ui-chat/src/components/tools/HelpPanelContent.tsx b/source/ui-chat/src/components/tools/HelpPanelContent.tsx index a0157a2a..72544e87 100644 --- a/source/ui-chat/src/components/tools/HelpPanelContent.tsx +++ b/source/ui-chat/src/components/tools/HelpPanelContent.tsx @@ -2,6 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 import HelpPanel from '@cloudscape-design/components/help-panel'; +import { + MULTIMODAL_MAX_IMAGES, + MULTIMODAL_MAX_DOCUMENTS, + MULTIMODAL_SUPPORTED_IMAGE_FORMATS, + MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS +} from '../../utils/constants'; export const NoHelp = () => ( Help panel}> @@ -9,6 +15,37 @@ export const NoHelp = () => ( ); +export const FileUploadHelp = () => ( + File Upload}> +

+ Filenames must start with an alphanumeric character, up to 255 characters long and can contain: +

    +
  • Spaces
  • +
  • Hyphens ( - )
  • +
  • Underscores ( _ )
  • +
+

+

+ Not allowed: +

    +
  • Unicode or any other special characters
  • +
  • + Uppercase extensions (e.g. png is allowed but not PNG) +
  • +
+

+

+ Supported file extensions: +

+

+ {[...MULTIMODAL_SUPPORTED_IMAGE_FORMATS, ...MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS].join(', ')} +

+

+ Only supports up to {MULTIMODAL_MAX_IMAGES} images and {MULTIMODAL_MAX_DOCUMENTS} documents per conversation. +

+
+); + export const HomeHelp = () => ( Dashboard}>

diff --git a/source/ui-chat/src/components/tools/ToolUsageIndicator.scss b/source/ui-chat/src/components/tools/ToolUsageIndicator.scss new file mode 100644 index 00000000..9ea79cca --- /dev/null +++ b/source/ui-chat/src/components/tools/ToolUsageIndicator.scss @@ -0,0 +1,182 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +.tool-usage-indicator { + opacity: 1; + transform: translateY(0); + margin-bottom: 8px; + + &--completed { + opacity: 0.6; + } + + &--failed { + opacity: 1; + } + + &__content { + display: inline-flex; + align-items: center; + gap: 6px; + } + + &__toggle { + display: inline-flex; + align-items: center; + gap: 4px; + padding: 2px 6px; + background: transparent; + border: none; + border-radius: 4px; + cursor: pointer; + transition: background 0.15s ease; + font-size: 13px; + color: var(--color-text-body-secondary, #5f6b7a); + + &:hover { + background: var(--color-background-container-header, #f9fafb); + } + + &:focus { + outline: 2px solid var(--color-border-item-focused, #0972d3); + outline-offset: 1px; + } + + &:active { + background: var(--color-background-control-default, #e9ebed); + } + + &:disabled { + cursor: default; + + &:hover { + background: transparent; + } + } + } + + &__text { + display: inline-flex; + align-items: baseline; + gap: 4px; + font-weight: 400; + } + + &__name { + font-weight: 500; + color: var(--color-text-body-default, #000716); + display: inline; + } + + &__mcp { + font-size: 12px; + color: var(--color-text-status-info, #0972d3); + font-weight: 400; + } + + &__duration { + font-size: 12px; + color: var(--color-text-status-info, #0972d3); + font-weight: 500; + } + + &--failed &__toggle { + color: var(--color-text-status-error, #d91515); + } + + &--failed &__name { + color: var(--color-text-status-error, #d91515); + } + + &__details { + display: flex; + flex-direction: column; + gap: 4px; + margin-top: 4px; + margin-left: 24px; + padding: 8px 12px; + background: var(--color-background-container-content, #ffffff); + border: 1px solid var(--color-border-divider-default, #e9ebed); + border-radius: 4px; + font-size: 12px; + animation: slideDown 0.15s ease-out; + } + + &__section { + margin-bottom: 8px; + + &:last-child { + margin-bottom: 0; + } + } + + &__section-title { + font-size: 11px; + font-weight: 600; + color: var(--color-text-body-secondary, #5f6b7a); + text-transform: uppercase; + letter-spacing: 0.5px; + margin-bottom: 4px; + } + + &__error { + font-size: 12px; + color: var(--color-text-status-error, #d91515); + line-height: 1.5; + padding: 6px 8px; + background: var(--color-background-status-error, #fef6f6); + border-radius: 4px; + } + + &__code { + background-color: var(--color-background-code-block, #f2f3f3); + border: 1px solid var(--color-border-divider-default, #e9ebed); + border-radius: 4px; + padding: 8px; + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', 'Consolas', 'source-code-pro', monospace; + font-size: 11px; + line-height: 1.5; + color: var(--color-text-body-default, #000716); + overflow-x: auto; + white-space: pre-wrap; + word-wrap: break-word; + margin: 0; + max-height: 300px; + overflow-y: auto; + } +} + +@keyframes slideDown { + from { + opacity: 0; + transform: translateY(-2px); + } + + to { + opacity: 1; + transform: translateY(0); + } +} + +// Responsive design for mobile devices +@media (max-width: 768px) { + .tool-usage-indicator { + &__details { + margin-left: 0; + font-size: 11px; + } + + &__toggle { + font-size: 12px; + } + + &__duration, + &__mcp { + font-size: 11px; + } + + &__code { + font-size: 10px; + } + } +} \ No newline at end of file diff --git a/source/ui-chat/src/components/tools/ToolUsageIndicator.tsx b/source/ui-chat/src/components/tools/ToolUsageIndicator.tsx new file mode 100644 index 00000000..c3bc4712 --- /dev/null +++ b/source/ui-chat/src/components/tools/ToolUsageIndicator.tsx @@ -0,0 +1,96 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { useState } from 'react'; +import { Icon } from '@cloudscape-design/components'; +import { ToolUsageInfo } from '../../models/api/response'; +import './ToolUsageIndicator.scss'; + +export interface ToolUsageIndicatorProps { + toolUsage: ToolUsageInfo; + 'data-testid'?: string; +} + +/** + * ToolUsageIndicator component displays tool invocation information in a subtle, expandable format + * + * @param toolUsage - Tool usage information + * @param data-testid - Optional test id for the component + */ +export function ToolUsageIndicator({ + toolUsage, + 'data-testid': dataTestId = 'tool-usage-indicator' +}: ToolUsageIndicatorProps) { + const [isExpanded, setIsExpanded] = useState(false); + + const isCompleted = toolUsage.status === 'completed' || toolUsage.status === 'failed'; + const isFailed = toolUsage.status === 'failed'; + + // Check if there's content to expand + const hasContent = !!(toolUsage.toolInput || toolUsage.toolOutput || toolUsage.error); + + return ( +

+
+ +
+ {isExpanded && hasContent && ( +
+ {toolUsage.error && ( +
+
Error
+
+ {toolUsage.error} +
+
+ )} + {toolUsage.toolInput && ( +
+
Input
+
+                                {JSON.stringify(toolUsage.toolInput, null, 2)}
+                            
+
+ )} + {toolUsage.toolOutput && ( +
+
Output
+
+                                {toolUsage.toolOutput}
+                            
+
+ )} +
+ )} +
+ ); +} diff --git a/source/ui-chat/src/components/tools/ToolsContent.tsx b/source/ui-chat/src/components/tools/ToolsContent.tsx index 1e37d139..567e783f 100644 --- a/source/ui-chat/src/components/tools/ToolsContent.tsx +++ b/source/ui-chat/src/components/tools/ToolsContent.tsx @@ -1,10 +1,15 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { HomeHelp, NoHelp, ProjectsDetailsHelp, ProjectsOverviewHelp } from './HelpPanelContent.tsx'; +import { useSelector } from 'react-redux'; +import { HomeHelp, NoHelp, FileUploadHelp, ProjectsDetailsHelp, ProjectsOverviewHelp } from './HelpPanelContent.tsx'; import { Route, Routes } from 'react-router-dom'; +import { getMultimodalEnabledState } from '../../store/configSlice'; +import { RootState } from '../../store/store'; export const ToolsContent = () => { + const isMultimodalEnabled = useSelector((state: RootState) => getMultimodalEnabledState(state)); + return ( <> @@ -12,7 +17,7 @@ export const ToolsContent = () => { } /> } /> } /> - } /> + : } /> ); diff --git a/source/ui-chat/src/components/tools/index.ts b/source/ui-chat/src/components/tools/index.ts new file mode 100644 index 00000000..2c104858 --- /dev/null +++ b/source/ui-chat/src/components/tools/index.ts @@ -0,0 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { ToolUsageIndicator } from './ToolUsageIndicator'; +export type { ToolUsageIndicatorProps } from './ToolUsageIndicator'; diff --git a/source/ui-chat/src/hooks/__tests__/use-file-upload.test.ts b/source/ui-chat/src/hooks/__tests__/use-file-upload.test.ts new file mode 100644 index 00000000..268df1f4 --- /dev/null +++ b/source/ui-chat/src/hooks/__tests__/use-file-upload.test.ts @@ -0,0 +1,238 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, expect, vi, beforeEach, test } from 'vitest'; +import { renderHook, act } from '@testing-library/react'; +import { useFileUpload } from '../use-file-upload'; + +vi.mock('../../services/fileUploadService', () => ({ + uploadFiles: vi.fn(), + deleteFiles: vi.fn() +})); + +vi.mock('../../utils/file-upload', () => ({ + validateFiles: vi.fn(() => []), + isValidFileName: vi.fn((fileName: string) => { + // reject obviously malicious patterns + return ( + fileName && + typeof fileName === 'string' && + fileName.length > 0 && + fileName.length <= 255 && + !fileName.includes('../') && + !fileName.includes('..\\') && + /^[a-zA-Z0-9](?:[a-zA-Z0-9._-]*[a-zA-Z0-9])?\.[a-zA-Z0-9]+$/.test(fileName) + ); + }) +})); + +describe('useFileUpload', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test('initializes with empty state', () => { + const { result } = renderHook(() => useFileUpload()); + + expect(result.current.files).toEqual([]); + expect(result.current.uploadedFiles).toEqual([]); + expect(result.current.isUploading).toBe(false); + expect(result.current.isDeleting).toBe(false); + expect(result.current.uploadProgress).toEqual({}); + expect(result.current.uploadErrors).toEqual({}); + expect(result.current.deleteErrors).toEqual({}); + }); + + test('adds valid files', async () => { + const { result } = renderHook(() => useFileUpload()); + const testFile = new File(['test'], 'test.txt', { type: 'text/plain' }); + + await act(async () => { + await result.current.addFiles([testFile]); + }); + + expect(result.current.files).toHaveLength(1); + expect(result.current.files[0]).toBe(testFile); + }); + + test('removes files by name', async () => { + const { result } = renderHook(() => useFileUpload()); + const testFile = new File(['test'], 'test.txt', { type: 'text/plain' }); + + await act(async () => { + await result.current.addFiles([testFile]); + }); + + expect(result.current.files).toHaveLength(1); + + act(() => { + result.current.removeFile('test.txt'); + }); + + expect(result.current.files).toHaveLength(0); + }); + + test('rejects invalid fileName in removeFile - null/undefined', async () => { + const { result } = renderHook(() => useFileUpload()); + const testFile = new File(['test'], 'test.txt', { type: 'text/plain' }); + const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + + await act(async () => { + await result.current.addFiles([testFile]); + }); + + expect(result.current.files).toHaveLength(1); + + // Try to remove with null/undefined fileName + act(() => { + result.current.removeFile(null as any); + }); + + act(() => { + result.current.removeFile(undefined as any); + }); + + // File should still be there + expect(result.current.files).toHaveLength(1); + expect(consoleSpy).toHaveBeenCalledWith('Invalid fileName provided to removeFile:', null); + expect(consoleSpy).toHaveBeenCalledWith('Invalid fileName provided to removeFile:', undefined); + + consoleSpy.mockRestore(); + }); + + test('rejects invalid fileName in removeFile - malicious patterns', async () => { + const { result } = renderHook(() => useFileUpload()); + const testFile = new File(['test'], 'test.txt', { type: 'text/plain' }); + const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + + await act(async () => { + await result.current.addFiles([testFile]); + }); + + expect(result.current.files).toHaveLength(1); + + // Try to remove with malicious fileName patterns + const maliciousNames = [ + '../../../etc/passwd', + '..\\..\\windows\\system32', + 'file.txt', + 'file\x00.txt', + '' + ]; + + maliciousNames.forEach((maliciousName) => { + act(() => { + result.current.removeFile(maliciousName); + }); + }); + + // File should still be there + expect(result.current.files).toHaveLength(1); + expect(consoleSpy).toHaveBeenCalledTimes(maliciousNames.length); + + consoleSpy.mockRestore(); + }); + + test('rejects non-string fileName in removeFile', async () => { + const { result } = renderHook(() => useFileUpload()); + const testFile = new File(['test'], 'test.txt', { type: 'text/plain' }); + const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + + await act(async () => { + await result.current.addFiles([testFile]); + }); + + expect(result.current.files).toHaveLength(1); + + // Try to remove with non-string fileName + act(() => { + result.current.removeFile(123 as any); + }); + + act(() => { + result.current.removeFile({} as any); + }); + + act(() => { + result.current.removeFile([] as any); + }); + + // File should still be there + expect(result.current.files).toHaveLength(1); + expect(consoleSpy).toHaveBeenCalledTimes(3); + + consoleSpy.mockRestore(); + }); + + test('clears all files', async () => { + const { result } = renderHook(() => useFileUpload()); + const testFile = new File(['test'], 'test.txt', { type: 'text/plain' }); + + await act(async () => { + await result.current.addFiles([testFile]); + }); + + expect(result.current.files).toHaveLength(1); + + act(() => { + result.current.clearFiles(); + }); + + expect(result.current.files).toHaveLength(0); + expect(result.current.uploadedFiles).toHaveLength(0); + expect(result.current.uploadProgress).toEqual({}); + expect(result.current.uploadErrors).toEqual({}); + expect(result.current.deleteErrors).toEqual({}); + }); + + test('handles file validation errors', async () => { + const { validateFiles } = await import('../../utils/file-upload'); + vi.mocked(validateFiles).mockReturnValue([ + { fileName: 'invalid.txt', error: new Error('File type not supported') } + ]); + + const { result } = renderHook(() => useFileUpload()); + const invalidFile = new File(['test'], 'invalid.txt', { type: 'text/plain' }); + + await act(async () => { + await result.current.addFiles([invalidFile]); + }); + + expect(result.current.files).toHaveLength(0); + expect(result.current.uploadErrors['invalid.txt']).toBe('File type not supported'); + }); + + test('uploads files successfully', async () => { + const { uploadFiles } = await import('../../services/fileUploadService'); + vi.mocked(uploadFiles).mockResolvedValue({ + results: [{ success: true, fileName: 'test.txt', fileKey: 'test-key', error: null, attempts: 1 }], + allSuccessful: true, + successCount: 1, + failureCount: 0, + uploadedFiles: [ + { + key: 'test-key', + fileName: 'test.txt', + fileContentType: 'text/plain', + fileExtension: 'txt', + fileSize: 1024 + } + ], + messageId: 'test-message-id' + }); + + const { result } = renderHook(() => useFileUpload()); + const testFile = new File(['test'], 'test.txt', { type: 'text/plain' }); + + await act(async () => { + await result.current.addFiles([testFile]); + }); + + await act(async () => { + const uploadedFiles = await result.current.uploadFiles('conversation-id', 'use-case-id', 'auth-token'); + expect(uploadedFiles).toHaveLength(1); + }); + + expect(result.current.uploadedFiles).toHaveLength(1); + }); +}); diff --git a/source/ui-chat/src/hooks/use-chat-message.ts b/source/ui-chat/src/hooks/use-chat-message.ts index 5e2d99bc..47c13395 100644 --- a/source/ui-chat/src/hooks/use-chat-message.ts +++ b/source/ui-chat/src/hooks/use-chat-message.ts @@ -1,12 +1,54 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { useCallback, useReducer } from 'react'; -import { ChatResponse, isChatSuccessResponse, isErrorResponse, SourceDocument } from '../models'; +import { useCallback, useReducer, useRef } from 'react'; +import { ChatResponse, isChatSuccessResponse, isErrorResponse, SourceDocument, ToolUsageInfo } from '../models'; import { ChatActionTypes, Message } from '../pages/chat/types'; import { useUser } from '../contexts/UserContext'; import { chatReducer } from '../reducers/chat-reducer'; import { END_CONVERSATION_TOKEN } from '../utils/constants'; +import { UploadedFile } from '../types/file-upload'; + +/** + * System messages that should be filtered out from display + * These messages are used for internal state management and should not be shown to users + */ +const SYSTEM_MESSAGES_TO_FILTER = new Set([ + 'PROCESSING', + 'KEEP ALIVE', + 'KEEP_ALIVE', + 'KEEPALIVE', + '##PROCESSING##', + '##KEEP_ALIVE##', + '##KEEP ALIVE##', + '##KEEPALIVE##' +]); + +/** + * Checks if a message is a system message that should be filtered out + * @param {string} message - The message content to check + * @returns {boolean} True if the message should be filtered, false otherwise + */ +const shouldFilterSystemMessage = (message: string): boolean => { + if (!message) { + return false; + } + + const trimmed = message.trim(); + const messageUpper = trimmed.toUpperCase(); + + if (SYSTEM_MESSAGES_TO_FILTER.has(messageUpper)) { + return true; + } + + if (trimmed.startsWith('##') && trimmed.endsWith('##')) { + const content = trimmed.slice(2, -2).toUpperCase(); + return SYSTEM_MESSAGES_TO_FILTER.has(content) || + SYSTEM_MESSAGES_TO_FILTER.has(`##${content}##`); + } + + return false; +}; /** * Interface representing the state of a chat conversation @@ -15,6 +57,10 @@ import { END_CONVERSATION_TOKEN } from '../utils/constants'; * @property isGenAiResponseLoading - Flag indicating if AI is generating a response * @property sourceDocuments - Array of source documents referenced in the conversation * @property conversationId - Unique identifier for the conversation + * @property isStreaming - Flag indicating if response is being streamed + * @property streamingMessageId - ID of the message being streamed + * @property thinking - Thinking state for agent processing indicators + * @property toolUsage - Array of tool usage information for tracking agent tool invocations */ export interface ChatState { messages: Message[]; @@ -22,6 +68,14 @@ export interface ChatState { isGenAiResponseLoading: boolean; sourceDocuments: SourceDocument[]; conversationId: string; + isStreaming: boolean; + streamingMessageId?: string; + thinking?: { + isThinking: boolean; + thinkingMessage?: string; + startTime: string; + }; + toolUsage: ToolUsageInfo[]; } /** @@ -32,7 +86,11 @@ const initialState: ChatState = { currentResponse: '', isGenAiResponseLoading: false, sourceDocuments: [], - conversationId: '' + conversationId: '', + isStreaming: false, + streamingMessageId: undefined, + thinking: undefined, + toolUsage: [] }; /** @@ -42,6 +100,8 @@ const initialState: ChatState = { export const useChatMessages = () => { const [state, dispatch] = useReducer(chatReducer, initialState); const { userId } = useUser(); + const streamingStateRef = useRef(false); + const thinkingTimeoutRef = useRef(null); /** * Handles incoming chat responses and updates state accordingly @@ -55,11 +115,18 @@ export const useChatMessages = () => { return; } + // Filter system messages (defense-in-depth filtering) + if (response.data && shouldFilterSystemMessage(response.data)) { + return; + } + if (isErrorResponse(response) && response.errorMessage) { + streamingStateRef.current = false; dispatch({ type: ChatActionTypes.SET_ERROR, payload: response.errorMessage }); + return; } if (isChatSuccessResponse(response) && response.conversationId) { @@ -74,20 +141,69 @@ export const useChatMessages = () => { dispatch({ type: ChatActionTypes.ADD_REPHRASED_QUERY, payload: response.rephrased_query }); } - if (response.data !== undefined) { - if (response.data === END_CONVERSATION_TOKEN) { - dispatch({ type: ChatActionTypes.COMPLETE_AI_RESPONSE }); + + if (response.toolUsage) { + const existingToolIndex = state.toolUsage.findIndex( + tool => tool.toolName === response.toolUsage!.toolName && + tool.startTime === response.toolUsage!.startTime + ); + + if (existingToolIndex >= 0) { + dispatch({ + type: ChatActionTypes.UPDATE_TOOL_USAGE, + payload: { index: existingToolIndex, toolUsage: response.toolUsage } + }); } else { - // Send only the new data token, not the concatenated response dispatch({ - type: ChatActionTypes.UPDATE_AI_RESPONSE, - payload: { - content: response.data, - messageId: response.messageId - } + type: ChatActionTypes.ADD_TOOL_USAGE, + payload: response.toolUsage }); } } + + if (response.isStreaming === true && !streamingStateRef.current) { + streamingStateRef.current = true; + dispatch({ + type: ChatActionTypes.START_STREAMING, + payload: { messageId: response.messageId } + }); + } + + const isStreamingResponse = response.isStreaming === true || streamingStateRef.current; + + if (response.data !== undefined) { + if (response.data === END_CONVERSATION_TOKEN) { + if (isStreamingResponse) { + streamingStateRef.current = false; + dispatch({ type: ChatActionTypes.COMPLETE_STREAMING }); + } else { + dispatch({ type: ChatActionTypes.COMPLETE_AI_RESPONSE }); + } + } else { + if (isStreamingResponse) { + dispatch({ + type: ChatActionTypes.UPDATE_STREAMING_CHUNK, + payload: { + content: response.data, + messageId: response.messageId + } + }); + } else { + dispatch({ + type: ChatActionTypes.UPDATE_AI_RESPONSE, + payload: { + content: response.data, + messageId: response.messageId + } + }); + } + } + } + + if (response.streamComplete === true && (streamingStateRef.current || response.isStreaming)) { + streamingStateRef.current = false; + dispatch({ type: ChatActionTypes.COMPLETE_STREAMING }); + } } catch (error) { dispatch({ type: ChatActionTypes.SET_ERROR, @@ -95,13 +211,14 @@ export const useChatMessages = () => { }); } }, - [state.currentResponse] + [] ); /** * Resets the chat state to initial values */ const resetChat = useCallback(() => { + streamingStateRef.current = false; dispatch({ type: ChatActionTypes.RESET_CHAT }); }, []); @@ -116,9 +233,13 @@ export const useChatMessages = () => { /** * Adds a new user message to the chat * @param {string} userInput - The message content from the user + * @param {UploadedFile[]} files - Optional files attached to the message */ - const handleAddUserMessage = (userInput: string) => { - dispatch({ type: ChatActionTypes.ADD_USER_MESSAGE, payload: { content: userInput, authorId: userId } }); + const handleAddUserMessage = (userInput: string, files?: UploadedFile[]) => { + dispatch({ + type: ChatActionTypes.ADD_USER_MESSAGE, + payload: { content: userInput, authorId: userId, files } + }); }; return { @@ -127,14 +248,25 @@ export const useChatMessages = () => { isGenAiResponseLoading: state.isGenAiResponseLoading, sourceDocuments: state.sourceDocuments, conversationId: state.conversationId, + isStreaming: state.isStreaming, + streamingMessageId: state.streamingMessageId, + thinking: state.thinking, + toolUsage: state.toolUsage, handleMessage, addUserMessage: handleAddUserMessage, resetChat, setMessages: handleSetMessages, setIsGenAiResponseLoading: (loading: boolean) => { if (!loading) { - dispatch({ type: ChatActionTypes.COMPLETE_AI_RESPONSE }); + if (state.isStreaming) { + dispatch({ type: ChatActionTypes.COMPLETE_STREAMING }); + } else { + dispatch({ type: ChatActionTypes.COMPLETE_AI_RESPONSE }); + } } + }, + setConversationId: (conversationId: string) => { + dispatch({ type: ChatActionTypes.SET_CONVERSATION_ID, payload: conversationId }); } }; }; diff --git a/source/ui-chat/src/hooks/use-file-upload.ts b/source/ui-chat/src/hooks/use-file-upload.ts new file mode 100644 index 00000000..b19f1aad --- /dev/null +++ b/source/ui-chat/src/hooks/use-file-upload.ts @@ -0,0 +1,262 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { useCallback, useReducer } from 'react'; +import { FileUploadState, UploadedFile, FileUploadActionType, FileUploadActionTypes } from '../types/file-upload'; +import { validateFiles, isValidFileName } from '../utils/file-upload'; +import { v4 as uuidv4 } from 'uuid'; +import { uploadFiles as serviceUploadFiles, deleteFiles } from '../services/fileUploadService'; + +interface FileUploadAction { + type: FileUploadActionType; + payload?: any; +} + +const initialState: FileUploadState = { + files: [], + uploadedFiles: [], + isUploading: false, + isDeleting: false, + uploadProgress: {}, + uploadErrors: {}, + deleteErrors: {} +}; + +const fileUploadReducer = (state: FileUploadState, action: FileUploadAction): FileUploadState => { + switch (action.type) { + case FileUploadActionTypes.ADD_FILES: + return { + ...state, + files: [...state.files, ...action.payload] + }; + case FileUploadActionTypes.UPDATE_FILE_PROGRESS: + return { + ...state, + uploadProgress: { + ...state.uploadProgress, + [action.payload.fileName]: action.payload.progress + } + }; + case FileUploadActionTypes.SET_FILE_ERROR: + return { + ...state, + uploadErrors: { + ...state.uploadErrors, + [action.payload.fileName]: action.payload.error + } + }; + case FileUploadActionTypes.SET_DELETE_ERROR: + return { + ...state, + deleteErrors: { + ...state.deleteErrors, + [action.payload.fileName]: action.payload.error + } + }; + case FileUploadActionTypes.CLEAR_DELETE_ERROR: + return { + ...state, + deleteErrors: Object.fromEntries( + Object.entries(state.deleteErrors).filter(([key]) => key !== action.payload) + ) + }; + case FileUploadActionTypes.REMOVE_FILE: + return { + ...state, + files: state.files.filter((file: File) => file.name !== action.payload), + uploadedFiles: state.uploadedFiles.filter((file: UploadedFile) => file.fileName !== action.payload), + uploadProgress: Object.fromEntries( + Object.entries(state.uploadProgress).filter(([key]) => key !== action.payload) + ), + uploadErrors: Object.fromEntries( + Object.entries(state.uploadErrors).filter(([key]) => key !== action.payload) + ), + deleteErrors: Object.fromEntries( + Object.entries(state.deleteErrors).filter(([key]) => key !== action.payload) + ) + }; + case FileUploadActionTypes.CLEAR_FILES: + return initialState; + case FileUploadActionTypes.SET_UPLOADED_FILE: + return { + ...state, + uploadedFiles: [...state.uploadedFiles, action.payload] + }; + case FileUploadActionTypes.SET_UPLOADING: + return { + ...state, + isUploading: action.payload + }; + case FileUploadActionTypes.SET_DELETING: + return { + ...state, + isDeleting: action.payload + }; + default: + return state; + } +}; + +export const useFileUpload = () => { + const [state, dispatch] = useReducer(fileUploadReducer, initialState); + const addFiles = useCallback(async (newFiles: File[]) => { + const validationErrors = validateFiles(newFiles); + + if (validationErrors.length > 0) { + validationErrors.forEach((error) => { + dispatch({ + type: FileUploadActionTypes.SET_FILE_ERROR, + payload: { fileName: error.fileName, error: error.error } + }); + }); + return; + } + + const validFiles: File[] = newFiles; + + if (validFiles.length > 0) { + dispatch({ type: FileUploadActionTypes.ADD_FILES, payload: validFiles }); + } + }, []); + + const removeFile = useCallback((fileName: string) => { + if (!isValidFileName(fileName)) { + console.error('Invalid fileName provided to removeFile:', fileName); + return; + } + + dispatch({ type: FileUploadActionTypes.REMOVE_FILE, payload: fileName }); + }, []); + + const clearFiles = useCallback(() => { + dispatch({ type: FileUploadActionTypes.CLEAR_FILES }); + }, []); + + const uploadFiles = useCallback( + async (conversationId: string, useCaseId: string, authToken: string): Promise => { + if (state.files.length === 0) return []; + + dispatch({ type: FileUploadActionTypes.SET_UPLOADING, payload: true }); + + try { + const result = await serviceUploadFiles( + state.files, + conversationId, + useCaseId, + (fileName: string, progress: number) => { + dispatch({ + type: FileUploadActionTypes.UPDATE_FILE_PROGRESS, + payload: { fileName, progress } + }); + }, + (fileName: string, success: boolean, error?: Error) => { + if (!success && error && isValidFileName(fileName)) { + dispatch({ + type: 'SET_FILE_ERROR', + payload: { fileName, error } + }); + } + } + ); + result.uploadedFiles.forEach((uploadedFile: UploadedFile) => { + dispatch({ type: FileUploadActionTypes.SET_UPLOADED_FILE, payload: uploadedFile }); + }); + + return result.uploadedFiles; + } finally { + dispatch({ type: FileUploadActionTypes.SET_UPLOADING, payload: false }); + } + }, + [state.files] + ); + + const deleteUploadedFiles = useCallback( + async (fileNames: string[], conversationId: string, messageId: string, useCaseId: string) => { + if (fileNames.length === 0) return { results: [], allSuccessful: true, successCount: 0, failureCount: 0 }; + + const validFileNames = fileNames.filter((fileName) => { + if (!isValidFileName(fileName)) { + console.error('Invalid fileName provided to deleteUploadedFiles:', fileName); + return false; + } + return true; + }); + + if (validFileNames.length === 0) { + console.error('No valid fileNames provided to deleteUploadedFiles'); + return { results: [], allSuccessful: false, successCount: 0, failureCount: fileNames.length }; + } + + dispatch({ type: FileUploadActionTypes.SET_DELETING, payload: true }); + + validFileNames.forEach((fileName) => { + dispatch({ type: FileUploadActionTypes.CLEAR_DELETE_ERROR, payload: fileName }); + }); + + try { + const result = await deleteFiles( + validFileNames, + conversationId, + messageId, + useCaseId, + (fileName: string, success: boolean, error?: Error) => { + if (!success && error && isValidFileName(fileName)) { + dispatch({ + type: FileUploadActionTypes.SET_DELETE_ERROR, + payload: { fileName, error } + }); + } + } + ); + + result.deletions.forEach((deletion) => { + if (deletion.success && isValidFileName(deletion.fileName)) { + dispatch({ type: FileUploadActionTypes.REMOVE_FILE, payload: deletion.fileName }); + } + }); + + return result; + } catch (error) { + // Set error for all valid files if the request fails completely + validFileNames.forEach((fileName) => { + dispatch({ + type: FileUploadActionTypes.SET_DELETE_ERROR, + payload: { + fileName, + error: error instanceof Error ? error.message : 'Delete failed' + } + }); + }); + throw error; + } finally { + dispatch({ type: FileUploadActionTypes.SET_DELETING, payload: false }); + } + }, + [] + ); + + const generateConversationId = useCallback((): string => { + return uuidv4(); + }, []); + + const generateMessageId = useCallback((): string => { + return uuidv4(); + }, []); + + return { + files: state.files, + uploadedFiles: state.uploadedFiles, + isUploading: state.isUploading, + isDeleting: state.isDeleting, + uploadProgress: state.uploadProgress, + uploadErrors: state.uploadErrors, + deleteErrors: state.deleteErrors, + addFiles, + removeFile, + clearFiles, + uploadFiles, + deleteUploadedFiles, + generateConversationId, + generateMessageId + }; +}; diff --git a/source/ui-chat/src/mocks/handlers.ts b/source/ui-chat/src/mocks/handlers.ts index 0fa22420..9daf3d1a 100644 --- a/source/ui-chat/src/mocks/handlers.ts +++ b/source/ui-chat/src/mocks/handlers.ts @@ -35,7 +35,73 @@ export const getDeploymentHandler = (apiUrl: string) => }); }); +export const fileUploadHandler = (apiUrl: string) => + http.post(`${apiUrl}/files/:useCaseId`, async ({ request }) => { + const body = (await request.json()) as { fileNames: string[]; conversationId: string; messageId: string }; + + return ok({ + uploads: body.fileNames.map((fileName, index) => ({ + uploadUrl: `https://mock-s3-bucket.s3.amazonaws.com/`, + formFields: { + key: `mock-file-key-${index}`, + 'Content-Type': 'application/pdf', + policy: 'mock-policy', + 'x-amz-algorithm': 'AWS4-HMAC-SHA256', + 'x-amz-credential': 'mock-credential', + 'x-amz-date': '20240101T000000Z', + 'x-amz-signature': 'mock-signature' + }, + fileName, + fileKey: `mock-file-key-${index}`, + expiresIn: '3600', + createdAt: new Date().toISOString() + })) + }); + }); + +export const fileDeleteHandler = (apiUrl: string) => + http.delete(`${apiUrl}/files/:useCaseId`, async ({ request }) => { + const body = (await request.json()) as { fileNames: string[]; conversationId: string; messageId: string }; + + const deletions = body.fileNames.map((fileName) => { + const fileExists = Math.random() > 0.1; + + if (!fileExists) { + return { + success: false, + fileName, + error: 'File not found or already deleted' + }; + } + + return { + success: true, + fileName + }; + }); + + const failureCount = deletions.filter((d) => !d.success).length; + + return ok({ + deletions, + allSuccessful: failureCount === 0, + failureCount + }); + }); + +export const mockS3UploadHandler = () => + http.post('https://*.s3.amazonaws.com/*', async () => { + // Simulate successful S3 upload + await delay(500); // Simulate upload time + return new HttpResponse(null, { status: 204 }); + }); + /** * @param apiUrl the base url for http requests. only requests to this base url will be intercepted and handled by mock-service-worker. */ -export const handlers = (apiUrl: string) => [getDeploymentHandler(apiUrl)]; +export const handlers = (apiUrl: string) => [ + getDeploymentHandler(apiUrl), + fileUploadHandler(apiUrl), + fileDeleteHandler(apiUrl), + mockS3UploadHandler() +]; diff --git a/source/ui-chat/src/models/api/response.ts b/source/ui-chat/src/models/api/response.ts index b49598d2..b987b03d 100644 --- a/source/ui-chat/src/models/api/response.ts +++ b/source/ui-chat/src/models/api/response.ts @@ -3,6 +3,29 @@ import { SourceDocument } from './document'; +/** + * Tool usage information for tracking agent tool invocations + */ +export interface ToolUsageInfo { + toolName: string; + toolInput?: Record; + toolOutput?: string; + status: 'started' | 'in_progress' | 'completed' | 'failed'; + mcpServerName?: string; + startTime: string; + endTime?: string; + error?: string; +} + +/** + * Thinking state for agent processing indicators + */ +export interface ThinkingState { + isThinking: boolean; + thinkingMessage?: string; + startTime: string; +} + export interface ChatResponse { data?: string; errorMessage?: string; @@ -10,6 +33,16 @@ export interface ChatResponse { sourceDocument?: SourceDocument; rephrased_query?: string; messageId?: string; + + // Streaming fields + isStreaming?: boolean; + streamComplete?: boolean; + + // Tool usage fields + toolUsage?: ToolUsageInfo; + + // Thinking fields (AgentBuilder only) + thinking?: ThinkingState; } export interface ChatSuccessResponse extends ChatResponse { @@ -22,6 +55,26 @@ export interface ChatErrorResponse extends ChatResponse { errorMessage: string; } +/** + * AgentBuilder-specific chat response with all advanced features + */ +export interface AgentBuilderChatResponse extends ChatResponse { + // Inherits all ChatResponse fields including: + // - data, errorMessage, conversationId, sourceDocument, rephrased_query, messageId + // - isStreaming, streamComplete + // - toolUsage, thinking +} + +/** + * Workflow-specific chat response with all advanced features + */ +export interface WorkflowChatResponse extends ChatResponse { + // Inherits all ChatResponse fields including: + // - data, errorMessage, conversationId, sourceDocument, rephrased_query, messageId + // - isStreaming, streamComplete + // - toolUsage, thinking +} + export const isChatSuccessResponse = (response: ChatResponse): response is ChatSuccessResponse => { const message = response as ChatSuccessResponse; return ( diff --git a/source/ui-chat/src/models/message.ts b/source/ui-chat/src/models/message.ts index 9866c644..d24a2e78 100644 --- a/source/ui-chat/src/models/message.ts +++ b/source/ui-chat/src/models/message.ts @@ -1,9 +1,12 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +import { ApiFileReference } from '../types/file-upload'; + export type BaseMessage = { conversationId?: string; authToken?: string; + files?: ApiFileReference[]; }; export type TextMessage = BaseMessage & { @@ -18,4 +21,18 @@ export type AgentMessage = BaseMessage & { inputText: string; }; -export type ChatMessage = TextMessage | AgentMessage; +export type AgentBuilderMessage = BaseMessage & { + action: 'invokeAgentCore'; + inputText: string; + promptTemplate?: string; + messageId?: string; +}; + +export type WorkflowMessage = BaseMessage & { + action: 'invokeWorkflow'; + inputText: string; + promptTemplate?: string; + messageId?: string; +}; + +export type ChatMessage = TextMessage | AgentMessage | AgentBuilderMessage | WorkflowMessage; diff --git a/source/ui-chat/src/models/runtime-config.ts b/source/ui-chat/src/models/runtime-config.ts index 0a048da7..d2389d0e 100644 --- a/source/ui-chat/src/models/runtime-config.ts +++ b/source/ui-chat/src/models/runtime-config.ts @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { TextUseCaseConfig, AgentUseCaseConfig } from './use-case-config'; +import { TextUseCaseConfig, AgentUseCaseConfig, AgentBuilderUseCaseConfig, WorkflowUseCaseConfig } from './use-case-config'; export interface RuntimeConfig { IsInternalUser: string; @@ -16,6 +16,6 @@ export interface RuntimeConfig { CognitoDomain: string; UseCaseConfigKey: string; UseCaseId: string; - UseCaseConfig?: TextUseCaseConfig | AgentUseCaseConfig; + UseCaseConfig?: TextUseCaseConfig | AgentUseCaseConfig | AgentBuilderUseCaseConfig | WorkflowUseCaseConfig; RestApiEndpoint: string; } diff --git a/source/ui-chat/src/models/use-case-config.ts b/source/ui-chat/src/models/use-case-config.ts index 39cad2cf..aad0f34b 100644 --- a/source/ui-chat/src/models/use-case-config.ts +++ b/source/ui-chat/src/models/use-case-config.ts @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -export type UseCaseType = 'Agent' | 'Text'; +export type UseCaseType = 'Agent' | 'Text' | 'AgentBuilder' | 'Workflow'; export interface BaseUseCaseConfig { UseCaseName: string; @@ -11,6 +11,14 @@ export interface BaseUseCaseConfig { }; } +export interface MultimodalParams { + MultimodalEnabled: boolean; + FileUploadEnabled?: boolean; + MaxFiles?: number; + MaxFileSize?: number; + SupportedFileTypes?: string[]; +} + export interface TextUseCaseConfig extends BaseUseCaseConfig { LlmParams: { RAGEnabled: boolean; @@ -35,3 +43,19 @@ export interface AgentUseCaseConfig extends BaseUseCaseConfig { }; }; } + +export interface AgentBuilderUseCaseConfig extends BaseUseCaseConfig { + UseCaseType: 'AgentBuilder'; + LlmParams: { + RAGEnabled: boolean; + MultimodalParams?: MultimodalParams; + } +} + +export interface WorkflowUseCaseConfig extends BaseUseCaseConfig { + UseCaseType: 'Workflow'; + LlmParams: { + RAGEnabled: boolean; + MultimodalParams?: MultimodalParams; + } +} diff --git a/source/ui-chat/src/pages/chat/ChatPage.tsx b/source/ui-chat/src/pages/chat/ChatPage.tsx index 9031aedd..8b9dcc49 100644 --- a/source/ui-chat/src/pages/chat/ChatPage.tsx +++ b/source/ui-chat/src/pages/chat/ChatPage.tsx @@ -26,6 +26,9 @@ import { ChatResponse } from '@/models'; import { LoadingStatus, LoadingState, LoadingErrorType } from './components/alerts/LoadingStatus'; import { SerializedError } from '@reduxjs/toolkit'; import { FetchBaseQueryError } from '@reduxjs/toolkit/query'; +import { UploadedFile } from '@/types/file-upload'; +import { useFileUpload } from '@/hooks/use-file-upload'; +import { getMultimodalEnabledState } from '@/store/configSlice'; /** * ChatPage component handles the main chat interface and WebSocket communication. @@ -56,6 +59,8 @@ export default function ChatPage() { const runtimeConfig = useSelector((state: RootState) => state.config.runtimeConfig); const promptTemplate = useSelector((state: RootState) => selectPromptTemplate(state)); + const isMultimodalEnabled = useSelector((state: RootState) => getMultimodalEnabledState(state)); + /** * Retrieves the WebSocket URL with authentication token. * Handles token retrieval errors and updates connection state accordingly. @@ -94,9 +99,14 @@ export default function ChatPage() { handleMessage, conversationId, addUserMessage, - resetChat + resetChat, + thinking, + toolUsage, + setConversationId } = useChatMessages(); + const { generateMessageId } = useFileUpload(); + /** * WebSocket hook configuration for handling real-time communication */ @@ -111,21 +121,33 @@ export default function ChatPage() { * Handles sending user prompts through WebSocket connection. * Validates connection state and handles error scenarios. * @param value The user's message to be sent + * @param files Optional files attached to the message + * @param providedMessageId Optional message ID from file upload */ const handlePromptSend = useCallback( - (value: string) => { + (value: string, files?: UploadedFile[], providedMessageId?: string) => { if (readyState !== ReadyState.OPEN) { return; } try { - addUserMessage(value); + addUserMessage(value, files); + + // Use existing conversation ID + let currentConversationId = conversationId; + + // Use provided message ID from file upload, or generate new one + const messageIdToUse = providedMessageId || generateMessageId(); + const payload = constructPayload({ useCaseConfig: runtimeConfig?.UseCaseConfig!, message: value, - conversationId, + conversationId: currentConversationId, + messageId: messageIdToUse, promptTemplate, - authToken: authToken + authToken: authToken, + files: files, + useCaseId: runtimeConfig?.UseCaseId }); sendJsonMessage(payload); @@ -145,7 +167,14 @@ export default function ChatPage() { conversationId, setMessages, setIsGenAiResponseLoading, - runtimeConfig?.UseCaseConfig + runtimeConfig?.UseCaseConfig, + addUserMessage, + messages, + promptTemplate, + authToken, + generateMessageId, + setConversationId, + isMultimodalEnabled ] ); @@ -184,16 +213,24 @@ export default function ChatPage() { * Effect hook to update status of Details API result */ useEffect(() => { + let errorMessage: string | undefined; + + if (detailsError) { + errorMessage = (detailsError as SerializedError).message ?? 'Failed to load deployment'; + } else if (runtimeConfig && !runtimeConfig.UseCaseConfigKey && !runtimeConfig.UseCaseId) { + errorMessage = 'Use case configuration is missing. Please check your deployment configuration.'; + } + setLoadingState({ isLoading: !runtimeConfig?.UseCaseConfig && !detailsError, - error: detailsError + error: errorMessage ? { type: LoadingErrorType.DATA_FETCH_ERROR, - message: (detailsError as FetchBaseQueryError).data as string + message: errorMessage } : undefined }); - }, [runtimeConfig?.UseCaseConfig, detailsError]); + }, [runtimeConfig?.UseCaseConfig, runtimeConfig?.UseCaseConfigKey, runtimeConfig?.UseCaseId, detailsError]); useLayoutEffect(() => { if (!loadingState.isLoading && !loadingState.error && runtimeConfig?.UseCaseConfig) { @@ -212,9 +249,24 @@ export default function ChatPage() { } fitHeight - footer={} + footer={ + handlePromptSend(value)} + onSendWithFiles={(value: string, files: UploadedFile[], messageId?: string) => + handlePromptSend(value, files, messageId) + } + conversationId={conversationId} + onSetConversationId={setConversationId} + /> + } > - + diff --git a/source/ui-chat/src/pages/chat/components/input/ChatInput.tsx b/source/ui-chat/src/pages/chat/components/input/ChatInput.tsx index 882f0a8b..d40e6f31 100644 --- a/source/ui-chat/src/pages/chat/components/input/ChatInput.tsx +++ b/source/ui-chat/src/pages/chat/components/input/ChatInput.tsx @@ -1,112 +1,667 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { FormField, Link, PromptInput } from '@cloudscape-design/components'; -import { memo, useCallback, useState } from 'react'; +import React, { memo, useCallback, useState, useMemo } from 'react'; +import { useSelector } from 'react-redux'; +import { + Box, + FileDropzone, + FileInput, + FormField, + Icon, + Link, + PromptInput, + SpaceBetween +} from '@cloudscape-design/components'; +import { useFilesDragging } from '@cloudscape-design/components/file-dropzone'; + +import { getMultimodalEnabledState, getUseCaseId } from '../../../../store/configSlice'; + +import { FileTokenList } from '../../../../components/multimodal/FileTokenGroup'; +import { useFileUpload } from '../../../../hooks/use-file-upload'; +import { uploadFiles, deleteFiles } from '../../../../services/fileUploadService'; +import { RootState } from '../../../../store/store'; +import { getMaxInputTextLength } from '../../../../store/configSlice'; +import { UploadedFile } from '../../../../types/file-upload'; import { CHAT_INPUT_MAX_ROWS, CONSTRAINT_TEXT_ERROR_COLOR, DEFAULT_CHAT_INPUT_MAX_LENGTH, - DOCS_LINKS + DOCS_LINKS, + MULTIMODAL_SUPPORTED_IMAGE_FORMATS, + MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS } from '../../../../utils/constants'; -import { useSelector } from 'react-redux'; -import { RootState } from '../../../../store/store'; -import { getMaxInputTextLength } from '../../../../store/configSlice'; +import { validateFile, isFileCountExceeded } from '../../../../utils/file-upload'; import { formatCharacterCount } from '../../../../utils/validation'; -/** - * Props interface for the ChatInput component - * @interface ChatInputProps - * @property {boolean} isLoading - Flag indicating if a request is in progress - * @property {function} onSend - Callback function triggered when a message is sent - */ interface ChatInputProps { isLoading: boolean; onSend: (value: string) => void; + onSendWithFiles?: (value: string, files: UploadedFile[], messageId?: string) => void; + conversationId?: string; + onSetConversationId?: (conversationId: string) => void; } -/** - * ChatInput component that renders a form field with a prompt input - * @param {ChatInputProps} props - Component props - * @param {boolean} props.isLoading - Flag indicating if a request is in progress - * @param {function} props.onSend - Callback function triggered when a message is sent - * @returns {JSX.Element} Rendered form field with prompt input - */ -export const ChatInput = memo(({ isLoading, onSend }: ChatInputProps) => { - const [inputText, setInputText] = useState(''); - - // Selector to determine if user is internal - const isInternalUser = useSelector((state: RootState) => state.config.runtimeConfig?.IsInternalUser) === 'true'; - - const maxInputLength = useSelector((state: RootState) => { - try { - return getMaxInputTextLength(state); - } catch { - return DEFAULT_CHAT_INPUT_MAX_LENGTH; - } - }); - - /** - * Handles the action when user submits input - * @param {Object} param - Event parameter object - * @param {Object} param.detail - Contains the input value - */ - const handleAction = useCallback( - ({ detail }: { detail: { value: string } }) => { - if (!detail.value?.trim() || isLoading) return; - // Only send if the text is within the limit - if (detail.value.length <= maxInputLength) { - onSend(detail.value); - setInputText(''); - } - }, - [isLoading, onSend, maxInputLength] - ); - - const characterCount = inputText.length; - const isOverLimit = characterCount > maxInputLength; - - return ( - - - {characterCount}/{formatCharacterCount(maxInputLength)} characters.{' '} - - {isInternalUser && ( - <> - Use of this service is subject to the{' '} - - Third Party Generative AI Use Policy - - . - - )} - +const FilesDraggingProvider = ({ children }: { children: (areFilesDragging: boolean) => React.ReactNode }) => { + const { areFilesDragging } = useFilesDragging(); + return <>{children(areFilesDragging)}; +}; + +export const ChatInput = memo( + ({ + isLoading, + onSend, + onSendWithFiles, + conversationId, + onSetConversationId + }: ChatInputProps): React.ReactElement => { + const [inputText, setInputText] = useState(''); + const [files, setFiles] = useState([]); + const [uploadedFiles, setUploadedFiles] = useState([]); + const [isUploading, setIsUploading] = useState(false); + const [isDeleting, setIsDeleting] = useState(false); + const [uploadErrors, setUploadErrors] = useState>({}); + const [deleteErrors, setDeleteErrors] = useState>({}); + const [messageId, setMessageId] = useState(''); + const [uploadingFiles, setUploadingFiles] = useState>(new Set()); + + const { generateConversationId, generateMessageId } = useFileUpload(); + // Selector to determine if user is internal + const isInternalUser = useSelector((state: RootState) => state.config.runtimeConfig?.IsInternalUser) === 'true'; + + const isMultimodalEnabled = useSelector((state: RootState) => getMultimodalEnabledState(state)); + const useCaseId = useSelector(getUseCaseId); + + const maxInputLength = useSelector((state: RootState) => { + try { + return getMaxInputTextLength(state); + } catch { + return DEFAULT_CHAT_INPUT_MAX_LENGTH; } - > - { - // Allow any length of input - setInputText(detail.value); - }} - onAction={handleAction} - value={inputText} - actionButtonAriaLabel={ - isLoading - ? 'Send message button - suppressed' - : isOverLimit - ? 'Cannot send - message too long' - : 'Send message' + }); + + const clearObsoleteValidationErrors = useCallback((allFiles: File[]) => { + // Only clear errors for files that no longer exist in the file list + const existingFileNames = new Set(allFiles.map((file) => file.name)); + + setUploadErrors((prev) => { + const newErrors = { ...prev }; + Object.keys(newErrors).forEach((fileName) => { + // Clear errors for files that have been completely removed + if (!existingFileNames.has(fileName)) { + delete newErrors[fileName]; + } + }); + + return newErrors; + }); + }, []); + + const createFileFromUploaded = useCallback((uf: UploadedFile) => { + const file = new File([], uf.fileName, { type: uf.fileContentType }); + Object.defineProperty(file, 'size', { value: uf.fileSize, writable: false }); + return file; + }, []); + + const handleFileUpload = useCallback( + async (filesToUpload: File[]) => { + let currentConversationId = conversationId; + + // Generate conversation ID if we don't have one and we have files to upload + if (!currentConversationId && isMultimodalEnabled && filesToUpload.length > 0) { + currentConversationId = generateConversationId(); + if (onSetConversationId) { + onSetConversationId(currentConversationId); + } + } + + if (!currentConversationId || !useCaseId) { + return; + } + + setIsUploading(true); + + setUploadingFiles((prev) => { + const newSet = new Set(prev); + filesToUpload.forEach((file) => newSet.add(file.name)); + return newSet; + }); + + setUploadErrors((prev) => { + const newErrors = { ...prev }; + filesToUpload.forEach((file) => delete newErrors[file.name]); + return newErrors; + }); + + try { + // Use existing messageId or generate a new one for this conversation + let currentMessageId = messageId; + if (!currentMessageId) { + currentMessageId = generateMessageId(); + setMessageId(currentMessageId); + } + + const result = await uploadFiles( + filesToUpload, + currentConversationId, + useCaseId, + undefined, // onProgress + (fileName: string, success: boolean, error?: Error) => { + if (!success && error) { + setUploadErrors((prev) => ({ ...prev, [fileName]: error })); + } + }, + 3, // maxRetries + currentMessageId + ); + + if (result.uploadedFiles.length > 0) { + setUploadedFiles((prev) => { + const newUploadedFiles = [...prev]; + result.uploadedFiles.forEach((newFile) => { + const existingIndex = newUploadedFiles.findIndex( + (existing) => existing.fileName === newFile.fileName + ); + if (existingIndex !== -1) { + newUploadedFiles[existingIndex] = newFile; + } else { + newUploadedFiles.push(newFile); + } + }); + return newUploadedFiles; + }); + } + + setFiles((prev) => + prev.filter((file) => { + const wasUploaded = result.uploadedFiles.some( + (uploaded) => uploaded.fileName === file.name + ); + // Keep files that have validation errors + const hasValidationError = uploadErrors[file.name]; + return !wasUploaded || hasValidationError; + }) + ); + + const failedResults = result.results.filter((r) => !r.success); + if (failedResults.length > 0) { + setUploadErrors((prev) => { + const newErrors = { ...prev }; + failedResults.forEach((fileResult) => { + if (fileResult.error) { + newErrors[fileResult.fileName] = fileResult.error; + } + }); + return newErrors; + }); + } + } catch (error) { + filesToUpload.forEach((file) => { + setUploadErrors((prev) => ({ + ...prev, + [file.name]: new Error('Upload failed') + })); + }); + } finally { + setIsUploading(false); + setUploadingFiles((prev) => { + const newSet = new Set(prev); + filesToUpload.forEach((file) => newSet.delete(file.name)); + return newSet; + }); + } + }, + [ + conversationId, + useCaseId, + isMultimodalEnabled, + generateConversationId, + generateMessageId, + messageId, + onSetConversationId + ] + ); + + const handleAddFiles = useCallback( + async (newFiles: File[]) => { + if (!newFiles || !Array.isArray(newFiles)) { + return; + } + + const deduplicatedNewFiles = new Map(); + newFiles.forEach((file) => deduplicatedNewFiles.set(file.name, file)); + const uniqueFiles = Array.from(deduplicatedNewFiles.values()); + + const validFiles: File[] = []; + const invalidFiles: File[] = []; + const errors: Record = {}; + + // validate individual files + uniqueFiles.forEach((file) => { + const fileError = validateFile(file); + if (fileError) { + errors[file.name] = fileError.error; + invalidFiles.push(file); + } else { + validFiles.push(file); + } + }); + + if (validFiles.length > 0 || invalidFiles.length > 0) { + const updatedFiles = [...files]; + const updatedUploadedFiles = [...uploadedFiles]; + const filesToUpload: File[] = []; + + const filesToDelete: string[] = []; + + const allNewFiles = [...validFiles, ...invalidFiles]; + + allNewFiles.forEach((newFile) => { + // Check if file exists in pending uploads + const existingFileIndex = updatedFiles.findIndex((file) => file.name === newFile.name); + if (existingFileIndex !== -1) { + updatedFiles[existingFileIndex] = newFile; + } else { + updatedFiles.push(newFile); + } + + // Check if file exists in uploaded files - if so, mark for deletion + const existingUploadedIndex = updatedUploadedFiles.findIndex( + (file) => file.fileName === newFile.name + ); + if (existingUploadedIndex !== -1) { + const fileToDelete = updatedUploadedFiles[existingUploadedIndex]; + if (fileToDelete.messageId) { + filesToDelete.push(fileToDelete.fileName); + // Remove from uploaded files array + updatedUploadedFiles.splice(existingUploadedIndex, 1); + } + } + + if (validFiles.includes(newFile)) { + filesToUpload.push(newFile); + } + }); + + const existingFiles = [...updatedFiles, ...updatedUploadedFiles.map(createFileFromUploaded)]; + + setFiles(updatedFiles); + setUploadedFiles(updatedUploadedFiles); + setUploadErrors((prev) => ({ ...prev, ...errors })); + + // Proceed with upload if there are valid files to upload + if (isMultimodalEnabled && filesToUpload.length > 0) { + // Generate conversationId if we don't have one yet + if (!conversationId && onSetConversationId) { + const newConversationId = generateConversationId(); + onSetConversationId(newConversationId); + } + // Delete existing uploaded files with same names first + if (filesToDelete.length > 0 && conversationId && messageId && useCaseId) { + try { + const deleteResult = await deleteFiles( + filesToDelete, + conversationId, + messageId, + useCaseId, + (fileName: string, success: boolean, error?: Error) => { + if (!success && error) { + console.warn(`Failed to delete existing file ${fileName}:`, error); + setDeleteErrors((prev) => ({ ...prev, [fileName]: error })); + } + }, + 3 // maxRetries for delete operation + ); + + if (deleteResult.allSuccessful) { + handleFileUpload(filesToUpload); + } else { + console.warn('Some file deletions failed, proceeding with upload anyway'); + handleFileUpload(filesToUpload); + } + } catch (error) { + console.error('Error during file deletion:', error); + filesToDelete.forEach((fileName) => { + setDeleteErrors((prev) => ({ + ...prev, + [fileName]: new Error('Delete operation failed') + })); + }); + handleFileUpload(filesToUpload); + } + } else { + handleFileUpload(filesToUpload); + } + } + } + + if (Object.keys(errors).length > 0) { + setUploadErrors((prev) => ({ ...prev, ...errors })); + } + }, + [ + files, + uploadedFiles, + isMultimodalEnabled, + handleFileUpload, + conversationId, + onSetConversationId, + createFileFromUploaded, + messageId, + useCaseId, + generateConversationId + ] + ); + + // Sort by error files first, then normal files + const orderedFiles = useMemo(() => { + const allFiles = [ + ...files.map((file, originalIndex) => ({ + file, + originalIndex, + isUploaded: false, + fileName: file.name + })), + ...uploadedFiles.map((file, originalIndex) => ({ + file, + originalIndex, + isUploaded: true, + fileName: file.fileName + })) + ]; + + // Separate files with errors from files without errors + const filesWithErrors = allFiles.filter( + (item) => uploadErrors[item.fileName] || deleteErrors[item.fileName] + ); + const filesWithoutErrors = allFiles.filter( + (item) => !uploadErrors[item.fileName] && !deleteErrors[item.fileName] + ); + + return [...filesWithErrors, ...filesWithoutErrors]; + }, [files, uploadedFiles, uploadErrors, deleteErrors]); + + const handleFileDismiss = useCallback( + async (fileIndex: number) => { + const itemToRemove = orderedFiles[fileIndex]; + if (!itemToRemove) return; + + const fileName = itemToRemove.fileName; + + if (!itemToRemove.isUploaded) { + const updatedFiles = files.filter((_, index) => index !== itemToRemove.originalIndex); + setFiles(updatedFiles); + + setUploadErrors((prev) => { + const newErrors = { ...prev }; + delete newErrors[fileName]; + return newErrors; + }); + setDeleteErrors((prev) => { + const newErrors = { ...prev }; + delete newErrors[fileName]; + return newErrors; + }); + + const allFiles = [...updatedFiles, ...uploadedFiles.map(createFileFromUploaded)]; + clearObsoleteValidationErrors(allFiles); + } else { + // Remove from uploaded files - call delete API with retry logic + const fileToDelete = uploadedFiles[itemToRemove.originalIndex]; + + if (fileToDelete && conversationId && fileToDelete.messageId && useCaseId) { + setIsDeleting(true); + + setDeleteErrors((prev) => { + const newErrors = { ...prev }; + delete newErrors[fileToDelete.fileName]; + return newErrors; + }); + + try { + const fileMessageId = fileToDelete.messageId!; + const result = await deleteFiles( + [fileToDelete.fileName], + fileToDelete.conversationId || conversationId, + fileMessageId, + useCaseId, + (fileName: string, success: boolean, error?: Error) => { + if (!success && error) { + setDeleteErrors((prev) => ({ ...prev, [fileName]: error })); + } + } + ); + + if (result.allSuccessful) { + const updatedUploadedFiles = uploadedFiles.filter( + (_, index) => index !== itemToRemove.originalIndex + ); + setUploadedFiles(updatedUploadedFiles); + + const allFiles = [...files, ...updatedUploadedFiles.map(createFileFromUploaded)]; + clearObsoleteValidationErrors(allFiles); + } + } catch (error) { + console.error('Failed to delete file:', error); + setDeleteErrors((prev) => ({ + ...prev, + [fileToDelete.fileName]: new Error('Delete failed') + })); + } finally { + setIsDeleting(false); + } + } else { + // Fallback: just remove from UI if we don't have required info + const updatedUploadedFiles = uploadedFiles.filter( + (_, index) => index !== itemToRemove.originalIndex + ); + setUploadedFiles(updatedUploadedFiles); + + const allFiles = [...files, ...updatedUploadedFiles.map(createFileFromUploaded)]; + clearObsoleteValidationErrors(allFiles); + } } - actionButtonIconName="send" - ariaLabel={isLoading ? 'Chat input text - suppressed' : 'Chat input text'} - placeholder="Ask a question" - autoFocus - maxRows={CHAT_INPUT_MAX_ROWS} - data-testid="chat-input" - /> - - ); -}); + }, + [ + orderedFiles, + files, + uploadedFiles, + conversationId, + messageId, + useCaseId, + clearObsoleteValidationErrors, + createFileFromUploaded + ] + ); + + const characterCount = inputText.length; + const isOverLimit = characterCount > maxInputLength; + const hasFiles = files.length > 0 || uploadedFiles.length > 0; + const totalFiles = files.length + uploadedFiles.length; + const hasFileErrors = Object.keys(uploadErrors).length > 0 || Object.keys(deleteErrors).length > 0; + const allFiles = [...files, ...uploadedFiles.map(createFileFromUploaded)]; + const fileCountCheck = isFileCountExceeded(allFiles); + const hasCountError = fileCountCheck.exceeded; + + const handleAction = useCallback( + ({ detail }: { detail: { value: string } }) => { + if (!detail.value?.trim() || isLoading || isUploading || isDeleting) return; + + if (hasFileErrors || hasCountError) { + return; + } + + if (detail.value.length <= maxInputLength) { + if (isMultimodalEnabled && uploadedFiles.length > 0 && onSendWithFiles) { + onSendWithFiles(detail.value, uploadedFiles, messageId); + } else { + onSend(detail.value); + } + setInputText(''); + + // Reset messageId for next message + setMessageId(''); + + if (isMultimodalEnabled) { + setUploadedFiles([]); + setFiles([]); + setUploadErrors({}); + setDeleteErrors({}); + } + } + }, + [ + isLoading, + isUploading, + isDeleting, + onSend, + onSendWithFiles, + maxInputLength, + uploadedFiles, + isMultimodalEnabled, + hasFileErrors, + hasCountError + ] + ); + const acceptedFormats = [...MULTIMODAL_SUPPORTED_IMAGE_FORMATS, ...MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS] + .map((format) => `.${format}`) + .join(','); + + return ( + + + {characterCount}/{formatCharacterCount(maxInputLength)} characters.{' '} + + {hasCountError && ( + {fileCountCheck.message} + )} + {isMultimodalEnabled && hasFiles && ( + + {uploadedFiles.length > 0 && + `${uploadedFiles.length} file${uploadedFiles.length !== 1 ? 's' : ''} uploaded.`}{' '} + {isUploading && 'Uploading...'} {isDeleting && 'Deleting...'}{' '} + + )} + {isMultimodalEnabled && Only supports up to 20 images and 5 documents per conversation. See help panel for supported file types. } + {isInternalUser && ( + <> + Use of this service is subject to the{' '} + { + ( + + Third Party Generative AI Use Policy + + ) as React.ReactElement + } + . + + )} + + } + > + + {(areFilesDragging: boolean) => ( + setInputText(detail.value)} + onAction={handleAction} + value={inputText} + actionButtonAriaLabel={ + isLoading || isUploading || isDeleting + ? 'Send message button - suppressed' + : isOverLimit + ? 'Cannot send - message too long' + : hasFileErrors + ? 'Cannot send - file errors present' + : isMultimodalEnabled && hasFiles + ? `Send message with ${totalFiles} file${totalFiles !== 1 ? 's' : ''}` + : 'Send message' + } + actionButtonIconName="send" + ariaLabel={ + isLoading || isUploading || isDeleting + ? 'Chat input text - suppressed' + : 'Chat input text' + } + placeholder={ + isMultimodalEnabled && hasFiles ? 'Ask a question about your files' : 'Ask a question' + } + autoFocus + maxRows={CHAT_INPUT_MAX_ROWS} + data-testid="chat-input" + disableSecondaryActionsPaddings + secondaryActions={ + isMultimodalEnabled ? ( + + + handleAddFiles(detail.value) + } + accept={acceptedFormats} + ariaLabel="Upload files" + variant="icon" + /> + + ) : undefined + } + secondaryContent={ + isMultimodalEnabled ? ( + areFilesDragging ? ( + + handleAddFiles(detail.value) + } + > + + + Drop files here + + + ) : ( + (files.length > 0 || uploadedFiles.length > 0) && ( + { + if (!item.isUploaded) { + const fileWithLoading = item.file as File & { + loading?: boolean; + }; + const hasError = + uploadErrors[item.fileName] || deleteErrors[item.fileName]; + if (uploadingFiles.has(item.fileName) && !hasError) { + fileWithLoading.loading = true; + } else { + delete fileWithLoading.loading; + } + return fileWithLoading; + } + return item.file; + })} + onDismiss={handleFileDismiss} + uploadErrors={uploadErrors} + deleteErrors={deleteErrors} + /> + ) + ) + ) : undefined + } + /> + )} + + + ); + } +); +export default ChatInput; diff --git a/source/ui-chat/src/pages/chat/components/messages/ChatMessage.tsx b/source/ui-chat/src/pages/chat/components/messages/ChatMessage.tsx index 1fc57794..fa01b231 100644 --- a/source/ui-chat/src/pages/chat/components/messages/ChatMessage.tsx +++ b/source/ui-chat/src/pages/chat/components/messages/ChatMessage.tsx @@ -6,6 +6,8 @@ import { AUTHORS, AUTHORS_CONFIG, getUserAuthor } from '../../config'; import { OutgoingMessage } from './OutgoingMessage'; import { IncomingMessage } from './IncomingMessage'; +import { ToolUsageInfo } from '../../../../models'; + /** * Interface defining the props for the ChatMessage component * @interface ChatMessageProps @@ -14,6 +16,7 @@ import { IncomingMessage } from './IncomingMessage'; * @property {string} userName - Display name of the user * @property {Function} isUserMessage - Function to determine if a message is from the user * @property {Function} onFeedback - Callback function for handling message feedback + * @property {ToolUsageInfo[]} [toolUsage] - Optional array of tool usage information * @property {string} [data-testid] - Optional test identifier for the component */ export interface ChatMessageProps { @@ -22,6 +25,8 @@ export interface ChatMessageProps { userName: string; isUserMessage: (authorId: string) => boolean; conversationId: string; + toolUsage?: ToolUsageInfo[]; + hasFileError?: boolean; 'data-testid'?: string; } @@ -36,6 +41,8 @@ export const ChatMessage = ({ userName, isUserMessage, conversationId, + toolUsage, + hasFileError = false, 'data-testid': dataTestId }: ChatMessageProps) => { const typedMessage = message as ChatBubbleMessage; @@ -54,9 +61,18 @@ export const ChatMessage = ({ return undefined; }; + // Get tool usage from message if available, otherwise use prop (for streaming) + const messageToolUsage = (typedMessage as any).toolUsage || toolUsage; + // Ensure the message ID is passed to the IncomingMessage component return isUserMessage(typedMessage.authorId) ? ( - + ) : ( ); diff --git a/source/ui-chat/src/pages/chat/components/messages/ChatMessagesContainer.tsx b/source/ui-chat/src/pages/chat/components/messages/ChatMessagesContainer.tsx index 46adbc63..3bdd5db8 100644 --- a/source/ui-chat/src/pages/chat/components/messages/ChatMessagesContainer.tsx +++ b/source/ui-chat/src/pages/chat/components/messages/ChatMessagesContainer.tsx @@ -5,15 +5,25 @@ import { memo, useEffect, useRef } from 'react'; import { ScrollableContainer } from '../../../../components/common/common-components'; import Messages from './Messages'; import { Message } from '../../types'; +import { ToolUsageInfo } from '../../../../models'; /** * Props interface for ChatMessagesContainer component * @interface ChatMessagesContainerProps * @property {Message[]} messages - Array of chat messages to display + * @property {string} conversationId - Unique identifier for the conversation + * @property {Object} thinking - Optional thinking state for agent processing indicators + * @property {ToolUsageInfo[]} toolUsage - Optional array of tool usage information */ interface ChatMessagesContainerProps { messages: Message[]; conversationId: string; + thinking?: { + isThinking: boolean; + thinkingMessage?: string; + startTime: string; + }; + toolUsage?: ToolUsageInfo[]; } /** @@ -23,9 +33,12 @@ interface ChatMessagesContainerProps { * @component * @param {ChatMessagesContainerProps} props - Component props * @param {Message[]} props.messages - Array of messages to display + * @param {string} props.conversationId - Unique identifier for the conversation + * @param {ThinkingState} props.thinking - Optional thinking state for agent processing indicators + * @param {ToolUsageInfo[]} props.toolUsage - Optional array of tool usage information * @returns {JSX.Element} Rendered chat messages container */ -export const ChatMessagesContainer = memo(({ messages, conversationId }: ChatMessagesContainerProps) => { +export const ChatMessagesContainer = memo(({ messages, conversationId, thinking, toolUsage }: ChatMessagesContainerProps) => { const messagesContainerRef = useRef(null); const lastMessageContent = messages[messages.length - 1]?.content; @@ -39,9 +52,31 @@ export const ChatMessagesContainer = memo(({ messages, conversationId }: ChatMes } }, [lastMessageContent]); + // Auto-scroll when thinking state changes + useEffect(() => { + if (thinking?.isThinking && messagesContainerRef.current) { + requestAnimationFrame(() => { + if (messagesContainerRef.current) { + messagesContainerRef.current.scrollTop = messagesContainerRef.current.scrollHeight; + } + }); + } + }, [thinking?.isThinking]); + + // Auto-scroll when tool usage updates + useEffect(() => { + if (toolUsage && toolUsage.length > 0 && messagesContainerRef.current) { + requestAnimationFrame(() => { + if (messagesContainerRef.current) { + messagesContainerRef.current.scrollTop = messagesContainerRef.current.scrollHeight; + } + }); + } + }, [toolUsage?.length]); + return ( - + ); }); diff --git a/source/ui-chat/src/pages/chat/components/messages/IncomingMessage.tsx b/source/ui-chat/src/pages/chat/components/messages/IncomingMessage.tsx index d93a7a98..df07a577 100644 --- a/source/ui-chat/src/pages/chat/components/messages/IncomingMessage.tsx +++ b/source/ui-chat/src/pages/chat/components/messages/IncomingMessage.tsx @@ -11,9 +11,20 @@ import { useEffect, useState } from 'react'; import { FeedbackForm } from '../input/FeedbackForm'; import { useFeedback } from '@/hooks/use-feedback'; import { StatusIndicator } from '@cloudscape-design/components'; +import { ThinkingIndicator } from '@/components/thinking/ThinkingIndicator'; +import { ToolUsageList } from '@/components/tool-usage/ToolUsageList'; +import { AgentBuilderChatBubbleMessage } from '../../types'; +import { useSelector } from 'react-redux'; +import { selectUseCaseType } from '@/store/configSlice'; +import { USE_CASE_TYPES } from '@/utils/constants'; -export const IncomingMessage = ({ message, author, showActions, conversationId, 'data-testid': dataTestId }: IncomingMessageProps) => { +const isAgentBuilderMessage = (message: any): message is AgentBuilderChatBubbleMessage => { + return message && 'thinking' in message && message.thinking !== undefined; +}; + +export const IncomingMessage = ({ message, author, showActions, conversationId, toolUsage, 'data-testid': dataTestId }: IncomingMessageProps) => { const [showFeedbackConfirmation, setShowFeedbackConfirmation] = useState(false); + const useCaseType = useSelector(selectUseCaseType); const { showFeedbackForm, setShowFeedbackForm, @@ -24,6 +35,10 @@ export const IncomingMessage = ({ message, author, showActions, conversationId, handleFeedbackButtonClick, handleFeedbackSubmit } = useFeedback(message, conversationId); + + const shouldShowThinkingIndicator = (useCaseType === USE_CASE_TYPES.AGENT_BUILDER || useCaseType === USE_CASE_TYPES.WORKFLOW) && isAgentBuilderMessage(message); + + const shouldShowToolUsage = (useCaseType === USE_CASE_TYPES.AGENT_BUILDER || useCaseType === USE_CASE_TYPES.WORKFLOW) && toolUsage && toolUsage.length > 0; // Reset feedback form state when message changes useEffect(() => { @@ -65,6 +80,21 @@ export const IncomingMessage = ({ message, author, showActions, conversationId, data-testid={dataTestId} > + + {shouldShowToolUsage && ( + + )} + + {shouldShowThinkingIndicator && message.thinking && ( + + )} + {message.sourceDocuments && message.sourceDocuments.length > 0 && ( )} diff --git a/source/ui-chat/src/pages/chat/components/messages/Messages.tsx b/source/ui-chat/src/pages/chat/components/messages/Messages.tsx index 1d8a9edb..0bbef102 100644 --- a/source/ui-chat/src/pages/chat/components/messages/Messages.tsx +++ b/source/ui-chat/src/pages/chat/components/messages/Messages.tsx @@ -1,6 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +import React from 'react'; import LiveRegion from '@cloudscape-design/components/live-region'; import '../../styles/chat.scss'; import { useUser } from '../../../../contexts/UserContext'; @@ -9,15 +10,26 @@ import { parseTraceId, TraceDetails } from '../../../../utils/validation'; import { ErrorAlert } from '../alerts/ErrorAlert'; import { ChatMessage } from './ChatMessage'; import { Message } from '../../types'; +import { ToolUsageInfo } from '../../../../models'; /** * Messages component displays a list of chat messages and alerts * @component * @param {Object} props - Component props * @param {Array} props.messages - Array of message objects to display + * @param {string} props.conversationId - Unique identifier for the conversation + * @param {Array} props.toolUsage - Optional array of tool usage information * @returns {JSX.Element} Messages component */ -const Messages = ({ messages = [], conversationId }: { messages: Array, conversationId: string }) => { +const Messages = ({ + messages = [], + conversationId, + toolUsage = [] +}: { + messages: Array, + conversationId: string, + toolUsage?: Array +}) => { const { userId, userName } = useUser(); const latestMessage: Message = messages[messages.length - 1]; const [processedMessages, setProcessedMessages] = useState>(messages); @@ -85,6 +97,22 @@ const Messages = ({ messages = [], conversationId }: { messages: Array, ); } + const isLastMessage = index === processedMessages.length - 1; + const isAssistantMessage = message.authorId !== userId; + const shouldPassToolUsage = isLastMessage && isAssistantMessage && toolUsage.length > 0; + + const hasFileProcessingError = (() => { + if (!isUserMessage(message.authorId) || !message.files?.length) return false; + + // Check if the very next message is a file processing error alert + const nextMsg = processedMessages[index + 1]; + return ( + nextMsg?.type === 'alert' && + typeof nextMsg.content === 'string' && + nextMsg.content.includes('File processing failed') + ); + })(); + return ( , userName={userName!} isUserMessage={isUserMessage} conversationId={conversationId} + toolUsage={shouldPassToolUsage ? toolUsage : undefined} + hasFileError={hasFileProcessingError} data-testid={`chat-message-${index}`} /> ); diff --git a/source/ui-chat/src/pages/chat/components/messages/OutgoingMessage.tsx b/source/ui-chat/src/pages/chat/components/messages/OutgoingMessage.tsx index 0d3441bc..b231ac6f 100644 --- a/source/ui-chat/src/pages/chat/components/messages/OutgoingMessage.tsx +++ b/source/ui-chat/src/pages/chat/components/messages/OutgoingMessage.tsx @@ -6,6 +6,7 @@ import { OutgoingMessageProps } from './types'; import { ChatBubble } from '@cloudscape-design/chat-components'; import { ChatBubbleAvatar } from '../../../../components/common/common-components'; import MarkdownContent from '../../../../components/markdown/MarkdownContent'; +import { FileDisplay } from '../../../../components/multimodal/FileDisplay'; import '../../styles/OutgoingMessage.scss'; const ChevronIcon = ({ className }: { className?: string }) => ( @@ -17,6 +18,7 @@ const ChevronIcon = ({ className }: { className?: string }) => ( export const OutgoingMessage = ({ message, author, + hasFileError = false, 'data-testid': dataTestId, previewHeight = 200 }: OutgoingMessageProps) => { @@ -41,6 +43,7 @@ export const OutgoingMessage = ({ showLoadingBar={message.avatarLoading} data-testid={dataTestId} > + {message.files && message.files.length > 0 && }
{ const initialChatState: ChatState = { @@ -24,7 +32,11 @@ export const chatReducer = (state: ChatState, action: ChatAction): ChatState => currentResponse: '', isGenAiResponseLoading: false, sourceDocuments: [], - conversationId: '' + conversationId: '', + isStreaming: false, + streamingMessageId: undefined, + thinking: undefined, + toolUsage: [] }; switch (action.type) { @@ -33,21 +45,35 @@ export const chatReducer = (state: ChatState, action: ChatAction): ChatState => type: 'chat-bubble', authorId: action.payload.authorId, content: action.payload.content, - timestamp: new Date().toLocaleTimeString() + timestamp: new Date().toLocaleTimeString(), + files: action.payload.files }; - const loadingMessage: Message = { + // Track thinking start time for duration calculation + const thinkingStartTime = new Date().toISOString(); + + const loadingMessage: AgentBuilderChatBubbleMessage = { type: 'chat-bubble', authorId: AUTHORS.ASSISTANT, - content: CHAT_LOADING_DEFAULT_MESSAGE, + content: '', // Empty content - thinking indicator will show the status timestamp: new Date().toLocaleTimeString(), - avatarLoading: true + avatarLoading: true, + thinking: { + duration: 0, + startTime: thinkingStartTime, + endTime: '', + strippedContent: undefined + } }; return { ...state, messages: [...state.messages, userMessage, loadingMessage], - isGenAiResponseLoading: true + isGenAiResponseLoading: true, + thinking: { + isThinking: true, + startTime: thinkingStartTime + } }; } @@ -95,22 +121,53 @@ export const chatReducer = (state: ChatState, action: ChatAction): ChatState => currentResponse: updatedResponse }; - case ChatActionTypes.COMPLETE_AI_RESPONSE: + case ChatActionTypes.COMPLETE_AI_RESPONSE: { const finalMessages = [...state.messages] as ChatBubbleMessage[]; + + let thinkingMetadata: ThinkingMetadata | undefined; + + if (state.thinking && state.thinking.startTime) { + const startTime = new Date(state.thinking.startTime).getTime(); + const endTime = Date.now(); + const duration = Math.floor((endTime - startTime) / 1000); + + thinkingMetadata = { + duration, + startTime: state.thinking.startTime, + endTime: new Date(endTime).toISOString(), + strippedContent: undefined // Will be set during content processing + }; + } + if (finalMessages.length > 0 && finalMessages[finalMessages.length - 1].authorId === AUTHORS.ASSISTANT) { + const lastMessage = finalMessages[finalMessages.length - 1]; + + const messageContent = typeof lastMessage.content === 'string' ? lastMessage.content : ''; + const { content: cleanedContent, thinking: updatedThinking } = processMessageContent( + messageContent, + thinkingMetadata + ); + finalMessages[finalMessages.length - 1] = { - ...finalMessages[finalMessages.length - 1], + ...lastMessage, + content: cleanedContent, avatarLoading: false, - sourceDocuments: [...state.sourceDocuments] - }; + sourceDocuments: [...state.sourceDocuments], + thinking: updatedThinking, + toolUsage: [...state.toolUsage] + } as AgentBuilderChatBubbleMessage; } + return { ...state, messages: finalMessages, isGenAiResponseLoading: false, currentResponse: '', - sourceDocuments: [] + sourceDocuments: [], + thinking: undefined, // Clear global thinking state after capture + toolUsage: [] // Clear tool usage after capturing into message }; + } case ChatActionTypes.ADD_REPHRASED_QUERY: const messages = [...state.messages] as ChatBubbleMessage[]; @@ -161,7 +218,10 @@ export const chatReducer = (state: ChatState, action: ChatAction): ChatState => content: action.payload } ], - isGenAiResponseLoading: false + isGenAiResponseLoading: false, + isStreaming: false, + streamingMessageId: undefined, + currentResponse: '' }; case ChatActionTypes.RESET_CHAT: @@ -173,6 +233,151 @@ export const chatReducer = (state: ChatState, action: ChatAction): ChatState => messages: action.payload // Replace instead of append }; + case ChatActionTypes.START_STREAMING: { + // Preserve existing thinking state from loading message + const existingThinking = state.thinking; + + const thinkingState = existingThinking || { + isThinking: true, + startTime: new Date().toISOString() + }; + + const messagesWithThinking = [...state.messages]; + + return { + ...state, + messages: messagesWithThinking, + isStreaming: true, + streamingMessageId: action.payload.messageId, + currentResponse: '', + isGenAiResponseLoading: true, + thinking: thinkingState + }; + } + + case ChatActionTypes.UPDATE_STREAMING_CHUNK: { + const streamMessages = [...state.messages] as ChatBubbleMessage[]; + const newContent = action.payload.content; + + if ( + streamMessages.length > 0 && + streamMessages[streamMessages.length - 1].authorId === AUTHORS.ASSISTANT + ) { + const lastMessage = streamMessages[streamMessages.length - 1]; + const currentContent = lastMessage.content === CHAT_LOADING_DEFAULT_MESSAGE + ? '' + : lastMessage.content; + + streamMessages[streamMessages.length - 1] = { + ...lastMessage, + type: 'chat-bubble', + authorId: AUTHORS.ASSISTANT, + content: currentContent + newContent, + timestamp: new Date().toLocaleTimeString(), + avatarLoading: true, + messageId: lastMessage.messageId || action.payload.messageId || state.streamingMessageId + }; + } else { + streamMessages.push({ + type: 'chat-bubble', + authorId: AUTHORS.ASSISTANT, + content: newContent, + timestamp: new Date().toLocaleTimeString(), + avatarLoading: true, + messageId: action.payload.messageId || state.streamingMessageId + }); + } + + const accumulatedResponse = state.currentResponse === CHAT_LOADING_DEFAULT_MESSAGE + ? newContent + : state.currentResponse + newContent; + + return { + ...state, + messages: streamMessages, + currentResponse: accumulatedResponse, + isStreaming: true + }; + } + + case ChatActionTypes.COMPLETE_STREAMING: { + const completedMessages = [...state.messages] as ChatBubbleMessage[]; + + if (completedMessages.length > 0 && completedMessages[completedMessages.length - 1].authorId === AUTHORS.ASSISTANT) { + const lastMessage = completedMessages[completedMessages.length - 1]; + + const existingThinking = (lastMessage as AgentBuilderChatBubbleMessage).thinking; + const thinkingStartTime = existingThinking?.startTime || state.thinking?.startTime; + + let thinkingMetadata: ThinkingMetadata | undefined; + + if (thinkingStartTime) { + const startTime = new Date(thinkingStartTime).getTime(); + const endTime = Date.now(); + const duration = Math.floor((endTime - startTime) / 1000); + + thinkingMetadata = { + duration, + startTime: thinkingStartTime, + endTime: new Date(endTime).toISOString(), + strippedContent: undefined + }; + } + + const messageContent = typeof lastMessage.content === 'string' ? lastMessage.content : ''; + const { content: cleanedContent, thinking: updatedThinking } = processMessageContent( + messageContent, + thinkingMetadata + ); + + const finalMessage = { + ...lastMessage, + content: cleanedContent, + avatarLoading: false, + sourceDocuments: [...state.sourceDocuments], + thinking: updatedThinking, + toolUsage: [...state.toolUsage] + } as AgentBuilderChatBubbleMessage; + + completedMessages[completedMessages.length - 1] = finalMessage; + } + + return { + ...state, + messages: completedMessages, + isStreaming: false, + streamingMessageId: undefined, + isGenAiResponseLoading: false, + currentResponse: '', + sourceDocuments: [], + thinking: undefined, + toolUsage: [] + }; + } + + case ChatActionTypes.ADD_TOOL_USAGE: { + return { + ...state, + toolUsage: [...state.toolUsage, action.payload] + }; + } + + case ChatActionTypes.UPDATE_TOOL_USAGE: { + const updatedToolUsage = [...state.toolUsage]; + updatedToolUsage[action.payload.index] = action.payload.toolUsage; + return { + ...state, + toolUsage: updatedToolUsage + }; + } + + case ChatActionTypes.CLEAR_TOOL_USAGE: { + return { + ...state, + toolUsage: [] + }; + } + default: return state; } diff --git a/source/ui-chat/src/services/__tests__/fileUploadService.test.ts b/source/ui-chat/src/services/__tests__/fileUploadService.test.ts new file mode 100644 index 00000000..4207f5a1 --- /dev/null +++ b/source/ui-chat/src/services/__tests__/fileUploadService.test.ts @@ -0,0 +1,280 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, expect, vi, beforeEach, afterEach, test } from 'vitest'; +import { uploadFiles, deleteFiles, requestFileUpload } from '../fileUploadService'; + +const mockFetch = vi.fn(); +global.fetch = mockFetch; + +describe('fileUploadService', () => { + const mockUseCaseId = 'test-use-case-id'; + const mockConversationId = 'test-conversation-id'; + + beforeEach(() => { + vi.clearAllMocks(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('requestFileUpload', () => { + test('successfully requests presigned URLs', async () => { + const mockResponse = { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + fileName: 'test.jpg', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z', + formFields: { key: 'value' }, + fileKey: 'test-file-key' + } + ], + messageId: 'test-message-id' + }; + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockResponse) + }); + + const params = { + files: [{ fileName: 'test.jpg', contentType: 'image/jpeg', fileSize: 1024 }], + conversationId: mockConversationId + }; + + const result = await requestFileUpload(params, mockUseCaseId); + + expect(result).toEqual(mockResponse); + + expect(result).toEqual(mockResponse); + }); + + test('handles request failure', async () => { + mockFetch.mockResolvedValueOnce({ + ok: false, + statusText: 'Bad Request' + }); + + const params = { + files: [{ fileName: 'test.jpg' }], + conversationId: mockConversationId + }; + + await expect(requestFileUpload(params, mockUseCaseId)).rejects.toThrow( + 'Upload request failed: Bad Request' + ); + }); + + test('handles network errors', async () => { + mockFetch.mockRejectedValueOnce(new Error('Network error')); + + const params = { + files: [{ fileName: 'test.jpg' }], + conversationId: mockConversationId + }; + + await expect(requestFileUpload(params, mockUseCaseId)).rejects.toThrow('Network error'); + }); + }); + + describe('uploadFiles', () => { + test('successfully uploads files', async () => { + const mockPresignedResponse = { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + fileName: 'test.jpg', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z', + formFields: { key: 'test-key' }, + fileKey: 'test-file-key' + } + ], + messageId: 'test-message-id' + }; + + // Mock presigned URL request + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockPresignedResponse) + }); + + // Mock S3 upload + mockFetch.mockResolvedValueOnce({ + ok: true, + status: 200 + }); + + const testFile = new File(['test content'], 'test.jpg', { type: 'image/jpeg' }); + const files = [testFile]; + + const result = await uploadFiles(files, mockConversationId, mockUseCaseId); + + expect(result.allSuccessful).toBe(true); + expect(result.successCount).toBe(1); + expect(result.failureCount).toBe(0); + expect(result.uploadedFiles).toHaveLength(1); + expect(result.uploadedFiles[0]).toMatchObject({ + key: 'test-file-key', + fileName: 'test.jpg', + fileContentType: 'image/jpeg' + }); + }); + + test('handles upload failures', async () => { + const mockPresignedResponse = { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + fileName: 'test.jpg', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z', + formFields: { key: 'test-key' }, + fileKey: 'test-file-key' + } + ], + messageId: 'test-message-id' + }; + + // Mock presigned URL request + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockPresignedResponse) + }); + + // Mock S3 upload failure + mockFetch.mockResolvedValueOnce({ + ok: false, + status: 403, + statusText: 'Forbidden' + }); + + const testFile = new File(['test content'], 'test.jpg', { type: 'image/jpeg' }); + const files = [testFile]; + + const result = await uploadFiles(files, mockConversationId, mockUseCaseId); + + expect(result.allSuccessful).toBe(false); + expect(result.successCount).toBe(0); + expect(result.failureCount).toBe(1); + expect(result.uploadedFiles).toHaveLength(0); + }); + + test('calls progress callbacks', async () => { + const onProgress = vi.fn(); + const onFileComplete = vi.fn(); + + const mockPresignedResponse = { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + fileName: 'test.jpg', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z', + formFields: { key: 'test-key' }, + fileKey: 'test-file-key' + } + ], + messageId: 'test-message-id' + }; + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockPresignedResponse) + }); + + mockFetch.mockResolvedValueOnce({ + ok: true, + status: 200 + }); + + const testFile = new File(['test content'], 'test.jpg', { type: 'image/jpeg' }); + const files = [testFile]; + + await uploadFiles(files, mockConversationId, mockUseCaseId, onProgress, onFileComplete); + + expect(onFileComplete).toHaveBeenCalledWith('test.jpg', true, undefined); + }); + }); + + describe('deleteFiles', () => { + test('successfully deletes files', async () => { + const mockApiResponse = { + deletions: [{ success: true, fileName: 'test.jpg' }], + allSuccessful: true, + failureCount: 0 + }; + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockApiResponse) + }); + + const result = await deleteFiles(['test.jpg'], mockConversationId, 'test-message-id', mockUseCaseId); + expect(result).toEqual(mockApiResponse); + }); + + test('handles delete failure', async () => { + mockFetch.mockResolvedValueOnce({ + ok: false, + statusText: 'Not Found' + }); + + await expect( + deleteFiles(['test.jpg'], mockConversationId, 'test-message-id', mockUseCaseId) + ).rejects.toThrow('Delete request failed: Not Found'); + }); + }); + + describe('retry logic', () => { + test('retries failed uploads with exponential backoff', async () => { + const mockPresignedResponse = { + uploads: [ + { + uploadUrl: 'https://s3.amazonaws.com/bucket/key', + fileName: 'test.jpg', + expiresIn: 3600, + createdAt: '2023-01-01T00:00:00Z', + formFields: { key: 'test-key' }, + fileKey: 'test-file-key' + } + ], + messageId: 'test-message-id' + }; + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockPresignedResponse) + }); + + mockFetch.mockResolvedValueOnce({ + ok: false, + status: 500, + statusText: 'Internal Server Error' + }); + + mockFetch.mockResolvedValueOnce({ + ok: true, + status: 200 + }); + + const testFile = new File(['test content'], 'test.jpg', { type: 'image/jpeg' }); + const files = [testFile]; + + const result = await uploadFiles( + files, + mockConversationId, + mockUseCaseId, + undefined, // onProgress + undefined, // onFileComplete + 2 // maxRetries + ); + + expect(result.allSuccessful).toBe(true); + expect(result.results[0].attempts).toBe(2); + }); + }); +}); diff --git a/source/ui-chat/src/services/fileUploadService.ts b/source/ui-chat/src/services/fileUploadService.ts new file mode 100644 index 00000000..421108d2 --- /dev/null +++ b/source/ui-chat/src/services/fileUploadService.ts @@ -0,0 +1,549 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + FileUploadRequest, + FileUploadResponse, + UploadedFile, + FileDeleteRequest, + FileDeleteResponse +} from '../types/file-upload'; +import { isNonRetryableUploadError } from '../utils/file-upload'; +import { v4 as uuidv4 } from 'uuid'; +import { ApiEndpoints } from '../store/solutionApi'; +import { API } from '../utils/API.adapter'; + +interface FileUploadParams { + files: Array<{ + fileName: string; + contentType?: string; + fileSize?: number; + }>; + conversationId: string; +} + +interface FileUploadUrlResponse { + uploads: Array<{ + uploadUrl: string; + fileName: string; + expiresIn: number; + createdAt: string; + formFields: Record; + fileKey: string; + }>; + messageId: string; +} + +// Request presigned URLs for file uploads +export const requestFileUpload = async ( + params: FileUploadParams, + useCaseId: string, + providedMessageId?: string +): Promise => { + try { + const messageId = providedMessageId || uuidv4(); + + const uploadRequest: FileUploadRequest = { + fileNames: params.files.map((file) => file.fileName), + conversationId: params.conversationId, + messageId + }; + + const uploadResponse: FileUploadResponse = await API.post( + 'solution-api', + `${ApiEndpoints.FILES}/${useCaseId}`, + { + body: uploadRequest + } + ); + + // Transform to match our expected format + return { + uploads: uploadResponse.uploads.map((upload) => ({ + uploadUrl: upload.uploadUrl, + fileName: upload.fileName, + expiresIn: typeof upload.expiresIn === 'string' ? parseInt(upload.expiresIn, 10) : upload.expiresIn, + createdAt: upload.createdAt, + formFields: upload.formFields, + fileKey: upload.formFields.key + })), + messageId + }; + } catch (error) { + console.error('Error requesting file upload URLs:', error); + throw error; + } +}; + +//upload single file +const uploadSingleFileWithRetry = async ( + file: File, + upload: { uploadUrl: string; fileName: string; formFields: Record; fileKey: string }, + onProgress?: (progress: number) => void, + maxRetries: number = 3 +): Promise<{ + success: boolean; + fileName: string; + fileKey: string; + error: Error | null; + attempts: number; +}> => { + let lastError: Error | null = null; + + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + const formData = new FormData(); + + Object.entries(upload.formFields).forEach(([key, value]) => { + formData.append(key, value); + }); + + // Add file last + formData.append('file', file); + + // XMLHttpRequest for progress tracking + const uploadResult = await new Promise((resolve, reject) => { + const xhr = new XMLHttpRequest(); + + if (onProgress) { + xhr.upload.addEventListener('progress', (event) => { + if (event.lengthComputable) { + const progress = (event.loaded / event.total) * 100; + onProgress(progress); + } + }); + } + + xhr.addEventListener('load', () => { + // Check for successful HTTP status codes (2xx range) + if (xhr.status >= 200 && xhr.status < 300) { + resolve(true); + } else { + // Handle error status codes (4xx client errors, 5xx server errors, etc.) + const error = new Error(`HTTP ${xhr.status}: ${xhr.statusText}`); + // Add status to error for retry logic + (error as any).status = xhr.status; + reject(error); + } + }); + + xhr.addEventListener('error', () => { + reject(new Error('Network error during upload')); + }); + + xhr.open('POST', upload.uploadUrl); + xhr.send(formData); + }); + + if (uploadResult) { + return { + success: true, + fileName: file.name, + fileKey: upload.fileKey, + error: null, + attempts: attempt + }; + } + } catch (error) { + const errorObj = error instanceof Error ? error : new Error('Unknown error'); + const status = (error as any)?.status; + lastError = new Error('Upload failed'); + + // Check if this is a non-retryable error + if (isNonRetryableUploadError(errorObj, status)) { + return { + success: false, + fileName: file.name, + fileKey: upload.fileKey, + error: new Error('Upload failed'), + attempts: attempt + }; + } + } + + // Wait before retry with exponential backoff (only for retryable errors) + if (attempt < maxRetries) { + const delay = Math.pow(2, attempt - 1) * 1000; // 1s, 2s, 4s + await new Promise((resolve) => setTimeout(resolve, delay)); + } + } + + return { + success: false, + fileName: file.name, + fileKey: upload.fileKey, + error: new Error('Upload failed'), + attempts: maxRetries + }; +}; + +// Upload multiple files with progress tracking and retry logic +export const uploadFiles = async ( + files: File[], + conversationId: string, + useCaseId: string, + onProgress?: (fileName: string, progress: number) => void, + onFileComplete?: (fileName: string, success: boolean, error?: Error) => void, + maxRetries: number = 3, + providedMessageId?: string +): Promise<{ + results: Array<{ + success: boolean; + fileName: string; + fileKey: string; + error: Error | null; + attempts: number; + }>; + allSuccessful: boolean; + successCount: number; + failureCount: number; + uploadedFiles: UploadedFile[]; + messageId: string; +}> => { + try { + // Request presigned URLs for all files + const requestParams: FileUploadParams = { + files: files.map((file) => ({ + fileName: file.name, + contentType: file.type, + fileSize: file.size + })), + conversationId + }; + + const urlResponse = await requestFileUpload(requestParams, useCaseId, providedMessageId); + const { uploads, messageId } = urlResponse; + + // Create upload map for easy lookup + const uploadMap = new Map(uploads.map((upload) => [upload.fileName, upload])); + + // Upload all files in parallel + const uploadPromises = files.map(async (file) => { + const upload = uploadMap.get(file.name); + + if (!upload) { + return { + success: false, + fileName: file.name, + fileKey: '', + error: new Error('No presigned URL found for this file'), + attempts: 0 + }; + } + + const result = await uploadSingleFileWithRetry( + file, + upload, + (progress) => onProgress?.(file.name, progress), + maxRetries + ); + + onFileComplete?.(file.name, result.success, result.error || undefined); + return result; + }); + + const uploadResults = await Promise.all(uploadPromises); + + const successfulUploads = uploadResults.filter((result) => result.success); + const failedUploads = uploadResults.filter((result) => !result.success); + + // Create UploadedFile objects for successful uploads + const uploadedFiles: UploadedFile[] = successfulUploads.map((result) => ({ + key: result.fileKey, + fileName: result.fileName, + fileContentType: files.find((f) => f.name === result.fileName)?.type || '', + fileExtension: result.fileName.split('.').pop() || '', + fileSize: files.find((f) => f.name === result.fileName)?.size || 0, + messageId, + conversationId + })); + + if (failedUploads.length > 0) { + console.warn( + `${failedUploads.length} files failed to upload:`, + failedUploads.map((result) => result.fileName) + ); + } + + return { + results: uploadResults, + allSuccessful: failedUploads.length === 0, + successCount: successfulUploads.length, + failureCount: failedUploads.length, + uploadedFiles, + messageId + }; + } catch (error) { + // If presigned URL request fails, mark all files as failed + const errorObj = new Error('Upload failed'); + const results = files.map((file) => ({ + success: false, + fileName: file.name, + fileKey: '', + error: errorObj, + attempts: 0 + })); + + // Call onFileComplete for each failed file + files.forEach((file) => { + onFileComplete?.(file.name, false, errorObj); + }); + + return { + results, + allSuccessful: false, + successCount: 0, + failureCount: files.length, + uploadedFiles: [], + messageId: '' + }; + } +}; + +// Upload directly to S3 using formFields +export const uploadFilesWithPresignedUrls = async ( + files: File[], + conversationId: string, + useCaseId: string, + authToken: string +): Promise => { + const requestParams: FileUploadParams = { + files: files.map((file) => ({ + fileName: file.name, + contentType: file.type, + fileSize: file.size + })), + conversationId + }; + + const presignedResponse = await requestFileUpload(requestParams, useCaseId, authToken); + + // Upload each file directly to S3 using formFields + const uploadResults = await Promise.all( + presignedResponse.uploads.map(async (upload, index) => { + const file = files[index]; + + const formData = new FormData(); + + Object.entries(upload.formFields).forEach(([key, value]) => { + formData.append(key, value); + }); + + // Add the actual file + formData.append('file', file); + + // Direct POST to S3 + const s3Response = await fetch(upload.uploadUrl, { + method: 'POST', + body: formData + }); + + if (!s3Response.ok) { + throw new Error(`S3 upload failed: ${s3Response.statusText}`); + } + + // Return uploaded file info + return { + key: upload.fileKey, + fileName: upload.fileName, + fileContentType: file.type, + fileExtension: file.name.split('.').pop() || '', + fileSize: file.size, + messageId: presignedResponse.messageId, + conversationId + } as UploadedFile; + }) + ); + + return uploadResults; +}; + +// Determines if a delete error should not be retried +const isNonRetryableDeleteError = (error: Error, status?: number): boolean => { + const nonRetryableStatuses = [ + 400, // Bad request - malformed request + 403, // Forbidden - permission issues + 404, // Not found - file doesn't exist (could be considered success) + 410 // Gone - file already deleted + ]; + + if (status && nonRetryableStatuses.includes(status)) { + return true; + } + + // Check error message for specific non-retryable conditions + const nonRetryableMessages = [ + 'AccessDenied', + 'InvalidAccessKeyId', + 'TokenRefreshRequired', + 'ExpiredToken', + 'MalformedRequest', + 'InvalidRequest' + ]; + + return nonRetryableMessages.some((message) => error.message.includes(message) || error.name.includes(message)); +}; + +// Delete a single file with retry logic +const deleteSingleFileWithRetry = async ( + fileName: string, + conversationId: string, + messageId: string, + useCaseId: string, + maxRetries: number = 3 +): Promise<{ + success: boolean; + fileName: string; + error: Error | null; + attempts: number; +}> => { + let lastError: Error | null = null; + + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + const deleteRequest: FileDeleteRequest = { + fileNames: [fileName], + conversationId, + messageId + }; + + const deleteResponse: FileDeleteResponse = await API.del( + 'solution-api', + `${ApiEndpoints.FILES}/${useCaseId}`, + { + body: deleteRequest + } + ); + + // Check if the specific file was successfully deleted + const fileDeletion = deleteResponse.deletions.find((d) => d.fileName === fileName); + if (fileDeletion && fileDeletion.success) { + return { + success: true, + fileName, + error: null, + attempts: attempt + }; + } else { + const fileError = fileDeletion?.error || 'Delete operation failed'; + + // Treat "file not found" as success since the end result is the same + if ( + fileError.toLowerCase().includes('file not found') || + fileError.toLowerCase().includes('not found') + ) { + return { + success: true, + fileName, + error: null, + attempts: attempt + }; + } + + return { + success: false, + fileName, + error: new Error(fileError), + attempts: attempt + }; + } + } catch (error) { + const errorObj = error instanceof Error ? error : new Error('Unknown error'); + const status = (error as any)?.status; + lastError = new Error('Delete failed'); + + if (isNonRetryableDeleteError(errorObj, status)) { + return { + success: false, + fileName, + error: new Error('Delete failed'), + attempts: attempt + }; + } + + // For 404 errors, consider it a success (file already deleted) + if (status === 404) { + return { + success: true, + fileName, + error: null, + attempts: attempt + }; + } + } + + if (attempt < maxRetries) { + const delay = Math.pow(2, attempt - 1) * 1000; // 1s, 2s, 4s + await new Promise((resolve) => setTimeout(resolve, delay)); + } + } + + return { + success: false, + fileName, + error: new Error('Delete failed'), + attempts: maxRetries + }; +}; + +// Delete uploaded files with retry logic +export const deleteFiles = async ( + fileNames: string[], + conversationId: string, + messageId: string, + useCaseId: string, + onFileComplete?: (fileName: string, success: boolean, error?: Error) => void, + maxRetries: number = 3 +): Promise<{ + deletions: Array<{ + success: boolean; + fileName: string; + error?: Error; + }>; + allSuccessful: boolean; + failureCount: number; +}> => { + try { + // Delete files in parallel with retry logic + const deletePromises = fileNames.map(async (fileName) => { + const result = await deleteSingleFileWithRetry(fileName, conversationId, messageId, useCaseId, maxRetries); + + onFileComplete?.(fileName, result.success, result.error || undefined); + + return { + success: result.success, + fileName: result.fileName, + ...(result.error && { error: result.error }) + }; + }); + + const deleteResults = await Promise.all(deletePromises); + const failedDeletes = deleteResults.filter((result) => !result.success); + + if (failedDeletes.length > 0) { + console.warn( + `${failedDeletes.length} files failed to delete:`, + failedDeletes.map((result) => result.fileName) + ); + } + + return { + deletions: deleteResults, + allSuccessful: failedDeletes.length === 0, + failureCount: failedDeletes.length + }; + } catch (error) { + // If there's a general error, mark all files as failed + const deletions = fileNames.map((fileName) => ({ + success: false, + fileName, + error: new Error('Delete failed') + })); + + return { + deletions, + allSuccessful: false, + failureCount: fileNames.length + }; + } +}; diff --git a/source/ui-chat/src/store/configSlice.ts b/source/ui-chat/src/store/configSlice.ts index bbf31d68..1ea1e4c3 100644 --- a/source/ui-chat/src/store/configSlice.ts +++ b/source/ui-chat/src/store/configSlice.ts @@ -4,7 +4,28 @@ import { createSlice, PayloadAction } from '@reduxjs/toolkit'; import { RuntimeConfig, TextUseCaseConfig, UseCaseType, AgentUseCaseConfig } from '../models'; import { RootState } from './store'; -import { DEFAULT_CHAT_INPUT_MAX_LENGTH, MAX_PROMPT_TEMPLATE_LENGTH, USE_CASE_TYPES } from '../utils/constants'; +import { + DEFAULT_CHAT_INPUT_MAX_LENGTH, + AGENT_BUILDER_CHAT_INPUT_MAX_LENGTH, + MAX_PROMPT_TEMPLATE_LENGTH, + USE_CASE_TYPES, + MULTIMODAL_SUPPORTED_USE_CASE_TYPES +} from '../utils/constants'; + +interface MultimodalCapableConfig { + UseCaseType: string; + LlmParams?: { + MultimodalParams?: { + MultimodalEnabled?: boolean; + }; + }; +} + +const isMultimodalSupportedUseCase = ( + useCaseType: string +): useCaseType is (typeof MULTIMODAL_SUPPORTED_USE_CASE_TYPES)[number] => { + return (MULTIMODAL_SUPPORTED_USE_CASE_TYPES as readonly string[]).includes(useCaseType); +}; /** * Interface representing the configuration state @@ -113,6 +134,8 @@ export const getMaxInputTextLength = (state: RootState): number => { const useCaseConfig = state.config.runtimeConfig?.UseCaseConfig; if (useCaseConfig?.UseCaseType === USE_CASE_TYPES.AGENT) { return DEFAULT_CHAT_INPUT_MAX_LENGTH; + } else if (useCaseConfig?.UseCaseType === USE_CASE_TYPES.AGENT_BUILDER || useCaseConfig?.UseCaseType === USE_CASE_TYPES.WORKFLOW) { + return AGENT_BUILDER_CHAT_INPUT_MAX_LENGTH; } const textConfig = useCaseConfig as TextUseCaseConfig; return textConfig?.LlmParams?.PromptParams?.MaxInputTextLength ?? DEFAULT_CHAT_INPUT_MAX_LENGTH; @@ -164,6 +187,47 @@ export const getFeedbackEnabledState = (state: RootState): any => { return state.config.runtimeConfig?.UseCaseConfig?.FeedbackParams?.FeedbackEnabled ?? false; }; +/** + * Gets the multimodal enabled state from the runtime config + * @param state Root application state + * @returns Boolean indicating if multimodal is enabled for supported use case types + */ +export const getMultimodalEnabledState = (state: RootState): boolean => { + const useCaseConfig = state.config.runtimeConfig?.UseCaseConfig; + + if (!useCaseConfig) { + return false; + } + + const hasMultimodalStructure = ( + config: unknown + ): config is { + UseCaseType: string; + LlmParams?: { + MultimodalParams?: { + MultimodalEnabled?: boolean; + }; + }; + } => { + return ( + config !== null && + typeof config === 'object' && + 'UseCaseType' in config && + typeof (config as any).UseCaseType === 'string' + ); + }; + + if (!hasMultimodalStructure(useCaseConfig)) { + return false; + } + + if (!isMultimodalSupportedUseCase(useCaseConfig.UseCaseType)) { + return false; + } + + return useCaseConfig.LlmParams?.MultimodalParams?.MultimodalEnabled === true; +}; + /** * Gets the model provider name from the runtime config * @param state Root application state diff --git a/source/ui-chat/src/store/solutionApi.ts b/source/ui-chat/src/store/solutionApi.ts index 0b4c99c4..78d709dc 100644 --- a/source/ui-chat/src/store/solutionApi.ts +++ b/source/ui-chat/src/store/solutionApi.ts @@ -84,18 +84,47 @@ export const solutionApi = createApi({ data: response.data }; } + }), + /** + * Query endpoint to fetch files for a use case + */ + getFiles: builder.query({ + query: ({ useCaseId, conversationId, messageId }) => { + const params = new URLSearchParams(); + if (conversationId) params.append('conversationId', conversationId); + if (messageId) params.append('messageId', messageId); + + const queryString = params.toString(); + return `${ApiEndpoints.FILES}/${useCaseId}${queryString ? `?${queryString}` : ''}`; + }, + providesTags: (result, error, { useCaseId }) => [{ type: 'Files', id: useCaseId }] + }), + /** + * Query endpoint to get download URL for a specific file + */ + getFileDownloadUrl: builder.query<{ downloadUrl: string }, { useCaseId: string; conversationId: string; messageId: string; fileName: string }>({ + query: ({ useCaseId, conversationId, messageId, fileName }) => { + const params = new URLSearchParams(); + params.append('conversationId', conversationId); + params.append('messageId', messageId); + params.append('fileName', fileName); + params.append('action', 'download'); + + return `${ApiEndpoints.FILES}/${useCaseId}?${params.toString()}`; + } }) }), refetchOnMountOrArgChange: true, - tagTypes: ['Details'] + tagTypes: ['Details', 'Files'] }); -export const { useGetDeploymentQuery, useSubmitFeedbackMutation } = solutionApi; +export const { useGetDeploymentQuery, useSubmitFeedbackMutation, useGetFilesQuery, useLazyGetFileDownloadUrlQuery } = solutionApi; /** * Enum containing API endpoint paths */ export enum ApiEndpoints { DETAILS = '/details', - FEEDBACK = '/feedback' + FEEDBACK = '/feedback', + FILES = '/files' } diff --git a/source/ui-chat/src/types/file-upload.ts b/source/ui-chat/src/types/file-upload.ts new file mode 100644 index 00000000..dd5ec403 --- /dev/null +++ b/source/ui-chat/src/types/file-upload.ts @@ -0,0 +1,91 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export interface UploadedFile { + key: string; + fileName: string; + fileContentType: string; + fileExtension: string; + fileSize?: number; + messageId?: string; + conversationId?: string; +} + +export interface ApiFileReference { + fileReference: string; + fileName: string; +} + +export interface FileUploadRequest { + fileNames: string[]; + conversationId: string; + messageId: string; +} + +export interface FileUploadResponse { + uploads: Array<{ + uploadUrl: string; + formFields: Record; + fileName: string; + fileKey: string; + expiresIn: string; + createdAt: string; + }>; +} + +export interface FileDeleteRequest { + fileNames: string[]; + conversationId: string; + messageId: string; +} + +export interface FileDeleteResponse { + deletions: Array<{ + success: boolean; + fileName: string; + error?: string; + }>; + allSuccessful: boolean; + failureCount: number; +} + +export interface FileUploadState { + files: File[]; + uploadedFiles: UploadedFile[]; + isUploading: boolean; + isDeleting: boolean; + uploadProgress: Record; + uploadErrors: Record; + deleteErrors: Record; +} + +export interface FileValidationError { + fileName: string; + error: Error; +} + +export type FileUploadStatus = 'pending' | 'uploading' | 'uploaded' | 'error'; + +export interface FileUploadItem { + file: File; + status: FileUploadStatus; + progress: number; + error?: Error; + uploadedFile?: UploadedFile; +} + +export const FileUploadActionTypes = { + ADD_FILES: 'ADD_FILES', + UPDATE_FILE_STATUS: 'UPDATE_FILE_STATUS', + UPDATE_FILE_PROGRESS: 'UPDATE_FILE_PROGRESS', + SET_FILE_ERROR: 'SET_FILE_ERROR', + SET_DELETE_ERROR: 'SET_DELETE_ERROR', + CLEAR_DELETE_ERROR: 'CLEAR_DELETE_ERROR', + REMOVE_FILE: 'REMOVE_FILE', + CLEAR_FILES: 'CLEAR_FILES', + SET_UPLOADED_FILE: 'SET_UPLOADED_FILE', + SET_UPLOADING: 'SET_UPLOADING', + SET_DELETING: 'SET_DELETING' +} as const; + +export type FileUploadActionType = (typeof FileUploadActionTypes)[keyof typeof FileUploadActionTypes]; diff --git a/source/ui-chat/src/utils/API.adapter.ts b/source/ui-chat/src/utils/API.adapter.ts index 906b107a..ba91c7c5 100644 --- a/source/ui-chat/src/utils/API.adapter.ts +++ b/source/ui-chat/src/utils/API.adapter.ts @@ -55,7 +55,7 @@ export const API = { }), body: JSON.stringify(init.body) }); - + let responseBody; if (response.ok) { try { @@ -81,10 +81,15 @@ export const API = { }); let responseBody; - try { - responseBody = await response.json(); - } catch (e) { - responseBody = {}; + if (response.ok) { + try { + responseBody = await response.json(); + } catch (e) { + responseBody = {}; + } + } else { + const errorText = await response.text(); + throw new Error(errorText); } return responseBody; @@ -100,10 +105,15 @@ export const API = { }); let responseBody; - try { - responseBody = await response.json(); - } catch (e) { - responseBody = {}; + if (response.ok) { + try { + responseBody = await response.json(); + } catch (e) { + responseBody = {}; + } + } else { + const errorText = await response.text(); + throw new Error(errorText); } return responseBody; @@ -111,14 +121,23 @@ export const API = { async del(apiName: string, path: string, init: RestApiOptions = {}): Promise { const response = await fetch(baseUrl(apiName) + path + queryString(init.queryParams), { method: 'DELETE', - headers: await addAuthHeader(init.headers) + headers: await addAuthHeader({ + ...init.headers, + 'Content-Type': 'application/json' + }), + body: init.body ? JSON.stringify(init.body) : undefined }); let responseBody; - try { - responseBody = await response.json(); - } catch (e) { - responseBody = {}; + if (response.ok) { + try { + responseBody = await response.json(); + } catch (e) { + responseBody = {}; + } + } else { + const errorText = await response.text(); + throw new Error(errorText); } return responseBody; diff --git a/source/ui-chat/src/utils/constants.ts b/source/ui-chat/src/utils/constants.ts index 6e6b61b6..94b89dfd 100644 --- a/source/ui-chat/src/utils/constants.ts +++ b/source/ui-chat/src/utils/constants.ts @@ -29,16 +29,21 @@ export const DOCS_LINKS = { export const USE_CASE_TYPES = { AGENT: 'Agent', - TEXT: 'Text' + TEXT: 'Text', + AGENT_BUILDER: 'AgentBuilder', + WORKFLOW: 'Workflow' }; export const USE_CASE_TYPES_ROUTE = { AGENT: 'invokeAgent', - TEXT: 'sendMessage' + TEXT: 'sendMessage', + AGENT_BUILDER: 'invokeAgentCore', + WORKFLOW: 'invokeWorkflow' }; export const MAX_PROMPT_TEMPLATE_LENGTH = 10000; export const DEFAULT_CHAT_INPUT_MAX_LENGTH = 10000; +export const AGENT_BUILDER_CHAT_INPUT_MAX_LENGTH = 30000; //websocket limit is 32KB, leave room for additional request data export const END_CONVERSATION_TOKEN = '##END_CONVERSATION##'; export const SOLUTION_NAME = 'Generative AI Application Builder on AWS'; @@ -51,10 +56,29 @@ export const CONSTRAINT_TEXT_ERROR_COLOR = '#d91515'; export const FEEDBACK_HELPFUL = 'helpful' as const; export const FEEDBACK_NOT_HELPFUL = 'not-helpful' as const; -export const MAX_FEEDBACK_INPUT_LENGTH = 500 +export const MAX_FEEDBACK_INPUT_LENGTH = 500; + +export const MULTIMODAL_SUPPORTED_USE_CASE_TYPES = [USE_CASE_TYPES.AGENT_BUILDER, USE_CASE_TYPES.WORKFLOW] as const; + +// File upload constants (following Converse API constraints) +export const MULTIMODAL_MAX_IMAGES = 20; +export const MULTIMODAL_MAX_DOCUMENTS = 5; +export const MULTIMODAL_MAX_IMAGE_SIZE = 3.75 * 1024 * 1024; // 3.75 MB +export const MULTIMODAL_MAX_DOCUMENT_SIZE = 4.5 * 1024 * 1024; // 4.5 MB +export const MULTIMODAL_MAX_IMAGE_DIMENSIONS = 8000; // 8000px width/height +export const MULTIMODAL_MAX_FILENAME_LENGTH = 255; // Maximum filename length (common filesystem limit) +export const MULTIMODAL_MAX_DISPLAY_FILENAME_LENGTH = 50; // Maximum filename length for display purposes +export const MULTIMODAL_SUPPORTED_IMAGE_FORMATS = ['gif', 'jpeg', 'jpg', 'png', 'webp']; +export const MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS = ['pdf', 'csv', 'doc', 'docx', 'xls', 'xlsx', 'html', 'txt', 'md']; +export const MULTIMODAL_SUPPORTED_FILE_FORMATS = [ + ...MULTIMODAL_SUPPORTED_IMAGE_FORMATS, + ...MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS +]; + +export const MULTIMODAL_FILENAME_PATTERN = `^[a-zA-Z0-9](?:[a-zA-Z0-9_-]|[\x20](?=[a-zA-Z0-9_-]))*\.(${MULTIMODAL_SUPPORTED_FILE_FORMATS.join('|')})$`; //model provider export const MODEL_PROVIDER = { BEDROCK: 'Bedrock', SAGEMAKER: 'SageMaker' -} as const; \ No newline at end of file +} as const; diff --git a/source/ui-chat/src/utils/construct-api-payload.ts b/source/ui-chat/src/utils/construct-api-payload.ts index 4c622b7d..a653b5b1 100644 --- a/source/ui-chat/src/utils/construct-api-payload.ts +++ b/source/ui-chat/src/utils/construct-api-payload.ts @@ -2,23 +2,76 @@ // SPDX-License-Identifier: Apache-2.0 import { USE_CASE_TYPES, USE_CASE_TYPES_ROUTE } from './constants'; -import { AgentMessage, AgentUseCaseConfig, ChatMessage, TextMessage, TextUseCaseConfig } from '../models'; +import { + AgentMessage, + AgentUseCaseConfig, + AgentBuilderMessage, + AgentBuilderUseCaseConfig, + ChatMessage, + TextMessage, + TextUseCaseConfig, + WorkflowMessage, + WorkflowUseCaseConfig +} from '../models'; +import { UploadedFile, ApiFileReference } from '../types/file-upload'; // Props type for constructPayload function type ConstructPayloadProps = { - useCaseConfig: TextUseCaseConfig | AgentUseCaseConfig; + useCaseConfig: TextUseCaseConfig | AgentUseCaseConfig | AgentBuilderUseCaseConfig | WorkflowUseCaseConfig; message: string; - conversationId: string; + conversationId?: string; + messageId?: string; // Optional - for tracking individual messages promptTemplate?: string; authToken?: string; + files?: UploadedFile[]; + useCaseId?: string; +}; + +// Helper function to extract UUID from filename in file key +const extractUuidFromFileKey = (fileKey: string) => { + try { + // File key format: usecase-uuid/user-uuid/conv-uuid/msg-uuid/fileReferenceUuid.extension + if (!fileKey || typeof fileKey !== 'string') { + return 'unknown'; + } + + // Extract UUID from the last segment (filename) + const uuidMatch = fileKey.match( + /\/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})(?:\.[^.]+)?$/i + ); + + if (uuidMatch && uuidMatch[1]) { + return uuidMatch[1]; + } + } catch (error) { + console.error('Error extracting UUID from file key:', error); + return 'unknown'; + } +}; + +// Helper function to transform uploaded files to API file references +const transformFilesToApiFormat = (files: UploadedFile[], useCaseId?: string): ApiFileReference[] => { + return files.map((file) => { + const uuid = extractUuidFromFileKey(file.key); + const extension = file.fileName.split('.').pop() || ''; + const fileReference = `${uuid}.${extension}`; + + return { + fileReference, + fileName: file.fileName + }; + }); }; export function constructPayload({ useCaseConfig, message, conversationId, + messageId, promptTemplate, - authToken + authToken, + files, + useCaseId }: ConstructPayloadProps): ChatMessage { switch (useCaseConfig.UseCaseType) { case USE_CASE_TYPES.AGENT: @@ -50,6 +103,50 @@ export function constructPayload({ ...additionalProps } as TextMessage; } + case USE_CASE_TYPES.AGENT_BUILDER: { + // Validate that useCaseId is provided when files are present + if (files && files.length > 0 && !useCaseId) { + throw new Error('useCaseId is required when files are present for AGENT_BUILDER use case'); + } + + const basePayload = { + action: USE_CASE_TYPES_ROUTE.AGENT_BUILDER, + inputText: message, + conversationId: conversationId + }; + + const additionalProps = { + ...(messageId && { messageId }), + ...(files && files.length > 0 && { files: transformFilesToApiFormat(files, useCaseId) }) + }; + + return { + ...basePayload, + ...additionalProps + } as AgentBuilderMessage; + } + case USE_CASE_TYPES.WORKFLOW: { + // Validate that useCaseId is provided when files are present + if (files && files.length > 0 && !useCaseId) { + throw new Error('useCaseId is required when files are present for WORKFLOW use case'); + } + + const basePayload = { + action: USE_CASE_TYPES_ROUTE.WORKFLOW, + inputText: message, + conversationId + }; + + const additionalProps = { + ...(messageId && { messageId }), + ...(files && files.length > 0 && { files: transformFilesToApiFormat(files, useCaseId) }) + }; + + return { + ...basePayload, + ...additionalProps + } as WorkflowMessage; + } default: throw new Error('Invalid use case type.'); } diff --git a/source/ui-chat/src/utils/extract-thinking-content.ts b/source/ui-chat/src/utils/extract-thinking-content.ts new file mode 100644 index 00000000..b40b3749 --- /dev/null +++ b/source/ui-chat/src/utils/extract-thinking-content.ts @@ -0,0 +1,112 @@ +import { ThinkingMetadata } from '../pages/chat/types'; + +/** + * Result of extracting thinking content from a message + */ +export interface ThinkingExtractionResult { + cleanedContent: string; + thinkingContent: string; +} + +/** + * Result of processing message content with thinking metadata + */ +export interface ProcessedMessageContent { + /** Cleaned message content with thinking tags removed */ + content: string; + /** Updated thinking metadata with stripped content attached */ + thinking?: ThinkingMetadata; +} + +/** + * Extracts content from tags and returns cleaned message content. + * Handles multiple thinking tags, malformed tags, and nested content gracefully. + * No sanitization is performed as content comes from our backend. + * + * @param content - The raw message content that may contain thinking tags + * @returns Object with cleanedContent (thinking tags removed) and thinkingContent (extracted thinking) + * + * @example + * const result = extractThinkingContent("Hello analyzing... world"); + * // result.cleanedContent === "Hello world" + * // result.thinkingContent === "analyzing..." + */ +export function extractThinkingContent(content: string): ThinkingExtractionResult { + // Handle null, undefined, or non-string input + if (!content || typeof content !== 'string') { + return { + cleanedContent: '', + thinkingContent: '' + }; + } + + // Regex to match tags with content (case-insensitive, multiline, non-greedy) + const thinkingRegex = /([\s\S]*?)<\/thinking>/gi; + + let thinkingContent = ''; + + // Extract all thinking content from all tags + const matches = content.matchAll(thinkingRegex); + for (const match of matches) { + // match[1] contains the captured group (content between tags) + if (match[1]) { + thinkingContent += match[1] + '\n'; + } + } + + // Remove all thinking tags from the content + const cleanedContent = content.replace(thinkingRegex, ''); + + return { + cleanedContent: cleanedContent.trim(), + thinkingContent: thinkingContent.trim() + }; +} + +/** + * Processes message content by extracting thinking tags and attaching the content to thinking metadata. + * Combines thinking extraction with metadata attachment in a single operation. + * No sanitization is performed as content comes from our backend. + * + * @param content - The raw message content that may contain thinking tags + * @param thinkingMetadata - Optional thinking metadata to attach stripped content to + * @returns Object with cleaned content and updated thinking metadata (if provided) + * + * @example + * const metadata = { duration: 3, type: 'analyzing', startTime: '...', endTime: '...' }; + * const result = processMessageContent("Hello analyzing... world", metadata); + * // result.content === "Hello world" + * // result.thinking.strippedContent === "analyzing..." + */ +export function processMessageContent( + content: string, + thinkingMetadata?: ThinkingMetadata +): ProcessedMessageContent { + // Extract thinking content from the message + const { cleanedContent, thinkingContent } = extractThinkingContent(content); + + // If we have thinking metadata and extracted content, attach the content to metadata + if (thinkingMetadata && thinkingContent) { + return { + content: cleanedContent, + thinking: { + ...thinkingMetadata, + strippedContent: thinkingContent + } + }; + } + + // If we have thinking metadata but no extracted content, return metadata as-is + if (thinkingMetadata) { + return { + content: cleanedContent, + thinking: thinkingMetadata + }; + } + + // No thinking metadata provided, just return cleaned content + return { + content: cleanedContent, + thinking: undefined + }; +} diff --git a/source/ui-chat/src/utils/file-upload.ts b/source/ui-chat/src/utils/file-upload.ts new file mode 100644 index 00000000..0e503c28 --- /dev/null +++ b/source/ui-chat/src/utils/file-upload.ts @@ -0,0 +1,334 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + MULTIMODAL_MAX_IMAGE_SIZE, + MULTIMODAL_MAX_DOCUMENT_SIZE, + MULTIMODAL_SUPPORTED_IMAGE_FORMATS, + MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS, + MULTIMODAL_SUPPORTED_FILE_FORMATS, + MULTIMODAL_MAX_IMAGES, + MULTIMODAL_MAX_DOCUMENTS, + MULTIMODAL_MAX_FILENAME_LENGTH, + MULTIMODAL_MAX_DISPLAY_FILENAME_LENGTH, + MULTIMODAL_FILENAME_PATTERN +} from './constants'; +import { FileValidationError } from '../types/file-upload'; + +export class FileSizeExceededError extends Error { + constructor( + message: string, + public fileSize: number, + public limit: number + ) { + super(message); + this.name = 'FileSizeExceededError'; + } +} + +export class InvalidFileNameError extends Error { + constructor( + message: string, + public fileName: string + ) { + super(message); + this.name = 'InvalidFileNameError'; + } +} + +export class UnsupportedFileTypeError extends Error { + constructor( + message: string, + public fileExtension: string + ) { + super(message); + this.name = 'UnsupportedFileTypeError'; + } +} + +export const isFileSizeError = (error: Error): error is FileSizeExceededError => { + return error instanceof FileSizeExceededError; +}; + +export const isUnsupportedFileTypeError = (error: Error): error is UnsupportedFileTypeError => { + return error instanceof UnsupportedFileTypeError; +}; + +export const isFileNameError = (error: Error): error is InvalidFileNameError => { + return error instanceof InvalidFileNameError; +}; + +// Determines if an upload error should not be retried +export const isNonRetryableUploadError = (error: Error, status?: number): boolean => { + const nonRetryableStatuses = [ + 400, // Content-Type mismatches, malformed requests, missing headers + 403, // SignatureDoesNotMatch, expired URLs, CORS issues, permission problems + 404, // Wrong bucket/endpoint configuration + 413 // Payload Too Large + ]; + + if (status && nonRetryableStatuses.includes(status)) { + return true; + } + + // Check error message for specific non-retryable conditions + const nonRetryableMessages = [ + 'SignatureDoesNotMatch', + 'RequestTimeTooSkewed', + 'AccessDenied', + 'InvalidAccessKeyId', + 'TokenRefreshRequired', + 'ExpiredToken', + 'MalformedPolicy', + 'InvalidPolicyDocument', + 'CredentialsNotSupported', + 'RequestExpired' + ]; + + return nonRetryableMessages.some((message) => error.message.includes(message) || error.name.includes(message)); +}; + +const FILE_NAME_PATTERN = new RegExp(MULTIMODAL_FILENAME_PATTERN); + +export const isValidFileName = (fileName: string): boolean => { + return !!( + fileName && + typeof fileName === 'string' && + fileName.length <= MULTIMODAL_MAX_FILENAME_LENGTH && + FILE_NAME_PATTERN.test(fileName) + ); +}; + +export const formatFileNameForDisplay = (fileName: string): string => { + if (!fileName || typeof fileName !== 'string') { + return 'unknown'; + } + + // For display purposes, truncate if too long + if (fileName.length > MULTIMODAL_MAX_DISPLAY_FILENAME_LENGTH) { + const extension = fileName.substring(fileName.lastIndexOf('.')); + const nameWithoutExt = fileName.substring(0, fileName.lastIndexOf('.')); + const truncatedName = nameWithoutExt.substring( + 0, + MULTIMODAL_MAX_DISPLAY_FILENAME_LENGTH - extension.length - 3 + ); + return `${truncatedName}...${extension}`; + } + + return fileName; +}; + +export const validateFile = (file: File): FileValidationError | null => { + if (!file || typeof file !== 'object' || !file.name || typeof file.name !== 'string') { + return { + fileName: 'unknown', + error: new InvalidFileNameError('Invalid file', 'unknown') + }; + } + + const displayName = formatFileNameForDisplay(file.name); + + const fileExtension = file.name.split('.').pop() || ''; + if (!MULTIMODAL_SUPPORTED_FILE_FORMATS.includes(fileExtension)) { + return { + fileName: displayName, + error: new UnsupportedFileTypeError(`Unsupported file type`, fileExtension) + }; + } + + if (!isValidFileName(file.name)) { + return { + fileName: displayName, + error: new InvalidFileNameError('Invalid file name', file.name) + }; + } + + // Check if file is empty + if (file.size === 0) { + return { + fileName: displayName, + error: new Error('File is empty') + }; + } + + const isImage = MULTIMODAL_SUPPORTED_IMAGE_FORMATS.includes(fileExtension); + const isDocument = MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS.includes(fileExtension); + + if (isImage && file.size > MULTIMODAL_MAX_IMAGE_SIZE) { + return { + fileName: displayName, + error: new FileSizeExceededError('File size exceeds maximum limit', file.size, MULTIMODAL_MAX_IMAGE_SIZE) + }; + } + + if (isDocument && file.size > MULTIMODAL_MAX_DOCUMENT_SIZE) { + return { + fileName: displayName, + error: new FileSizeExceededError('File size exceeds maximum limit', file.size, MULTIMODAL_MAX_DOCUMENT_SIZE) + }; + } + + return null; +}; + +export const validateFiles = (files: File[]): FileValidationError[] => { + const errors: FileValidationError[] = []; + + if (!files || files.length === 0) { + return errors; + } + + // Check for duplicate file names + const fileNames = new Set(); + const duplicates = new Set(); + + files.forEach((file) => { + if (file && file.name) { + if (fileNames.has(file.name)) { + duplicates.add(file.name); + } else { + fileNames.add(file.name); + } + } + }); + + if (duplicates.size > 0) { + errors.push({ + fileName: 'Multiple files', + error: new Error('Duplicate file name') + }); + } + + // To identify files with validation errors + const individualErrors = new Set(); + files.forEach((file) => { + if (!file || !file.name) return; + + const fileError = validateFile(file); + if (fileError) { + individualErrors.add(file.name); + } + }); + + // Validate individual files + files.forEach((file) => { + if (!file || !file.name) return; + + if (individualErrors.has(file.name)) { + const fileError = validateFile(file); + if (fileError) { + errors.push(fileError); + } + } + }); + + return errors; +}; + +// Uploads a file to S3 using presigned URL +export const uploadFileToS3 = async ( + file: File, + uploadUrl: string, + formFields: Record, + onProgress?: (progress: number) => void +): Promise => { + return new Promise((resolve, reject) => { + const formData = new FormData(); + + // Add form fields first + Object.entries(formFields).forEach(([key, value]) => { + formData.append(key, value); + }); + + // Add file last + formData.append('file', file); + + const xhr = new XMLHttpRequest(); + + if (onProgress) { + xhr.upload.addEventListener('progress', (event) => { + if (event.lengthComputable) { + const progress = (event.loaded / event.total) * 100; + onProgress(progress); + } + }); + } + + xhr.addEventListener('load', () => { + if (xhr.status >= 200 && xhr.status < 300) { + resolve(); + } else { + const error = new Error(`Upload failed: HTTP ${xhr.status} ${xhr.statusText}`); + (error as any).status = xhr.status; + reject(error); + } + }); + + xhr.addEventListener('error', () => { + reject(new Error('Upload failed: Network error')); + }); + + xhr.open('POST', uploadUrl); + xhr.send(formData); + }); +}; + +export const formatFileSize = (bytes: number): string => { + if (bytes === 0) return '0 Bytes'; + + const k = 1024; + const sizes = ['Bytes', 'KB', 'MB', 'GB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + + return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; +}; + +export const getFileTypeCategory = (fileName: string): 'image' | 'document' | 'unknown' => { + const extension = fileName.split('.').pop() || ''; + + if (MULTIMODAL_SUPPORTED_IMAGE_FORMATS.includes(extension)) { + return 'image'; + } else if (MULTIMODAL_SUPPORTED_DOCUMENT_FORMATS.includes(extension)) { + return 'document'; + } + + return 'unknown'; +}; +// Helper functions to check file count limits +export const getFileCounts = (files: File[]): { imageCount: number; documentCount: number } => { + let imageCount = 0; + let documentCount = 0; + + files.forEach((file) => { + if (!file || !file.name) return; + + const fileType = getFileTypeCategory(file.name); + if (fileType === 'image') { + imageCount++; + } else if (fileType === 'document') { + documentCount++; + } + }); + + return { imageCount, documentCount }; +}; + +export const isFileCountExceeded = (files: File[]): { exceeded: boolean; message?: string } => { + const { imageCount, documentCount } = getFileCounts(files); + + if (imageCount > MULTIMODAL_MAX_IMAGES) { + return { + exceeded: true, + message: `${imageCount} images attached. Only ${MULTIMODAL_MAX_IMAGES} images allowed. ` + }; + } + + if (documentCount > MULTIMODAL_MAX_DOCUMENTS) { + return { + exceeded: true, + message: `${documentCount} documents attached. Only ${MULTIMODAL_MAX_DOCUMENTS} documents allowed. ` + }; + } + + return { exceeded: false }; +}; diff --git a/source/ui-chat/src/utils/validation.ts b/source/ui-chat/src/utils/validation.ts index 6a0273a9..e37410d4 100644 --- a/source/ui-chat/src/utils/validation.ts +++ b/source/ui-chat/src/utils/validation.ts @@ -109,7 +109,22 @@ export interface TraceDetails { export const parseTraceId = (errorMessage: string): TraceDetails => { try { - // Extract the trace ID portion from the error message + // Check for new AgentCore lambda format first: "...quote the following trace id: {trace_id}" + const newFormatMatch = errorMessage.match(/quote the following trace id:\s*([^\s]+)/i); + if (newFormatMatch) { + const traceId = newFormatMatch[1]; + const message = errorMessage.replace(/\s*Please contact your administrator.*$/i, '').trim(); + + return { + rootId: traceId, + parentId: '', + sampled: false, + lineage: '', + message: message + }; + } + + // Fall back to old format: "Error occurred Root=1-2345;Parent=abcd;Sampled=1;Lineage=test" const traceStart = errorMessage.indexOf('Root='); if (traceStart === -1) { throw new Error('No trace ID found in message'); diff --git a/source/ui-deployment/package-lock.json b/source/ui-deployment/package-lock.json index 8863e338..7fda1b49 100644 --- a/source/ui-deployment/package-lock.json +++ b/source/ui-deployment/package-lock.json @@ -1,15 +1,16 @@ { "name": "@amzn/gen-ai-app-builder-on-aws-ui-deployment", - "version": "3.0.7", + "version": "4.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@amzn/gen-ai-app-builder-on-aws-ui-deployment", - "version": "3.0.7", + "version": "4.0.0", "license": "Apache-2.0", "dependencies": { "@aws-amplify/ui-react": "^5.3.3", + "@aws-sdk/util-arn-parser": "^3.893.0", "@cloudscape-design/code-view": "3.0.16", "@cloudscape-design/components": "^3.0.694", "@cloudscape-design/global-styles": "^1.0.31", @@ -39,6 +40,7 @@ "identity-obj-proxy": "^3.0.0", "jsdom": "^24.1.1", "patch-package": "^8.0.1", + "prettier": "^3.6.2", "react-test-renderer": "^18.3.1", "vite": "^5.3.5", "vite-tsconfig-paths": "^4.3.2", @@ -5629,6 +5631,24 @@ "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", "peer": true }, + "node_modules/@aws-sdk/util-arn-parser": { + "version": "3.893.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-arn-parser/-/util-arn-parser-3.893.0.tgz", + "integrity": "sha512-u8H4f2Zsi19DGnwj5FSZzDMhytYF/bCh37vAtBsn3cNDL3YG578X5oc+wSX54pM3tOxS+NY7tvOAo52SW7koUA==", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@aws-sdk/util-arn-parser/node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, "node_modules/@aws-sdk/util-base64-browser": { "version": "3.6.1", "resolved": "https://registry.npmjs.org/@aws-sdk/util-base64-browser/-/util-base64-browser-3.6.1.tgz", @@ -13683,9 +13703,9 @@ "peer": true }, "node_modules/cosmiconfig/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "license": "MIT", "peer": true, "dependencies": { @@ -16430,9 +16450,10 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "license": "MIT", "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -18325,6 +18346,21 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/prettier": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", + "dev": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, "node_modules/pretty-format": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", @@ -20132,9 +20168,10 @@ } }, "node_modules/style-dictionary/node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "license": "ISC", "dependencies": { "foreground-child": "^3.1.0", "jackspeak": "^3.1.2", diff --git a/source/ui-deployment/package.json b/source/ui-deployment/package.json index f140f28d..fbc26b24 100644 --- a/source/ui-deployment/package.json +++ b/source/ui-deployment/package.json @@ -1,8 +1,9 @@ { "name": "@amzn/gen-ai-app-builder-on-aws-ui-deployment", - "version": "3.0.7", + "version": "4.0.0", "dependencies": { "@aws-amplify/ui-react": "^5.3.3", + "@aws-sdk/util-arn-parser": "^3.893.0", "@cloudscape-design/code-view": "3.0.16", "@cloudscape-design/components": "^3.0.694", "@cloudscape-design/global-styles": "^1.0.31", @@ -31,6 +32,7 @@ "file-loader": "^6.2.0", "identity-obj-proxy": "^3.0.0", "jsdom": "^24.1.1", + "prettier": "^3.6.2", "patch-package": "^8.0.1", "react-test-renderer": "^18.3.1", "vite": "^5.3.5", @@ -49,7 +51,7 @@ "test:debug": "vitest run", "test:no-cov": "vitest run --silent", "clean": "rm -rf node_modules", - "code-formatter": "./node_modules/prettier/bin-prettier.js --config ../../.prettierrc.yml '**/*.{js,json,css,md}' !package*.json --write", + "code-formatter": "prettier --config ../../.prettierrc.yml --ignore-path ../../.prettierignore --write '**/*.{js,jsx,ts,tsx,json,css,md}'", "code-linter": "./node_modules/eslint/bin/eslint.js . -c ../.eslintrc.js --ext .js" }, "eslintConfig": { diff --git a/source/ui-deployment/src/App.jsx b/source/ui-deployment/src/App.jsx index 6dbf0a62..57049d20 100644 --- a/source/ui-deployment/src/App.jsx +++ b/source/ui-deployment/src/App.jsx @@ -17,6 +17,9 @@ import { useEffect, useContext } from 'react'; import { UserContext } from './UserContext'; import { TextUseCaseType } from './components/wizard/interfaces/UseCaseTypes/Text'; import { AgentUseCaseType } from './components/wizard/interfaces/UseCaseTypes/Agent'; +import { MCPServerUseCaseType } from './components/wizard/interfaces/UseCaseTypes/MCPHost'; +import { AgentBuilderUseCaseType } from './components/wizard/interfaces/UseCaseTypes/AgentBuilder'; +import { WorkflowUseCaseType } from './components/wizard/interfaces/UseCaseTypes/Workflow'; import UseCaseSelection from './components/wizard/UseCaseSelection'; import UseCaseView from './components/useCaseDetails/UseCaseView'; @@ -101,20 +104,15 @@ function App({ runtimeConfig }) { }} />
- - You do not have permission to access the Deployment Dashboard. - This interface is restricted to admin users only. - Please contact your administrator if you believe this is an error. + + You do not have permission to access the Deployment Dashboard. This interface is restricted + to admin users only. Please contact your administrator if you believe this is an error.
); } - + return ( <> } /> - } /> + } + /> + } + /> + } + /> + } + /> } /> } /> diff --git a/source/ui-deployment/src/components/__tests__/__mocks__/deployment-steps-form-data.js b/source/ui-deployment/src/components/__tests__/__mocks__/deployment-steps-form-data.js index 5cded7bf..83a1d517 100644 --- a/source/ui-deployment/src/components/__tests__/__mocks__/deployment-steps-form-data.js +++ b/source/ui-deployment/src/components/__tests__/__mocks__/deployment-steps-form-data.js @@ -10,6 +10,7 @@ export const sampleDeployUseCaseFormData = { 'useCaseDescription': 'test use case description', 'deployUI': true, 'feedbackEnabled': false, + 'provisionedConcurrencyValue': 0, 'inError': true }, 'knowledgeBase': { @@ -38,7 +39,7 @@ export const sampleDeployUseCaseFormData = { 'value': 'Bedrock' }, 'modelName': 'fake-model', - 'bedrockInferenceType': BEDROCK_INFERENCE_TYPES.QUICK_START_MODELS, + 'bedrockInferenceType': BEDROCK_INFERENCE_TYPES.OTHER_FOUNDATION_MODELS, 'modelParameters': [ { 'key': 'fake-param', diff --git a/source/ui-deployment/src/components/__tests__/__mocks__/mock-text-deployment.js b/source/ui-deployment/src/components/__tests__/__mocks__/mock-text-deployment.js index 13099189..d1975d99 100644 --- a/source/ui-deployment/src/components/__tests__/__mocks__/mock-text-deployment.js +++ b/source/ui-deployment/src/components/__tests__/__mocks__/mock-text-deployment.js @@ -29,6 +29,7 @@ export const mockSelectedDeployment = { 'Temperature': 0.7, 'Verbose': true, 'BedrockLlmParams': { + 'BedrockInferenceType': 'OTHER_FOUNDATION', 'GuardrailIdentifier': 'mock-guardrail', 'GuardrailVersion': '1', 'ModelId': 'anthropic.claude-v2' diff --git a/source/ui-deployment/src/components/__tests__/dashboard/DashboardNavigation.test.tsx b/source/ui-deployment/src/components/__tests__/dashboard/DashboardNavigation.test.tsx new file mode 100644 index 00000000..7ce7e6fe --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/dashboard/DashboardNavigation.test.tsx @@ -0,0 +1,118 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import '@testing-library/jest-dom'; +import { render, screen, fireEvent } from '@testing-library/react'; +import { MemoryRouter } from 'react-router-dom'; +import DashboardView from '../../dashboard/DashboardView'; +import { HomeContext } from '../../../contexts/home.context'; +import { USECASE_TYPES } from '../../../utils/constants'; + +// Mock the navigate function +const mockNavigate = jest.fn(); +jest.mock('react-router-dom', () => ({ + ...jest.requireActual('react-router-dom'), + useNavigate: () => mockNavigate +})); + +// Mock API calls +jest.mock('aws-amplify', () => ({ + API: { + get: jest.fn().mockResolvedValue({ + deployments: [], + numUseCases: 0 + }) + }, + Auth: { + currentAuthenticatedUser: jest.fn().mockResolvedValue({ + getSignInUserSession: () => ({ + getAccessToken: () => ({ + getJwtToken: () => 'fake-token' + }) + }) + }) + } +})); + +describe('Dashboard Navigation', () => { + const mockContextValue = { + dispatch: jest.fn(), + state: { + authorized: true, + deploymentsData: [], + selectedDeployment: {}, + deploymentAction: 'CREATE', + numUseCases: 0, + currentPageIndex: 1, + searchFilter: '', + submittedSearchFilter: '', + reloadData: false, + runtimeConfig: {} + } + }; + + beforeEach(() => { + mockNavigate.mockClear(); + }); + + test('navigates to correct route for Text use case', () => { + const textDeployment = { + UseCaseId: 'test-id-123', + UseCaseType: USECASE_TYPES.TEXT, + Name: 'Test Text Use Case' + }; + + // Mock the handleOnDeploymentIdClick function behavior + const expectedRoute = `/deployment-details/${USECASE_TYPES.TEXT}/${textDeployment.UseCaseId}`; + + // Simulate the navigation logic from DashboardView + const useCaseType = textDeployment.UseCaseType ?? USECASE_TYPES.TEXT; + const navigationPath = `/deployment-details/${useCaseType}/${textDeployment.UseCaseId}`; + + expect(navigationPath).toBe(expectedRoute); + }); + + test('navigates to correct route for Agent use case', () => { + const agentDeployment = { + UseCaseId: 'agent-id-456', + UseCaseType: USECASE_TYPES.AGENT, + Name: 'Test Agent Use Case' + }; + + const expectedRoute = `/deployment-details/${USECASE_TYPES.AGENT}/${agentDeployment.UseCaseId}`; + + const useCaseType = agentDeployment.UseCaseType ?? USECASE_TYPES.TEXT; + const navigationPath = `/deployment-details/${useCaseType}/${agentDeployment.UseCaseId}`; + + expect(navigationPath).toBe(expectedRoute); + }); + + test('navigates to correct route for MCP Server use case', () => { + const mcpDeployment = { + UseCaseId: 'mcp-id-789', + UseCaseType: USECASE_TYPES.MCP_SERVER, + Name: 'Test MCP Server Use Case' + }; + + const expectedRoute = `/deployment-details/${USECASE_TYPES.MCP_SERVER}/${mcpDeployment.UseCaseId}`; + + const useCaseType = mcpDeployment.UseCaseType ?? USECASE_TYPES.TEXT; + const navigationPath = `/deployment-details/${useCaseType}/${mcpDeployment.UseCaseId}`; + + expect(navigationPath).toBe(expectedRoute); + }); + + test('defaults to Text use case type when UseCaseType is missing', () => { + const deploymentWithoutType = { + UseCaseId: 'no-type-id-999', + Name: 'Use Case Without Type' + }; + + const expectedRoute = `/deployment-details/${USECASE_TYPES.TEXT}/${deploymentWithoutType.UseCaseId}`; + + const useCaseType = deploymentWithoutType.UseCaseType ?? USECASE_TYPES.TEXT; + const navigationPath = `/deployment-details/${useCaseType}/${deploymentWithoutType.UseCaseId}`; + + expect(navigationPath).toBe(expectedRoute); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/dashboard/DashboardView.test.tsx b/source/ui-deployment/src/components/__tests__/dashboard/DashboardView.test.tsx index b77032f8..84668047 100644 --- a/source/ui-deployment/src/components/__tests__/dashboard/DashboardView.test.tsx +++ b/source/ui-deployment/src/components/__tests__/dashboard/DashboardView.test.tsx @@ -35,10 +35,20 @@ vi.mock('@/hooks/useQueries', async () => { const actual = await vi.importActual('@/hooks/useQueries'); return { ...actual, - useUseCaseDetailsQuery: vi.fn() + useUseCaseDetailsQuery: vi.fn(), + useExportTemplatesQuery: vi.fn().mockReturnValue({ + data: null, + isLoading: false, + error: null, + refetch: vi.fn() + }) }; }); +vi.mock('@/components/useCaseDetails/export/ExportDropdownButton', () => ({ + ExportButtonDropdown: () =>
Export
+})); + let WizardView: any; describe('Dashboard', () => { @@ -155,9 +165,7 @@ describe('Dashboard', () => { minute: 'numeric' }); expect(table?.findBodyCell(deploymentRow, 6)?.getElement().textContent).toEqual(dateString); - expect(table?.findBodyCell(deploymentRow, 7)?.getElement().textContent).toEqual( - firstDeployment.ModelProvider - ); + expect(table?.findBodyCell(deploymentRow, 7)?.getElement().textContent).toEqual(firstDeployment.ModelProvider); let agentDeployment = mockContext.deploymentsData[2]; @@ -193,12 +201,15 @@ describe('Dashboard', () => { data: mockSelectedDeployment, refetch: vi.fn(), status: 'success' - } as Partial> as UseQueryResult); + } as Partial> as UseQueryResult< + typeof mockSelectedDeployment, + Error + >); renderWithMultipleRoutes({ initialRoute: '/', routes: [ { path: '/', element: }, - { path: '/deployment-details/:useCaseId', element: } + { path: '/deployment-details/:useCaseType/:useCaseId', element: } ], customState: contextValue.state }); diff --git a/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/Gateway.test.tsx b/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/Gateway.test.tsx new file mode 100644 index 00000000..032b934a --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/Gateway.test.tsx @@ -0,0 +1,62 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import '@testing-library/jest-dom'; +import renderer from 'react-test-renderer'; +import { Gateway } from '../../../useCaseDetails/gateway/Gateway'; + +vi.mock('@cloudscape-design/components'); + +// Mock the InfoLink component +vi.mock('@/components/commons', () => ({ + InfoLink: ({ onFollow, ariaLabel }: { onFollow: (event: any) => void; ariaLabel?: string }) => ( + + ) +})); + +const mockLoadHelpPanelContent = vi.fn(); + +describe('Gateway Component Snapshots', () => { + test('Gateway component with valid data', () => { + const mockSelectedDeployment = { + MCPParams: { + GatewayParams: { + GatewayId: 'test-gateway-id-123', + GatewayUrl: 'https://api.example.com/gateway' + } + } + }; + + const tree = renderer + .create( + + ) + .toJSON(); + + expect(tree).toMatchSnapshot(); + }); + + test('Gateway component with missing data', () => { + const mockSelectedDeployment = { + MCPParams: {} + }; + + const tree = renderer + .create( + + ) + .toJSON(); + + expect(tree).toMatchSnapshot(); + }); + + test('Gateway component with null deployment', () => { + const tree = renderer + .create() + .toJSON(); + + expect(tree).toMatchSnapshot(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/UseCaseView.test.tsx b/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/UseCaseView.test.tsx index 9530718a..deb2765f 100644 --- a/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/UseCaseView.test.tsx +++ b/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/UseCaseView.test.tsx @@ -14,7 +14,7 @@ import { snapshotWithProvider } from '@/utils'; vi.mock('@cloudscape-design/components'); const contextValue = { - dispatch: jest.fn() as Dispatch>, + dispatch: vi.fn() as Dispatch>, state: mockContext }; diff --git a/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/__snapshots__/Gateway.test.tsx.snap b/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/__snapshots__/Gateway.test.tsx.snap new file mode 100644 index 00000000..fa228834 --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/__snapshots__/Gateway.test.tsx.snap @@ -0,0 +1,7 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`Gateway Component Snapshots > Gateway component with missing data 1`] = `null`; + +exports[`Gateway Component Snapshots > Gateway component with null deployment 1`] = `null`; + +exports[`Gateway Component Snapshots > Gateway component with valid data 1`] = `null`; diff --git a/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/__snapshots__/UseCaseView.test.tsx.snap b/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/__snapshots__/UseCaseView.test.tsx.snap index 0fe7e2d9..4dae9afd 100644 --- a/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/__snapshots__/UseCaseView.test.tsx.snap +++ b/source/ui-deployment/src/components/__tests__/snapshot_tests/useCaseDetails/__snapshots__/UseCaseView.test.tsx.snap @@ -9,5 +9,7 @@ exports[`Snapshot test 1`] = ` } > No deployment details found for ID: + (Type: + )
`; diff --git a/source/ui-deployment/src/components/__tests__/snapshot_tests/wizard/WizardView.test.tsx b/source/ui-deployment/src/components/__tests__/snapshot_tests/wizard/WizardView.test.tsx index 30bb182f..29db8c30 100644 --- a/source/ui-deployment/src/components/__tests__/snapshot_tests/wizard/WizardView.test.tsx +++ b/source/ui-deployment/src/components/__tests__/snapshot_tests/wizard/WizardView.test.tsx @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 import '@testing-library/jest-dom'; +import { vi } from 'vitest'; import { mockReactMarkdown, snapshotWithProvider } from '@/utils'; import { TextUseCaseType } from '@/components/wizard/interfaces/UseCaseTypes/Text'; import { USECASE_TYPE_ROUTE } from '@/utils/constants'; diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/UseCaseView.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/UseCaseView.test.tsx index f1eba604..6fb0dc6b 100644 --- a/source/ui-deployment/src/components/__tests__/useCaseDetails/UseCaseView.test.tsx +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/UseCaseView.test.tsx @@ -19,6 +19,7 @@ import { act } from 'react-test-renderer'; import { TextUseCaseType } from '@/components/wizard/interfaces/UseCaseTypes/Text'; import { USECASE_TYPE_ROUTE } from '@/utils/constants'; import { createAgentLink } from '@/components/useCaseDetails/agent/AgentDetails'; +import WizardView from '../../wizard/WizardView'; import { useUseCaseDetailsQuery } from '@/hooks/useQueries'; import { agentDetailsApiResponse, @@ -29,6 +30,16 @@ import { sagemakerNonRagResponse } from '../__mocks__/mock-details-api-response'; +vi.mock('../../wizard/WizardView', () => ({ + default: ({ useCase }: any) =>
Mocked WizardView
+})); + +vi.mock('../../useCaseDetails/export/ExportDropdownButton', () => ({ + ExportButtonDropdown: ({ useCaseId, disabled }: any) => ( +
Export Button (Mocked)
+ ) +})); + vi.mock('@/hooks/useQueries', async () => { const actual = await vi.importActual('@/hooks/useQueries'); return { @@ -50,15 +61,13 @@ function mockUseCaseDetailsQuery(data: any) { } describe('UseCaseView', () => { - let WizardView: any; const contextValue = { dispatch: vi.fn() as Dispatch>, state: baseMock }; - beforeAll(async () => { + beforeAll(() => { mockReactMarkdown(); - WizardView = (await import('../../wizard/WizardView')).default; }); beforeEach(() => { @@ -254,21 +263,24 @@ describe('UseCaseView', () => { }); describe('Navigating to edit/clone from UseCaseView', () => { - let WizardView: any; const contextValue = { - dispatch: jest.fn() as Dispatch>, + dispatch: vi.fn() as Dispatch>, state: baseMock }; - beforeAll(async () => { + beforeAll(() => { mockReactMarkdown(); - WizardView = (await import('../../wizard/WizardView')).default; }); - beforeEach(async () => { + beforeEach(() => { mockUseCaseDetailsQuery(bedrockKnowledgeBaseResponse); }); + afterEach(() => { + vi.resetAllMocks(); + cleanup(); + }); + test('The edit wizard is correctly rendered', async () => { renderWithMultipleRoutes({ initialRoute: '/deployment-details/:useCaseId', diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/gateway/Gateway.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/gateway/Gateway.test.tsx new file mode 100644 index 00000000..328cc29f --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/gateway/Gateway.test.tsx @@ -0,0 +1,102 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import '@testing-library/jest-dom'; +import { render, screen } from '@testing-library/react'; +import { Gateway } from '../../../useCaseDetails/gateway/Gateway'; + +// Mock Cloudscape components +vi.mock('@cloudscape-design/components', () => ({ + Container: ({ header, children }: any) => ( +
+ {header} + {children} +
+ ), + Header: ({ variant, info, children }: any) => ( +
+

{children}

+ {info} +
+ ), + SpaceBetween: ({ children }: any) =>
{children}
, + ColumnLayout: ({ children }: any) =>
{children}
, + Box: ({ children, variant }: any) =>
{children}
+})); + +// Mock the InfoLink component +vi.mock('@/components/commons', () => ({ + InfoLink: ({ onFollow, ariaLabel }: { onFollow: (event: any) => void; ariaLabel?: string }) => ( + + ) +})); + +const mockLoadHelpPanelContent = vi.fn(); + +const mockSelectedDeploymentWithGateway = { + MCPParams: { + GatewayParams: { + GatewayId: 'test-gateway-id-123', + GatewayUrl: 'https://api.example.com/gateway' + } + } +}; + +const mockSelectedDeploymentWithoutGateway = { + MCPParams: {} +}; + +describe('Gateway Component', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test('renders gateway configuration with valid data', () => { + render( + + ); + + expect(screen.getByText('Gateway Configuration')).toBeInTheDocument(); + expect(screen.getByText('test-gateway-id-123')).toBeInTheDocument(); + expect(screen.getByText('https://api.example.com/gateway')).toBeInTheDocument(); + }); + + test('renders N/A when gateway data is missing', () => { + render( + + ); + + expect(screen.getByText('Gateway Configuration')).toBeInTheDocument(); + expect(screen.getAllByText('N/A')).toHaveLength(2); + }); + + test('renders N/A when selectedDeployment is null', () => { + render(); + + expect(screen.getByText('Gateway Configuration')).toBeInTheDocument(); + expect(screen.getAllByText('N/A')).toHaveLength(2); + }); + + test('calls loadHelpPanelContent when info button is clicked', () => { + render( + + ); + + const infoButton = screen.getByText('Info'); + infoButton.click(); + + expect(mockLoadHelpPanelContent).toHaveBeenCalledWith(1); + }); + +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/gateway/GatewayDetails.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/gateway/GatewayDetails.test.tsx new file mode 100644 index 00000000..9162115f --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/gateway/GatewayDetails.test.tsx @@ -0,0 +1,93 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import '@testing-library/jest-dom'; +import { render, screen } from '@testing-library/react'; +import { GatewayDetails } from '../../../useCaseDetails/gateway/GatewayDetails'; + +// Mock Cloudscape components +vi.mock('@cloudscape-design/components', () => ({ + ColumnLayout: ({ children }: any) =>
{children}
, + SpaceBetween: ({ children }: any) =>
{children}
, + Box: ({ children, variant }: any) =>
{children}
+})); + +describe('GatewayDetails Component', () => { + test('renders gateway details with valid data', () => { + const mockSelectedDeployment = { + MCPParams: { + GatewayParams: { + GatewayId: 'gateway-abc-123', + GatewayUrl: 'https://gateway.example.com/api/v1' + } + } + }; + + render(); + + expect(screen.getByText('gateway-abc-123')).toBeInTheDocument(); + expect(screen.getByText('https://gateway.example.com/api/v1')).toBeInTheDocument(); + }); + + test('renders N/A when gateway params are missing', () => { + const mockSelectedDeployment = { + MCPParams: {} + }; + + render(); + + expect(screen.getAllByText('N/A')).toHaveLength(2); + }); + + test('renders N/A when MCPParams is missing', () => { + const mockSelectedDeployment = {}; + + render(); + + expect(screen.getAllByText('N/A')).toHaveLength(2); + }); + + test('renders N/A when selectedDeployment is null', () => { + render(); + + expect(screen.getAllByText('N/A')).toHaveLength(2); + }); + + test('renders N/A when selectedDeployment is undefined', () => { + render(); + + expect(screen.getAllByText('N/A')).toHaveLength(2); + }); + + test('renders correct labels', () => { + const mockSelectedDeployment = { + MCPParams: { + GatewayParams: { + GatewayId: 'test-id', + GatewayUrl: 'test-url' + } + } + }; + + render(); + + expect(screen.getByText('Gateway ID')).toBeInTheDocument(); + expect(screen.getByText('Gateway URL')).toBeInTheDocument(); + }); + + test('fields have correct test ids', () => { + const mockSelectedDeployment = { + MCPParams: { + GatewayParams: { + GatewayId: 'test-id', + GatewayUrl: 'test-url' + } + } + }; + + render(); + + expect(screen.getByTestId('gateway-id')).toBeInTheDocument(); + expect(screen.getByTestId('gateway-url')).toBeInTheDocument(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPItem.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPItem.test.tsx new file mode 100644 index 00000000..649fb04e --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPItem.test.tsx @@ -0,0 +1,154 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { screen, fireEvent } from '@testing-library/react'; +import { MCPItem } from '@/components/useCaseDetails/mcps/MCPItem'; +import { cloudscapeRender } from '@/utils'; + +describe('MCPItem Component', () => { + const mockMcpServer = { + Type: 'gateway', + UseCaseName: 'Test MCP Server', + UseCaseId: 'test-mcp-id-123', + Url: 'https://example.com/mcp' + }; + + beforeEach(() => { + vi.clearAllMocks(); + // Mock window.open + global.window.open = vi.fn(); + }); + + it('renders MCP server type', () => { + cloudscapeRender(); + + expect(screen.getByTestId('mcp-type-0-value')).toHaveTextContent('gateway'); + }); + + it('renders MCP server use case name', () => { + cloudscapeRender(); + + expect(screen.getByTestId('mcp-use-case-name-0-value')).toHaveTextContent('Test MCP Server'); + }); + + it('renders MCP server use case ID as a clickable link', () => { + cloudscapeRender(); + + expect(screen.getByTestId('mcp-use-case-id-0')).toBeInTheDocument(); + const link = screen.getByTestId('mcp-url-link-0'); + expect(link).toBeInTheDocument(); + expect(link).toHaveTextContent('test-mcp-id-123'); + }); + + it('renders MCP server URL', () => { + cloudscapeRender(); + + expect(screen.getByTestId('mcp-url-0-value')).toHaveTextContent('https://example.com/mcp'); + }); + + it('opens use case details in new tab when link is clicked', () => { + cloudscapeRender(); + + const link = screen.getByTestId('mcp-url-link-0'); + fireEvent.click(link); + + expect(window.open).toHaveBeenCalledWith('/deployment-details/MCPServer/test-mcp-id-123', '_blank'); + }); + + it('displays N/A when Type is missing', () => { + const mcpServerWithoutType = { + UseCaseName: 'Test MCP Server', + UseCaseId: 'test-mcp-id', + Url: 'https://example.com/mcp' + }; + + cloudscapeRender(); + + expect(screen.getByTestId('mcp-type-0-value')).toHaveTextContent('N/A'); + }); + + it('displays N/A when UseCaseName is missing', () => { + const mcpServerWithoutName = { + Type: 'gateway', + UseCaseId: 'test-mcp-id', + Url: 'https://example.com/mcp' + }; + + cloudscapeRender(); + + expect(screen.getByTestId('mcp-use-case-name-0-value')).toHaveTextContent('N/A'); + }); + + it('displays N/A when UseCaseId is missing', () => { + const mcpServerWithoutId = { + Type: 'gateway', + UseCaseName: 'Test MCP Server', + Url: 'https://example.com/mcp' + }; + + cloudscapeRender(); + + // Should not render link, just N/A text + expect(screen.queryByTestId('mcp-url-link-0')).not.toBeInTheDocument(); + }); + + it('displays N/A when Url is missing', () => { + const mcpServerWithoutUrl = { + Type: 'gateway', + UseCaseName: 'Test MCP Server', + UseCaseId: 'test-mcp-id' + }; + + cloudscapeRender(); + + expect(screen.getByTestId('mcp-url-0-value')).toHaveTextContent('N/A'); + }); + + it('renders with correct test id based on index', () => { + cloudscapeRender(); + + expect(screen.getByTestId('mcp-item-5')).toBeInTheDocument(); + expect(screen.getByTestId('mcp-type-5')).toBeInTheDocument(); + expect(screen.getByTestId('mcp-url-link-5')).toBeInTheDocument(); + }); + + it('renders runtime type correctly', () => { + const runtimeMcpServer = { + ...mockMcpServer, + Type: 'runtime' + }; + + cloudscapeRender(); + + expect(screen.getByTestId('mcp-type-0-value')).toHaveTextContent('runtime'); + }); + + it('prevents default link behavior', () => { + cloudscapeRender(); + + const link = screen.getByTestId('mcp-url-link-0'); + const event = new MouseEvent('click', { bubbles: true, cancelable: true }); + const preventDefaultSpy = vi.spyOn(event, 'preventDefault'); + + link.dispatchEvent(event); + + expect(preventDefaultSpy).toHaveBeenCalled(); + }); + + it('handles empty string values', () => { + const mcpServerWithEmptyStrings = { + Type: '', + UseCaseName: '', + UseCaseId: '', + Url: '' + }; + + cloudscapeRender(); + + // Empty strings should be treated as falsy and show N/A + expect(screen.getByTestId('mcp-type-0-value')).toHaveTextContent('N/A'); + expect(screen.getByTestId('mcp-use-case-name-0-value')).toHaveTextContent('N/A'); + expect(screen.getByTestId('mcp-url-0-value')).toHaveTextContent('N/A'); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPs.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPs.test.tsx new file mode 100644 index 00000000..c85442f0 --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPs.test.tsx @@ -0,0 +1,112 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { screen } from '@testing-library/react'; +import { MCPs } from '@/components/useCaseDetails/mcps/MCPs'; +import { cloudscapeRender } from '@/utils'; +import { MCPsList, ToolsList } from '@/components/useCaseDetails/mcps'; + +vi.mock('@/components/useCaseDetails/mcps/MCPsList', () => ({ + MCPsList: vi.fn(({ selectedDeployment }) =>
MCPsList Component
) +})); + +vi.mock('@/components/useCaseDetails/mcps/ToolsList', () => ({ + ToolsList: vi.fn(({ selectedDeployment }) =>
ToolsList Component
) +})); + +describe('MCPs Component', () => { + const mockLoadHelpPanelContent = vi.fn(); + const mockSelectedDeployment = { + AgentBuilderParams: { + MCPServers: [ + { + Type: 'gateway', + UseCaseName: 'Test MCP Server', + UseCaseId: 'test-mcp-id', + Url: 'https://example.com/mcp' + } + ], + Tools: [ + { + ToolId: 'test-tool-1' + } + ] + } + }; + + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('renders both MCP Servers and Strands Tools sections', () => { + cloudscapeRender( + + ); + + // Check for MCP Servers header + expect(screen.getByText('MCP Servers')).toBeInTheDocument(); + + // Check for Strands Tools header + expect(screen.getByText('Strands Tools')).toBeInTheDocument(); + }); + + it('renders MCPsList component', () => { + cloudscapeRender( + + ); + + expect(screen.getByTestId('mcps-list-mock')).toBeInTheDocument(); + }); + + it('renders ToolsList component', () => { + cloudscapeRender( + + ); + + expect(screen.getByTestId('tools-list-mock')).toBeInTheDocument(); + }); + + it('renders info links for both sections', () => { + const { container } = cloudscapeRender( + + ); + + // Verify info links exist by checking for aria-labels + const infoLinks = container.querySelectorAll('[aria-label*="Information about"]'); + expect(infoLinks.length).toBe(2); + }); + + it('passes selectedDeployment to MCPsList', () => { + cloudscapeRender( + + ); + + expect(MCPsList).toHaveBeenCalledWith( + expect.objectContaining({ + selectedDeployment: mockSelectedDeployment + }), + expect.anything() + ); + }); + + it('passes selectedDeployment to ToolsList', () => { + cloudscapeRender( + + ); + + expect(ToolsList).toHaveBeenCalledWith( + expect.objectContaining({ + selectedDeployment: mockSelectedDeployment + }), + expect.anything() + ); + }); + + it('handles empty selectedDeployment', () => { + cloudscapeRender(); + + expect(screen.getByText('MCP Servers')).toBeInTheDocument(); + expect(screen.getByText('Strands Tools')).toBeInTheDocument(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPsList.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPsList.test.tsx new file mode 100644 index 00000000..39150c8b --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/MCPsList.test.tsx @@ -0,0 +1,188 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { screen } from '@testing-library/react'; +import { MCPsList } from '@/components/useCaseDetails/mcps/MCPsList'; +import { cloudscapeRender } from '@/utils'; +import { MCPItem } from '@/components/useCaseDetails/mcps/MCPItem'; + +vi.mock('@/components/useCaseDetails/mcps/MCPItem', () => ({ + MCPItem: vi.fn(({ mcpServer, index }) => ( +
MCPItem: {mcpServer.UseCaseName}
+ )) +})); + +describe('MCPsList Component', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('renders empty state when no MCP servers are configured', () => { + const selectedDeployment = { + AgentBuilderParams: { + MCPServers: [] + } + }; + + cloudscapeRender(); + + expect(screen.getByText('No MCP servers configured for this agent.')).toBeInTheDocument(); + }); + + it('renders empty state when AgentBuilderParams is undefined', () => { + const selectedDeployment = {}; + + cloudscapeRender(); + + expect(screen.getByText('No MCP servers configured for this agent.')).toBeInTheDocument(); + }); + + it('renders empty state when MCPServers is undefined', () => { + const selectedDeployment = { + AgentBuilderParams: {} + }; + + cloudscapeRender(); + + expect(screen.getByText('No MCP servers configured for this agent.')).toBeInTheDocument(); + }); + + it('renders list of MCP servers when data is available', () => { + const selectedDeployment = { + AgentBuilderParams: { + MCPServers: [ + { + McpId: 'mcp-1', + Type: 'gateway', + UseCaseName: 'Test MCP Server 1', + UseCaseId: 'test-mcp-id-1', + Url: 'https://example.com/mcp1' + }, + { + McpId: 'mcp-2', + Type: 'runtime', + UseCaseName: 'Test MCP Server 2', + UseCaseId: 'test-mcp-id-2', + Url: 'https://example.com/mcp2' + } + ] + } + }; + + cloudscapeRender(); + + expect(screen.getByTestId('mcps-list')).toBeInTheDocument(); + expect(screen.getByTestId('mcp-item-mock-0')).toBeInTheDocument(); + expect(screen.getByTestId('mcp-item-mock-1')).toBeInTheDocument(); + }); + + it('passes correct props to MCPItem components', () => { + const mcpServers = [ + { + McpId: 'mcp-1', + Type: 'gateway', + UseCaseName: 'Test MCP Server 1', + UseCaseId: 'test-mcp-id-1', + Url: 'https://example.com/mcp1' + } + ]; + + const selectedDeployment = { + AgentBuilderParams: { + MCPServers: mcpServers + } + }; + + cloudscapeRender(); + + expect(MCPItem).toHaveBeenCalledWith( + expect.objectContaining({ + mcpServer: mcpServers[0], + index: 0 + }), + expect.anything() + ); + }); + + it('uses McpId as key when available', () => { + const selectedDeployment = { + AgentBuilderParams: { + MCPServers: [ + { + McpId: 'unique-mcp-id', + Type: 'gateway', + UseCaseName: 'Test MCP Server', + UseCaseId: 'test-mcp-id', + Url: 'https://example.com/mcp' + } + ] + } + }; + + const { container } = cloudscapeRender(); + + expect(screen.getByTestId('mcp-item-mock-0')).toBeInTheDocument(); + }); + + it('uses fallback key when McpId is not available', () => { + const selectedDeployment = { + AgentBuilderParams: { + MCPServers: [ + { + Type: 'gateway', + UseCaseName: 'Test MCP Server', + UseCaseId: 'test-mcp-id', + Url: 'https://example.com/mcp' + } + ] + } + }; + + const { container } = cloudscapeRender(); + + expect(screen.getByTestId('mcp-item-mock-0')).toBeInTheDocument(); + }); + + it('renders multiple MCP servers correctly', () => { + const selectedDeployment = { + AgentBuilderParams: { + MCPServers: [ + { + McpId: 'mcp-1', + Type: 'gateway', + UseCaseName: 'MCP Server 1', + UseCaseId: 'id-1', + Url: 'https://example.com/mcp1' + }, + { + McpId: 'mcp-2', + Type: 'runtime', + UseCaseName: 'MCP Server 2', + UseCaseId: 'id-2', + Url: 'https://example.com/mcp2' + }, + { + McpId: 'mcp-3', + Type: 'gateway', + UseCaseName: 'MCP Server 3', + UseCaseId: 'id-3', + Url: 'https://example.com/mcp3' + } + ] + } + }; + + cloudscapeRender(); + + expect(screen.getByText('MCPItem: MCP Server 1')).toBeInTheDocument(); + expect(screen.getByText('MCPItem: MCP Server 2')).toBeInTheDocument(); + expect(screen.getByText('MCPItem: MCP Server 3')).toBeInTheDocument(); + }); + + it('handles null selectedDeployment gracefully', () => { + cloudscapeRender(); + + expect(screen.getByText('No MCP servers configured for this agent.')).toBeInTheDocument(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/ToolItem.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/ToolItem.test.tsx new file mode 100644 index 00000000..826aaac0 --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/ToolItem.test.tsx @@ -0,0 +1,129 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { screen } from '@testing-library/react'; +import { ToolItem } from '@/components/useCaseDetails/mcps/ToolItem'; +import { cloudscapeRender } from '@/utils'; + +describe('ToolItem Component', () => { + const mockTool = { + ToolId: 'test-tool-123' + }; + + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('renders tool ID', () => { + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('test-tool-123'); + }); + + it('displays N/A when ToolId is missing', () => { + const toolWithoutId = {}; + + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('N/A'); + }); + + it('displays N/A when ToolId is empty string', () => { + const toolWithEmptyId = { + ToolId: '' + }; + + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('N/A'); + }); + + it('renders with correct test id based on index', () => { + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-5')).toBeInTheDocument(); + }); + + it('renders different tool IDs correctly', () => { + const tool1 = { ToolId: 'tool-alpha' }; + const tool2 = { ToolId: 'tool-beta' }; + + const { rerender } = cloudscapeRender(); + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('tool-alpha'); + + rerender(); + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('tool-beta'); + }); + + it('renders with ValueWithLabel component', () => { + const { container } = cloudscapeRender(); + + // ValueWithLabel should render the label + expect(screen.getByText('Tool ID')).toBeInTheDocument(); + }); + + it('handles null tool gracefully', () => { + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('N/A'); + }); + + it('handles undefined tool gracefully', () => { + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('N/A'); + }); + + it('renders tool with special characters in ID', () => { + const toolWithSpecialChars = { + ToolId: 'tool-123_ABC-xyz' + }; + + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('tool-123_ABC-xyz'); + }); + + it('renders tool with long ID', () => { + const toolWithLongId = { + ToolId: 'very-long-tool-id-with-many-characters-and-hyphens-12345678901234567890' + }; + + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent( + 'very-long-tool-id-with-many-characters-and-hyphens-12345678901234567890' + ); + }); + + it('renders multiple tool items with different indices', () => { + const { container, rerender } = cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toBeInTheDocument(); + + rerender(); + expect(screen.getByTestId('tool-id-1')).toBeInTheDocument(); + }); + + it('handles tool with additional properties', () => { + const toolWithExtraProps = { + ToolId: 'test-tool', + ExtraProperty: 'should-be-ignored' + }; + + cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('test-tool'); + }); + + it('renders consistently across multiple renders', () => { + const { rerender } = cloudscapeRender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('test-tool-123'); + + rerender(); + + expect(screen.getByTestId('tool-id-0')).toHaveTextContent('test-tool-123'); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/ToolsList.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/ToolsList.test.tsx new file mode 100644 index 00000000..5fd8ac4c --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/mcps/ToolsList.test.tsx @@ -0,0 +1,184 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { screen } from '@testing-library/react'; +import { ToolsList } from '@/components/useCaseDetails/mcps/ToolsList'; +import { cloudscapeRender } from '@/utils'; +import { ToolItem } from '@/components/useCaseDetails/mcps'; + +vi.mock('@/components/useCaseDetails/mcps/ToolItem', () => ({ + ToolItem: vi.fn(({ tool, index }) =>
ToolItem: {tool.ToolId}
) +})); + +describe('ToolsList Component', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('renders empty state when no tools are configured', () => { + const selectedDeployment = { + AgentBuilderParams: { + Tools: [] + } + }; + + cloudscapeRender(); + + expect(screen.getByText('No Strands Tools configured for this agent.')).toBeInTheDocument(); + }); + + it('renders empty state when AgentBuilderParams is undefined', () => { + const selectedDeployment = {}; + + cloudscapeRender(); + + expect(screen.getByText('No Strands Tools configured for this agent.')).toBeInTheDocument(); + }); + + it('renders empty state when Tools is undefined', () => { + const selectedDeployment = { + AgentBuilderParams: {} + }; + + cloudscapeRender(); + + expect(screen.getByText('No Strands Tools configured for this agent.')).toBeInTheDocument(); + }); + + it('renders list of tools when data is available', () => { + const selectedDeployment = { + AgentBuilderParams: { + Tools: [ + { + ToolId: 'tool-1' + }, + { + ToolId: 'tool-2' + } + ] + } + }; + + cloudscapeRender(); + + expect(screen.getByTestId('tools-list')).toBeInTheDocument(); + expect(screen.getByTestId('tool-item-mock-0')).toBeInTheDocument(); + expect(screen.getByTestId('tool-item-mock-1')).toBeInTheDocument(); + }); + + it('passes correct props to ToolItem components', () => { + const tools = [ + { + ToolId: 'test-tool-1' + } + ]; + + const selectedDeployment = { + AgentBuilderParams: { + Tools: tools + } + }; + + cloudscapeRender(); + + expect(ToolItem).toHaveBeenCalledWith( + expect.objectContaining({ + tool: tools[0], + index: 0 + }), + expect.anything() + ); + }); + + it('uses ToolId as key when available', () => { + const selectedDeployment = { + AgentBuilderParams: { + Tools: [ + { + ToolId: 'unique-tool-id' + } + ] + } + }; + + const { container } = cloudscapeRender(); + + expect(screen.getByTestId('tool-item-mock-0')).toBeInTheDocument(); + }); + + it('uses fallback key when ToolId is not available', () => { + const selectedDeployment = { + AgentBuilderParams: { + Tools: [{}] + } + }; + + const { container } = cloudscapeRender(); + + expect(screen.getByTestId('tool-item-mock-0')).toBeInTheDocument(); + }); + + it('renders multiple tools correctly', () => { + const selectedDeployment = { + AgentBuilderParams: { + Tools: [ + { + ToolId: 'tool-1' + }, + { + ToolId: 'tool-2' + }, + { + ToolId: 'tool-3' + } + ] + } + }; + + cloudscapeRender(); + + expect(screen.getByText('ToolItem: tool-1')).toBeInTheDocument(); + expect(screen.getByText('ToolItem: tool-2')).toBeInTheDocument(); + expect(screen.getByText('ToolItem: tool-3')).toBeInTheDocument(); + }); + + it('handles null selectedDeployment gracefully', () => { + cloudscapeRender(); + + expect(screen.getByText('No Strands Tools configured for this agent.')).toBeInTheDocument(); + }); + + it('renders with SpaceBetween layout when tools exist', () => { + const selectedDeployment = { + AgentBuilderParams: { + Tools: [ + { + ToolId: 'tool-1' + } + ] + } + }; + + const { container } = cloudscapeRender(); + + expect(screen.getByTestId('tools-list')).toBeInTheDocument(); + }); + + it('handles tools with additional properties', () => { + const selectedDeployment = { + AgentBuilderParams: { + Tools: [ + { + ToolId: 'tool-1', + AdditionalProperty: 'value' + } + ] + } + }; + + cloudscapeRender(); + + expect(screen.getByText('ToolItem: tool-1')).toBeInTheDocument(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/memory/MemoryDetails.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/memory/MemoryDetails.test.tsx new file mode 100644 index 00000000..84e2892f --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/memory/MemoryDetails.test.tsx @@ -0,0 +1,78 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import '@testing-library/jest-dom'; +import { render, screen } from '@testing-library/react'; +import { MemoryDetails } from '../../../useCaseDetails/memory/MemoryDetails'; + +vi.mock('@cloudscape-design/components', () => ({ + Box: ({ children, variant, 'data-testid': dataTestId }: any) => ( +
+ {children} +
+ ), + ColumnLayout: ({ children }: any) =>
{children}
+})); + +describe('MemoryDetails Component', () => { + test('renders memory configuration with LongTermEnabled true', () => { + const mockSelectedDeployment = { + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: true + } + } + }; + + render(); + + expect(screen.getByText('Long Term Memory Enabled')).toBeInTheDocument(); + expect(screen.getByText('Yes')).toBeInTheDocument(); + }); + + test('renders memory configuration with LongTermEnabled false', () => { + const mockSelectedDeployment = { + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + } + } + }; + + render(); + + expect(screen.getByText('Long Term Memory Enabled')).toBeInTheDocument(); + expect(screen.getByText('No')).toBeInTheDocument(); + }); + + test('renders N/A when LongTermEnabled is undefined', () => { + const mockSelectedDeployment = { + AgentBuilderParams: { + MemoryConfig: {} + } + }; + + render(); + + expect(screen.getByText('N/A')).toBeInTheDocument(); + }); + + test('renders empty state when MemoryConfig is missing', () => { + const mockSelectedDeployment = { + AgentBuilderParams: {} + }; + + render(); + + expect(screen.getByText('No memory configuration')).toBeInTheDocument(); + expect(screen.getByText('This agent has no memory configuration set.')).toBeInTheDocument(); + }); + + test('renders empty state when AgentBuilderParams is missing', () => { + const mockSelectedDeployment = {}; + + render(); + + expect(screen.getByText('No memory configuration')).toBeInTheDocument(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/model/BedrockDetails.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/model/BedrockDetails.test.tsx index b06e5f01..83792cdb 100644 --- a/source/ui-deployment/src/components/__tests__/useCaseDetails/model/BedrockDetails.test.tsx +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/model/BedrockDetails.test.tsx @@ -56,24 +56,6 @@ describe('BedrockDetails', () => { expect(screen.queryByText('Inference Type')).not.toBeInTheDocument(); }); - test('displays QUICK_START inference type with ModelId as Model Name', () => { - const mockDeployment = { - LlmParams: { - BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', - ModelId: 'amazon.titan-text-express-v1' - } - } - }; - - render(); - - expect(screen.getByText('Inference Type')).toBeInTheDocument(); - expect(screen.getByText('Quick Start Models')).toBeInTheDocument(); - expect(screen.getByText('Model Name')).toBeInTheDocument(); - expect(screen.getByText('amazon.titan-text-express-v1')).toBeInTheDocument(); - }); - test('displays OTHER_FOUNDATION inference type with ModelId as Model ID', () => { const mockDeployment = { LlmParams: { @@ -87,7 +69,7 @@ describe('BedrockDetails', () => { render(); expect(screen.getByText('Inference Type')).toBeInTheDocument(); - expect(screen.getByText('Other Foundation Models')).toBeInTheDocument(); + expect(screen.getByText('Foundation Models')).toBeInTheDocument(); expect(screen.getByText('Model ID')).toBeInTheDocument(); expect(screen.getByText('anthropic.claude-3-sonnet-20240229-v1:0')).toBeInTheDocument(); }); @@ -148,7 +130,7 @@ describe('BedrockDetails', () => { const mockDeployment = { LlmParams: { BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', + BedrockInferenceType: 'OTHER_FOUNDATION', ModelId: 'amazon.titan-text-express-v1', GuardrailIdentifier: 'test-guardrail', GuardrailVersion: '1.0' @@ -168,7 +150,7 @@ describe('BedrockDetails', () => { const mockDeployment = { LlmParams: { BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', + BedrockInferenceType: 'OTHER_FOUNDATION', ModelId: 'amazon.titan-text-express-v1', InferenceProfileId: 'test-inference-profile', ModelArn: 'arn:aws:bedrock:us-east-1:123456789012:custom-model/test-model', @@ -183,8 +165,8 @@ describe('BedrockDetails', () => { // Check if all fields are displayed expect(screen.getByText(BEDROCK_MODEL_PROVIDER_NAME)).toBeInTheDocument(); expect(screen.getByText('Inference Type')).toBeInTheDocument(); - expect(screen.getByText('Quick Start Models')).toBeInTheDocument(); - expect(screen.getByText('Model Name')).toBeInTheDocument(); + expect(screen.getByText('Foundation Models')).toBeInTheDocument(); + expect(screen.getByText('Model ID')).toBeInTheDocument(); expect(screen.getByText('amazon.titan-text-express-v1')).toBeInTheDocument(); expect(screen.getByText('Inference Profile ID')).toBeInTheDocument(); expect(screen.getByText('test-inference-profile')).toBeInTheDocument(); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/systemPrompt/SystemPromptDetails.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/systemPrompt/SystemPromptDetails.test.tsx new file mode 100644 index 00000000..fe9cacf8 --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/systemPrompt/SystemPromptDetails.test.tsx @@ -0,0 +1,83 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import '@testing-library/jest-dom'; +import { render, screen } from '@testing-library/react'; +import { SystemPromptDetails } from '../../../useCaseDetails/systemPrompt/SystemPromptDetails'; + +vi.mock('@cloudscape-design/components', () => ({ + Box: ({ children, variant, 'data-testid': dataTestId }: any) => ( +
+ {children} +
+ ) +})); + +describe('SystemPromptDetails Component', () => { + test('renders system prompt with text', () => { + const mockSystemPrompt = 'You are a helpful AI assistant.'; + const mockSelectedDeployment = { + AgentBuilderParams: { + SystemPrompt: mockSystemPrompt + } + }; + + render(); + + expect(screen.getByText('System Prompt')).toBeInTheDocument(); + expect(screen.getByText(mockSystemPrompt)).toBeInTheDocument(); + }); + + test('renders system prompt with multiline text', () => { + const mockSystemPrompt = `You are a helpful AI assistant. Your role is to: + +- Provide accurate responses +- Be concise and clear +- Ask for clarification when needed`; + + const mockSelectedDeployment = { + AgentBuilderParams: { + SystemPrompt: mockSystemPrompt + } + }; + + render(); + + expect(screen.getByText('System Prompt')).toBeInTheDocument(); + const promptValue = screen.getByTestId('system-prompt-value'); + expect(promptValue).toHaveTextContent('You are a helpful AI assistant'); + expect(promptValue).toHaveTextContent('Provide accurate responses'); + expect(promptValue).toHaveTextContent('Be concise and clear'); + }); + + test('renders empty state when SystemPrompt is missing', () => { + const mockSelectedDeployment = { + AgentBuilderParams: {} + }; + + render(); + + expect(screen.getByText('No system prompt')).toBeInTheDocument(); + expect(screen.getByText('This agent has no system prompt configured.')).toBeInTheDocument(); + }); + + test('renders empty state when AgentBuilderParams is missing', () => { + const mockSelectedDeployment = {}; + + render(); + + expect(screen.getByText('No system prompt')).toBeInTheDocument(); + }); + + test('renders empty state when SystemPrompt is empty string', () => { + const mockSelectedDeployment = { + AgentBuilderParams: { + SystemPrompt: '' + } + }; + + render(); + + expect(screen.getByText('No system prompt')).toBeInTheDocument(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/workflowDetails/WorkflowDetails.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/workflowDetails/WorkflowDetails.test.tsx new file mode 100644 index 00000000..0c9cc9f7 --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/workflowDetails/WorkflowDetails.test.tsx @@ -0,0 +1,115 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { screen } from '@testing-library/react'; +import { WorkflowDetails } from '@/components/useCaseDetails/workflowDetails/WorkflowDetails'; +import { cloudscapeRender } from '@/utils'; + +describe('WorkflowDetails', () => { + const mockLoadHelpPanelContent = vi.fn(); + + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('renders workflow details with complete configuration', () => { + const mockSelectedDeployment = { + WorkflowParams: { + SystemPrompt: 'You are a customer support coordinator.', + OrchestrationPattern: 'agents-as-tools', + MemoryConfig: { + LongTermEnabled: true + }, + AgentsAsToolsParams: { + Agents: [ + { UseCaseId: 'agent-1', UseCaseName: 'Support Agent' }, + { UseCaseId: 'agent-2', UseCaseName: 'Sales Agent' } + ] + } + } + }; + + cloudscapeRender( + + ); + + // Check containers are rendered + expect(screen.getByTestId('workflow-client-agent-container')).toBeInTheDocument(); + expect(screen.getByTestId('workflow-multiagent-container')).toBeInTheDocument(); + + // Check system prompt is displayed + expect(screen.getByText('You are a customer support coordinator.')).toBeInTheDocument(); + + // Check memory is enabled + expect(screen.getByText('Yes')).toBeInTheDocument(); + + // Check orchestration pattern + expect(screen.getByText('Agents as Tools')).toBeInTheDocument(); + + // Check agent count + expect(screen.getByText('2')).toBeInTheDocument(); + }); + + it('renders with minimal configuration', () => { + const mockSelectedDeployment = { + WorkflowParams: { + SystemPrompt: 'You are a helpful assistant.', + OrchestrationPattern: 'agents-as-tools', + MemoryConfig: { + LongTermEnabled: false + }, + AgentsAsToolsParams: { + Agents: [] + } + } + }; + + cloudscapeRender( + + ); + + expect(screen.getByText('You are a helpful assistant.')).toBeInTheDocument(); + expect(screen.getByText('No')).toBeInTheDocument(); + expect(screen.getByText('0')).toBeInTheDocument(); + }); + + it('handles missing WorkflowParams', () => { + const mockSelectedDeployment = {}; + + cloudscapeRender( + + ); + + expect(screen.getByText('No system prompt configured')).toBeInTheDocument(); + expect(screen.getByText('Not configured')).toBeInTheDocument(); + expect(screen.getByText('0')).toBeInTheDocument(); + }); + + it('handles missing MemoryConfig', () => { + const mockSelectedDeployment = { + WorkflowParams: { + SystemPrompt: 'Test prompt', + OrchestrationPattern: 'agents-as-tools' + } + }; + + cloudscapeRender( + + ); + + expect(screen.getByText('No')).toBeInTheDocument(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/useCaseDetails/workflowOrchestration/WorkflowOrchestration.test.tsx b/source/ui-deployment/src/components/__tests__/useCaseDetails/workflowOrchestration/WorkflowOrchestration.test.tsx new file mode 100644 index 00000000..aaa3d100 --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/useCaseDetails/workflowOrchestration/WorkflowOrchestration.test.tsx @@ -0,0 +1,190 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { screen } from '@testing-library/react'; +import { WorkflowOrchestration } from '@/components/useCaseDetails/workflowOrchestration/WorkflowOrchestration'; +import { cloudscapeRender } from '@/utils'; + +describe('WorkflowOrchestration', () => { + const mockLoadHelpPanelContent = vi.fn(); + + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('renders orchestration pattern with selected agents', () => { + const mockSelectedDeployment = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-123', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Support Agent', + UseCaseDescription: 'Handles customer support queries' + }, + { + UseCaseId: 'agent-456', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Sales Agent', + UseCaseDescription: 'Handles sales inquiries' + } + ] + } + } + }; + + cloudscapeRender( + + ); + + // Check orchestration pattern + expect(screen.getByText('Agents as Tools')).toBeInTheDocument(); + + // Check selected agents container + expect(screen.getByTestId('workflow-selected-agents-container')).toBeInTheDocument(); + expect(screen.getByText('Selected agents (2)')).toBeInTheDocument(); + + // Check agent details + expect(screen.getByText('Support Agent')).toBeInTheDocument(); + expect(screen.getByText('Handles customer support queries')).toBeInTheDocument(); + expect(screen.getByText('Sales Agent')).toBeInTheDocument(); + expect(screen.getByText('Handles sales inquiries')).toBeInTheDocument(); + }); + + it('renders with no agents selected', () => { + const mockSelectedDeployment = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [] + } + } + }; + + cloudscapeRender( + + ); + + expect(screen.getByText('Agents as Tools')).toBeInTheDocument(); + expect(screen.getByTestId('workflow-no-agents-container')).toBeInTheDocument(); + expect(screen.getByText('No agents have been selected for this workflow.')).toBeInTheDocument(); + }); + + it('handles agents without descriptions', () => { + const mockSelectedDeployment = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-123', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Test Agent' + } + ] + } + } + }; + + cloudscapeRender( + + ); + + expect(screen.getByText('Test Agent')).toBeInTheDocument(); + expect(screen.getByText('No description is available.')).toBeInTheDocument(); + }); + + it('handles agents without names', () => { + const mockSelectedDeployment = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-123', + UseCaseType: 'AgentBuilder' + } + ] + } + } + }; + + cloudscapeRender( + + ); + + expect(screen.getByText('Agent 1')).toBeInTheDocument(); + }); + + it('handles missing WorkflowParams', () => { + const mockSelectedDeployment = {}; + + cloudscapeRender( + + ); + + expect(screen.getByText('Not configured')).toBeInTheDocument(); + expect(screen.getByTestId('workflow-no-agents-container')).toBeInTheDocument(); + }); + + it('handles unknown orchestration pattern', () => { + const mockSelectedDeployment = { + WorkflowParams: { + OrchestrationPattern: 'unknown-pattern', + AgentsAsToolsParams: { + Agents: [] + } + } + }; + + cloudscapeRender( + + ); + + expect(screen.getByText('unknown-pattern')).toBeInTheDocument(); + }); + + it('renders orchestration pattern description when available', () => { + const mockSelectedDeployment = { + WorkflowParams: { + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [] + } + } + }; + + cloudscapeRender( + + ); + + // Check that the description is displayed + expect( + screen.getByText('Specialized agents are wrapped as callable functions for a client agent') + ).toBeInTheDocument(); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/wizard/WizardView.test.tsx b/source/ui-deployment/src/components/__tests__/wizard/WizardView.test.tsx index d81996c0..eb195199 100644 --- a/source/ui-deployment/src/components/__tests__/wizard/WizardView.test.tsx +++ b/source/ui-deployment/src/components/__tests__/wizard/WizardView.test.tsx @@ -199,15 +199,17 @@ describe('Wizard', () => { modelProviderSelect?.openDropdown(); expect(modelProviderSelect?.findDropdown().findOptionByValue('Bedrock')).toBeTruthy(); - let modelNameDropdown = createWrapper(screen.getByTestId('model-name-dropdown')).findSelect( - '[data-testid="model-name-dropdown-select"]' - ); - modelNameDropdown?.openDropdown(); - modelNameDropdown?.selectOptionByValue('amazon.titan-text-express-v1'); - modelNameDropdown?.openDropdown(); - expect(modelNameDropdown?.findDropdown().findSelectedOptions()[1].getElement().innerHTML).toContain( - 'amazon.titan-text-express-v1' - ); + await waitFor(() => { + expect(screen.getByTestId('bedrock-inference-type-radio-group')).toBeInTheDocument(); + }); + const inferenceTypeRadio = createWrapper( + screen.getByTestId('bedrock-inference-type-radio-group') + ).findRadioGroup(); + expect(inferenceTypeRadio).toBeDefined(); + + await waitFor(() => { + expect(screen.getByTestId('inference-profile-id-input')).toBeInTheDocument(); + }); const step2AdditionalSettingsExpandableElement = screen.getByTestId('step2-additional-settings-expandable'); const step2AdditionalSettingsExpandable = createWrapper(step2AdditionalSettingsExpandableElement); @@ -373,8 +375,8 @@ describe('Wizard', () => { LlmParams: { BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', - ModelId: 'amazon.titan-text-express-v1' + BedrockInferenceType: 'INFERENCE_PROFILE', + InferenceProfileId: '' }, ModelParams: {}, ModelProvider: 'Bedrock', @@ -397,7 +399,8 @@ describe('Wizard', () => { }, UseCaseDescription: 'fake-use-case-description-name', UseCaseName: 'fake-use-case-name', - UseCaseType: 'Text' + UseCaseType: 'Text', + ProvisionedConcurrencyValue: 0 }, headers: { Authorization: 'fake-token' @@ -407,8 +410,8 @@ describe('Wizard', () => { expect(flashBarWrapper).toBeDefined(); setTimeout(() => { expect(screen?.getByTestId('dashboard-view')).toBeDefined(); - }, DELAY_AFTER_SUCCESS_DEPLOYMENT * 2); - }); + }, DELAY_AFTER_SUCCESS_DEPLOYMENT * 10); + }, 30_000); test('WizardView populates correctly with selected deployment', async () => { // Mock the queries @@ -434,24 +437,24 @@ describe('Wizard', () => { data: providerName === 'Bedrock' ? [ - { - ModelName: 'anthropic.claude-v2', - DisplayName: 'Claude v2', - Description: 'Anthropic Claude v2 model' - }, - { - ModelName: 'anthropic.claude-v1', - DisplayName: 'Claude v1', - Description: 'Anthropic Claude v1 model' - }, - { - ModelName: 'amazon.titan-text-express-v1', - DisplayName: 'Amazon Titan Text Express', - Description: 'Amazon Titan Text Express model' - } - ] + { + ModelName: 'anthropic.claude-v2', + DisplayName: 'Claude v2', + Description: 'Anthropic Claude v2 model' + }, + { + ModelName: 'anthropic.claude-v1', + DisplayName: 'Claude v1', + Description: 'Anthropic Claude v1 model' + }, + { + ModelName: 'amazon.titan-text-express-v1', + DisplayName: 'Amazon Titan Text Express', + Description: 'Amazon Titan Text Express model' + } + ] : [] - } as any) + }) as any ); const mockHomeContext = { @@ -518,12 +521,6 @@ describe('Wizard', () => { const modelProviderSelect = createWrapper(screen.getByTestId('model-provider-field')).findSelect(); expect(modelProviderSelect?.getElement().innerHTML).toContain(mockSelectedDeployment.LlmParams.ModelProvider); - let modelNameDropdown = createWrapper(screen.getByTestId('model-name-dropdown')) - .findSelect() - ?.findDropdown() - .getElement(); - expect(modelNameDropdown?.innerHTML).toContain(mockSelectedDeployment.LlmParams.BedrockLlmParams.ModelId); - let modelVerboseField = createWrapper(screen.getByTestId('model-verbose-field')) .findToggle() ?.findNativeInput() @@ -587,4 +584,43 @@ describe('Wizard', () => { ); expect(useCaseDetailsReviewComponentHtml).toContain(mockSelectedDeployment.deployUI); }); + + test('API method selection works correctly for different deployment actions', () => { + // This test verifies that the correct API method is selected based on deployment action + // The actual API calls are tested in the integration tests above + + const mockPatch = jest.fn(); + const mockPost = jest.fn(); + + // Mock both API methods + API.patch = mockPatch; + API.post = mockPost; + + // Test that the method selection logic works + const editAction = 'EDIT'; + const createAction = 'CREATE'; + + // For EDIT action, PATCH method should be used + expect(editAction).toBe('EDIT'); + + // For CREATE action, POST method should be used + expect(createAction).toBe('CREATE'); + + // The dynamic method calling with (API as any)[method.toLowerCase()] + // allows calling API.post or API.patch based on the method string + const postMethod = 'POST'; + const patchMethod = 'PATCH'; + + expect(postMethod.toLowerCase()).toBe('post'); + expect(patchMethod.toLowerCase()).toBe('patch'); + }); + + test('Notifications component receives correct fileCount parameter', () => { + renderWithProvider(, { route: USECASE_TYPE_ROUTE.TEXT }); + const element = screen.getByTestId('wizard-view'); + expect(element).toBeInTheDocument(); + // The notifications component should be present in the layout + // In actual usage, fileCount would be passed from the schema upload handler + const notificationsElement = element.querySelector('[data-testid="notifications"]'); + }); }); diff --git a/source/ui-deployment/src/components/__tests__/wizard/Workflow/AddAgentModal.test.tsx b/source/ui-deployment/src/components/__tests__/wizard/Workflow/AddAgentModal.test.tsx new file mode 100644 index 00000000..a1f5a47f --- /dev/null +++ b/source/ui-deployment/src/components/__tests__/wizard/Workflow/AddAgentModal.test.tsx @@ -0,0 +1,404 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { render, screen, fireEvent, waitFor } from '@testing-library/react'; +import '@testing-library/jest-dom'; +import { QueryClient, QueryClientProvider } from '@tanstack/react-query'; +import AddAgentModal from '../../../wizard/Workflow/AddAgentModal'; +import * as QueryHooks from '@/hooks/useQueries'; +import { AgentListResponse, fetchAgent } from '@/services/fetchAgentData'; + +// Mock the fetchAgent function +vi.mock('@/services/fetchAgentData', async () => { + const actual = await vi.importActual('@/services/fetchAgentData'); + return { + ...actual, + fetchAgent: vi.fn() + }; +}); + +describe('AddAgentModal', () => { + const mockOnDismiss = vi.fn(); + const mockOnAddAgents = vi.fn(); + const queryClient = new QueryClient({ + defaultOptions: { + queries: { + retry: false + } + } + }); + + const mockAgentsData: AgentListResponse = { + deployments: [ + { + UseCaseId: 'agent-1', + Name: 'Research Agent', + Description: 'Specialized in conducting research', + CreatedDate: '2024-01-15T10:00:00Z', + status: 'CREATE_COMPLETE', + UseCaseType: 'AgentBuilder' + }, + { + UseCaseId: 'agent-2', + Name: 'Product Agent', + Description: 'Expert in product recommendations', + CreatedDate: '2024-01-16T10:00:00Z', + status: 'CREATE_COMPLETE', + UseCaseType: 'AgentBuilder' + }, + { + UseCaseId: 'agent-3', + Name: 'Travel Agent', + Description: 'Assists with travel planning', + CreatedDate: '2024-01-17T10:00:00Z', + status: 'CREATE_COMPLETE', + UseCaseType: 'AgentBuilder' + } + ], + numUseCases: 3 + }; + + const defaultProps = { + visible: true, + onDismiss: mockOnDismiss, + onAddAgents: mockOnAddAgents, + excludeAgentIds: [], + maxSelectableAgents: 5 + }; + + const renderWithQueryClient = (component: React.ReactElement) => { + return render({component}); + }; + + beforeEach(() => { + vi.clearAllMocks(); + queryClient.clear(); + }); + + describe('Loading State', () => { + it('should show loading indicator when fetching agents', () => { + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: undefined, + isLoading: true, + error: null, + refetch: vi.fn() + } as any); + + renderWithQueryClient(); + + expect(screen.getByText('Loading available agents...')).toBeInTheDocument(); + }); + }); + + describe('Success State', () => { + beforeEach(() => { + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: mockAgentsData, + isLoading: false, + error: null, + refetch: vi.fn() + } as any); + }); + + it('should render modal with agents list', () => { + renderWithQueryClient(); + + expect(screen.getByText('Add Agents to Workflow')).toBeInTheDocument(); + expect(screen.getByText('Research Agent')).toBeInTheDocument(); + expect(screen.getByText('Product Agent')).toBeInTheDocument(); + expect(screen.getByText('Travel Agent')).toBeInTheDocument(); + }); + + it('should display agent descriptions', () => { + renderWithQueryClient(); + + expect(screen.getByText('Specialized in conducting research')).toBeInTheDocument(); + expect(screen.getByText('Expert in product recommendations')).toBeInTheDocument(); + }); + + it('should allow selecting agents', () => { + renderWithQueryClient(); + + const checkbox = screen.getByRole('checkbox', { name: /Research Agent/i }); + fireEvent.click(checkbox); + + expect(checkbox).toBeChecked(); + expect(screen.getByText('Add Selected (1)')).toBeInTheDocument(); + }); + + it('should allow selecting multiple agents', () => { + renderWithQueryClient(); + + const researchCheckbox = screen.getByRole('checkbox', { name: /Research Agent/i }); + const productCheckbox = screen.getByRole('checkbox', { name: /Product Agent/i }); + + fireEvent.click(researchCheckbox); + fireEvent.click(productCheckbox); + + expect(researchCheckbox).toBeChecked(); + expect(productCheckbox).toBeChecked(); + expect(screen.getByText('Add Selected (2)')).toBeInTheDocument(); + }); + + it('should call onAddAgents with selected agents when Add button is clicked', async () => { + // Mock fetchAgent to return agent details + const mockFetchAgent = vi.mocked(fetchAgent); + mockFetchAgent.mockResolvedValue({ + AgentBuilderParams: { mockAgentBuilderParam: 'value' }, + LlmParams: { mockLlmParam: 'value' } + }); + + renderWithQueryClient(); + + const checkbox = screen.getByRole('checkbox', { name: /Research Agent/i }); + fireEvent.click(checkbox); + + const addButton = screen.getByRole('button', { name: /Add Selected \(1\)/i }); + fireEvent.click(addButton); + + // Wait for the async operation to complete + await waitFor(() => { + expect(mockOnAddAgents).toHaveBeenCalledWith([ + { + useCaseId: 'agent-1', + useCaseName: 'Research Agent', + useCaseDescription: 'Specialized in conducting research', + useCaseType: 'AgentBuilder', + agentBuilderParams: { mockAgentBuilderParam: 'value' }, + llmParams: { mockLlmParam: 'value' } + } + ]); + }); + }); + + it('should disable Add button when no agents are selected', () => { + renderWithQueryClient(); + + const addButton = screen.getByRole('button', { name: /Add Selected \(0\)/i }); + expect(addButton).toBeDisabled(); + }); + + it('should call onDismiss when Cancel button is clicked', () => { + renderWithQueryClient(); + + const cancelButton = screen.getByRole('button', { name: /Cancel/i }); + fireEvent.click(cancelButton); + + expect(mockOnDismiss).toHaveBeenCalled(); + }); + }); + + describe('Search Functionality', () => { + beforeEach(() => { + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: mockAgentsData, + isLoading: false, + error: null, + refetch: vi.fn() + } as any); + }); + + it('should filter agents by name', () => { + renderWithQueryClient(); + + const searchInput = screen.getByPlaceholderText('Search by name or description...'); + fireEvent.change(searchInput, { target: { value: 'Research' } }); + + expect(screen.getByText('Research Agent')).toBeInTheDocument(); + expect(screen.queryByText('Product Agent')).not.toBeInTheDocument(); + expect(screen.queryByText('Travel Agent')).not.toBeInTheDocument(); + }); + + it('should filter agents by description', () => { + renderWithQueryClient(); + + const searchInput = screen.getByPlaceholderText('Search by name or description...'); + fireEvent.change(searchInput, { target: { value: 'product recommendations' } }); + + expect(screen.getByText('Product Agent')).toBeInTheDocument(); + expect(screen.queryByText('Research Agent')).not.toBeInTheDocument(); + }); + + it('should show "No agents found" message when search has no results', () => { + renderWithQueryClient(); + + const searchInput = screen.getByPlaceholderText('Search by name or description...'); + fireEvent.change(searchInput, { target: { value: 'nonexistent' } }); + + expect(screen.getByText('No agents found')).toBeInTheDocument(); + expect(screen.getByText('Try adjusting your search terms.')).toBeInTheDocument(); + }); + }); + + describe('Exclude Agents', () => { + beforeEach(() => { + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: mockAgentsData, + isLoading: false, + error: null, + refetch: vi.fn() + } as any); + }); + + it('should not display excluded agents', () => { + const propsWithExclusions = { + ...defaultProps, + excludeAgentIds: ['agent-1', 'agent-2'] + }; + + renderWithQueryClient(); + + expect(screen.queryByText('Research Agent')).not.toBeInTheDocument(); + expect(screen.queryByText('Product Agent')).not.toBeInTheDocument(); + expect(screen.getByText('Travel Agent')).toBeInTheDocument(); + }); + }); + + describe('Selection Limits', () => { + beforeEach(() => { + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: mockAgentsData, + isLoading: false, + error: null, + refetch: vi.fn() + } as any); + }); + + it('should show warning when selection limit is reached', () => { + const propsWithLimit = { + ...defaultProps, + maxSelectableAgents: 2 + }; + + renderWithQueryClient(); + + const researchCheckbox = screen.getByRole('checkbox', { name: /Research Agent/i }); + const productCheckbox = screen.getByRole('checkbox', { name: /Product Agent/i }); + + fireEvent.click(researchCheckbox); + fireEvent.click(productCheckbox); + + expect(screen.getByText(/You have reached the maximum number of agents/i)).toBeInTheDocument(); + }); + + it('should disable unselected checkboxes when limit is reached', () => { + const propsWithLimit = { + ...defaultProps, + maxSelectableAgents: 1 + }; + + renderWithQueryClient(); + + const researchCheckbox = screen.getByRole('checkbox', { name: /Research Agent/i }); + const productCheckbox = screen.getByRole('checkbox', { name: /Product Agent/i }); + + fireEvent.click(researchCheckbox); + + expect(researchCheckbox).not.toBeDisabled(); + expect(productCheckbox).toBeDisabled(); + }); + }); + + describe('Error State', () => { + it('should show error message when fetch fails', () => { + const mockRefetch = vi.fn(); + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: undefined, + isLoading: false, + error: new Error('Network error'), + refetch: mockRefetch + } as any); + + renderWithQueryClient(); + + expect(screen.getByText('Error loading agents')).toBeInTheDocument(); + expect(screen.getByText('Failed to load available agents. Please try again.')).toBeInTheDocument(); + }); + + it('should call refetch when Retry button is clicked', () => { + const mockRefetch = vi.fn(); + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: undefined, + isLoading: false, + error: new Error('Network error'), + refetch: mockRefetch + } as any); + + renderWithQueryClient(); + + const retryButton = screen.getByRole('button', { name: /Retry/i }); + fireEvent.click(retryButton); + + expect(mockRefetch).toHaveBeenCalled(); + }); + }); + + describe('Empty State', () => { + it('should show "No agents found" when no agents are available', () => { + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: { deployments: [], numUseCases: 0 }, + isLoading: false, + error: null, + refetch: vi.fn() + } as any); + + renderWithQueryClient(); + + expect(screen.getByText('No agents found')).toBeInTheDocument(); + expect(screen.getByText('No agents are available to add.')).toBeInTheDocument(); + }); + }); + + describe('Modal Visibility', () => { + beforeEach(() => { + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: mockAgentsData, + isLoading: false, + error: null, + refetch: vi.fn() + } as any); + }); + + it('should reset selection when modal becomes visible', () => { + const { rerender } = renderWithQueryClient(); + + rerender( + + + + ); + + const addButton = screen.getByRole('button', { name: /Add Selected \(0\)/i }); + expect(addButton).toBeDisabled(); + }); + }); + + describe('Agent without description', () => { + it('should show fallback description for agents without description', () => { + const agentsWithoutDescription: AgentListResponse = { + deployments: [ + { + UseCaseId: 'agent-no-desc', + Name: 'No Description Agent', + CreatedDate: '2024-01-15T10:00:00Z', + status: 'CREATE_COMPLETE', + UseCaseType: 'AgentBuilder' + } + ], + numUseCases: 1 + }; + + vi.spyOn(QueryHooks, 'useAgentsQuery').mockReturnValue({ + data: agentsWithoutDescription, + isLoading: false, + error: null, + refetch: vi.fn() + } as any); + + renderWithQueryClient(); + + expect(screen.getByText(/No description is available./i)).toBeInTheDocument(); + }); + }); +}); diff --git a/source/ui-deployment/src/components/__tests__/wizard/utils.test.jsx b/source/ui-deployment/src/components/__tests__/wizard/utils.test.jsx index 81d450d9..95bb2f34 100644 --- a/source/ui-deployment/src/components/__tests__/wizard/utils.test.jsx +++ b/source/ui-deployment/src/components/__tests__/wizard/utils.test.jsx @@ -8,18 +8,23 @@ import { KNOWLEDGE_BASE_TYPES } from '@/components/wizard/steps-config'; import { + createUseCaseInfoApiParams, + createVpcApiParams, createAgentApiParams, + createAgentBuilderApiParams, + createKnowledgeBaseApiParams, + createLLMParamsApiParams, + createConversationMemoryApiParams, createBedrockLlmParams, + createWorkflowApiParams +} from '../../wizard/params-builder'; +import { createDeployRequestPayload, createUpdateRequestPayload, - createUseCaseInfoApiParams, - createConversationMemoryApiParams, - createLLMParamsApiParams, - createVpcApiParams, - createKnowledgeBaseApiParams, generateKnowledgeBaseStepInfoFromDeployment, mapKendraKnowledgeBaseParams, - mapBedrockKnowledgeBaseParams + mapBedrockKnowledgeBaseParams, + mapWorkflowStepInfoFromDeployment } from '../../wizard/utils'; // eslint-disable-next-line jest/no-mocks-import import { sampleDeployUseCaseFormData } from '../__mocks__/deployment-steps-form-data'; @@ -227,7 +232,8 @@ describe('createDeployRequestPayload', () => { }, 'enableGuardrails': false, 'modelName': 'fake-model', - 'bedrockInferenceType': BEDROCK_INFERENCE_TYPES.QUICK_START_MODELS, + 'bedrockInferenceType': BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILES, + 'inferenceProfileId': 'fake-profile', 'accessibility': 'on', 'encryption': 'off', 'upgrades': 'off', @@ -238,14 +244,19 @@ describe('createDeployRequestPayload', () => { 'verbose': false, 'streaming': true }; - expect(createLLMParamsApiParams(stepInfo, sampleDeployUseCaseFormData.prompt, true)).toEqual({ + expect( + createLLMParamsApiParams(stepInfo, { + promptStepInfo: sampleDeployUseCaseFormData.prompt, + isRagEnabled: true + }) + ).toEqual({ LlmParams: { Streaming: true, Verbose: false, ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', - ModelId: 'fake-model' + BedrockInferenceType: 'INFERENCE_PROFILE', + InferenceProfileId: 'fake-profile' }, PromptParams: { MaxInputTextLength: 30000, @@ -283,7 +294,12 @@ describe('createDeployRequestPayload', () => { 'verbose': false, 'streaming': true }; - expect(createLLMParamsApiParams(stepInfo, sampleDeployUseCaseFormData.prompt, true)).toEqual({ + expect( + createLLMParamsApiParams(stepInfo, { + promptStepInfo: sampleDeployUseCaseFormData.prompt, + isRagEnabled: true + }) + ).toEqual({ LlmParams: { Streaming: true, Verbose: false, @@ -315,7 +331,8 @@ describe('createDeployRequestPayload', () => { 'value': 'Bedrock' }, 'modelName': 'fake-model', - 'bedrockInferenceType': BEDROCK_INFERENCE_TYPES.QUICK_START_MODELS, + 'bedrockInferenceType': BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILES, + 'inferenceProfileId': 'fake-profile', 'enableGuardrails': true, 'guardrailVersion': 'draft', 'guardrailIdentifier': 'fake-guardrail', @@ -329,14 +346,19 @@ describe('createDeployRequestPayload', () => { 'verbose': false, 'streaming': true }; - expect(createLLMParamsApiParams(stepInfo, sampleDeployUseCaseFormData.prompt, true)).toEqual({ + expect( + createLLMParamsApiParams(stepInfo, { + promptStepInfo: sampleDeployUseCaseFormData.prompt, + isRagEnabled: true + }) + ).toEqual({ LlmParams: { Streaming: true, Verbose: false, ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', - ModelId: 'fake-model', + BedrockInferenceType: 'INFERENCE_PROFILE', + InferenceProfileId: 'fake-profile', GuardrailIdentifier: 'fake-guardrail', GuardrailVersion: 'draft' }, @@ -375,7 +397,12 @@ describe('createDeployRequestPayload', () => { 'verbose': false, 'streaming': true }; - expect(createLLMParamsApiParams(stepInfo, sampleDeployUseCaseFormData.prompt, true)).toEqual({ + expect( + createLLMParamsApiParams(stepInfo, { + promptStepInfo: sampleDeployUseCaseFormData.prompt, + isRagEnabled: true + }) + ).toEqual({ LlmParams: { Streaming: true, Verbose: false, @@ -400,7 +427,12 @@ describe('createDeployRequestPayload', () => { } }); delete stepInfo.sagemakerInputSchema; - expect(createLLMParamsApiParams(stepInfo, sampleDeployUseCaseFormData.prompt, true)).toEqual({ + expect( + createLLMParamsApiParams(stepInfo, { + promptStepInfo: sampleDeployUseCaseFormData.prompt, + isRagEnabled: true + }) + ).toEqual({ LlmParams: { Streaming: true, Verbose: false, @@ -443,7 +475,8 @@ describe('createDeployRequestPayload', () => { DeployUI: true, FeedbackParams: { FeedbackEnabled: false - } + }, + ProvisionedConcurrencyValue: 0 }); }); @@ -525,7 +558,7 @@ describe('createDeployRequestPayload', () => { Verbose: false, ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', + BedrockInferenceType: 'OTHER_FOUNDATION', ModelId: 'fake-model' }, ModelParams: { @@ -570,7 +603,8 @@ describe('createDeployRequestPayload', () => { ExistingRestApiId: 'a1b2c3', FeedbackParams: { FeedbackEnabled: false - } + }, + ProvisionedConcurrencyValue: 0 }); }); @@ -604,7 +638,406 @@ describe('createDeployRequestPayload', () => { ExistingRestApiId: 'a1b2c3', FeedbackParams: { FeedbackEnabled: false + }, + ProvisionedConcurrencyValue: 0 + }); + }); + + it('should create valid MCP Server deploy request payload with DeployUI forced to false', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.MCP_SERVER; + formDataCopy.useCase.deployUI = true; // Set to true to test override + formDataCopy.mcpServer = { + creationMethod: 'gateway', + targets: [ + { + targetName: 'test-target', + targetDescription: 'Test target description', + targetType: 'lambda', + lambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function' + } + ] + }; + + const payload = createDeployRequestPayload(formDataCopy, { + 'RestApiEndpoint': 'https://a1b2c3.execute-api.aws-region.amazonaws.com/prod', + 'RestApiRootResourceId': 'd4e5f5' + }); + + expect(payload).toEqual({ + UseCaseName: 'test-use-case', + UseCaseType: 'MCPServer', + UseCaseDescription: 'test use case description', + DeployUI: false, // Should be forced to false regardless of input + ExistingRestApiId: 'a1b2c3', + FeedbackParams: { + FeedbackEnabled: false + }, + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target', + TargetDescription: 'Test target description', + SchemaUri: undefined, + TargetType: 'lambda', + LambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function' + } + ] + } + } + }); + }); + + it('should create valid MCP Server deploy request payload with runtime method', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.MCP_SERVER; + formDataCopy.useCase.deployUI = false; + formDataCopy.mcpServer = { + creationMethod: 'runtime', + ecrConfig: { + imageUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + } + }; + + const payload = createDeployRequestPayload(formDataCopy, { + 'RestApiEndpoint': 'https://a1b2c3.execute-api.aws-region.amazonaws.com/prod', + 'RestApiRootResourceId': 'd4e5f5' + }); + + expect(payload).toEqual({ + UseCaseName: 'test-use-case', + UseCaseType: 'MCPServer', + UseCaseDescription: 'test use case description', + DeployUI: false, + ExistingRestApiId: 'a1b2c3', + FeedbackParams: { + FeedbackEnabled: false + }, + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/my-mcp-server:latest' + } + } + }); + }); +}); + +describe('createWorkflowApiParams', () => { + it('should create workflow params with minimal configuration', () => { + const workflowStepInfo = { + systemPrompt: 'You are a customer support coordinator.', + orchestrationPattern: 'agents-as-tools', + memoryEnabled: false, + selectedAgents: [{ useCaseId: 'agent-123', useCaseName: 'Research Agent', useCaseType: 'agent' }] + }; + + expect(createWorkflowApiParams(workflowStepInfo)).toEqual({ + WorkflowParams: { + SystemPrompt: 'You are a customer support coordinator.', + OrchestrationPattern: 'agents-as-tools', + MemoryConfig: { + LongTermEnabled: false + }, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-123', + UseCaseType: 'agent', + UseCaseName: 'Research Agent', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + } + ] + } + } + }); + }); + + it('should create workflow params with multiple agents', () => { + const workflowStepInfo = { + systemPrompt: 'You are a multi-agent coordinator.', + orchestrationPattern: 'agents-as-tools', + memoryEnabled: true, + selectedAgents: [ + { useCaseId: 'agent-123', useCaseName: 'Research Agent', useCaseType: 'agent' }, + { useCaseId: 'agent-456', useCaseName: 'Product Agent', useCaseType: 'agent' }, + { useCaseId: 'workflow-789', useCaseName: 'Support Workflow', useCaseType: 'workflow' } + ] + }; + + expect(createWorkflowApiParams(workflowStepInfo)).toEqual({ + WorkflowParams: { + SystemPrompt: 'You are a multi-agent coordinator.', + OrchestrationPattern: 'agents-as-tools', + MemoryConfig: { + LongTermEnabled: true + }, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-123', + UseCaseType: 'agent', + UseCaseName: 'Research Agent', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + }, + { + UseCaseId: 'agent-456', + UseCaseType: 'agent', + UseCaseName: 'Product Agent', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + }, + { + UseCaseId: 'workflow-789', + UseCaseType: 'workflow', + UseCaseName: 'Support Workflow', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + } + ] + } + } + }); + }); + + it('should handle empty selected agents array', () => { + const workflowStepInfo = { + systemPrompt: 'You are a coordinator.', + orchestrationPattern: 'agents-as-tools', + memoryEnabled: false, + selectedAgents: [] + }; + + expect(createWorkflowApiParams(workflowStepInfo)).toEqual({ + WorkflowParams: { + SystemPrompt: 'You are a coordinator.', + OrchestrationPattern: 'agents-as-tools', + MemoryConfig: { + LongTermEnabled: false + }, + AgentsAsToolsParams: { + Agents: [] + } + } + }); + }); +}); + +describe('mapWorkflowStepInfoFromDeployment', () => { + test('should map complete deployment data correctly with new schema', () => { + const mockDeployment = { + WorkflowParams: { + SystemPrompt: 'You are a customer support coordinator.', + OrchestrationPattern: 'agents-as-tools', + MemoryConfig: { + LongTermEnabled: true + }, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-123', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Support Agent', + UseCaseDescription: 'Handles customer support queries', + AgentBuilderParams: { SystemPrompt: 'You are a support agent.' }, + LlmParams: { ModelProvider: 'Bedrock' } + }, + { + UseCaseId: 'agent-456', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Sales Agent', + UseCaseDescription: 'Handles sales inquiries', + AgentBuilderParams: { SystemPrompt: 'You are a sales agent.' }, + LlmParams: { ModelProvider: 'Bedrock' } + } + ] + } } + }; + + const result = mapWorkflowStepInfoFromDeployment(mockDeployment); + + expect(result).toEqual({ + systemPrompt: 'You are a customer support coordinator.', + orchestrationPattern: 'agents-as-tools', + selectedAgents: [ + { + useCaseId: 'agent-123', + useCaseType: 'AgentBuilder', + useCaseName: 'Support Agent', + useCaseDescription: 'Handles customer support queries', + agentBuilderParams: { SystemPrompt: 'You are a support agent.' }, + llmParams: { ModelProvider: 'Bedrock' } + }, + { + useCaseId: 'agent-456', + useCaseType: 'AgentBuilder', + useCaseName: 'Sales Agent', + useCaseDescription: 'Handles sales inquiries', + agentBuilderParams: { SystemPrompt: 'You are a sales agent.' }, + llmParams: { ModelProvider: 'Bedrock' } + } + ], + memoryEnabled: true, + inError: false + }); + }); + + test('should handle legacy deployment with SelectedAgents', () => { + const mockDeployment = { + WorkflowParams: { + SystemPrompt: 'You are a customer support coordinator.', + OrchestrationPattern: 'agents-as-tools', + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-123', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Research Agent', + UseCaseDescription: 'Handles research queries', + AgentBuilderParams: { SystemPrompt: 'You are a research agent.' }, + LlmParams: { ModelProvider: 'Bedrock' } + }, + { + UseCaseId: 'agent-456', + UseCaseType: 'AgentBuilder', + UseCaseName: 'Support Agent', + UseCaseDescription: 'Handles support queries', + AgentBuilderParams: { SystemPrompt: 'You are a support agent.' }, + LlmParams: { ModelProvider: 'Bedrock' } + } + ] + } + } + }; + + const result = mapWorkflowStepInfoFromDeployment(mockDeployment); + + expect(result).toEqual({ + systemPrompt: 'You are a customer support coordinator.', + orchestrationPattern: 'agents-as-tools', + selectedAgents: [ + { + useCaseId: 'agent-123', + useCaseType: 'AgentBuilder', + useCaseName: 'Research Agent', + useCaseDescription: 'Handles research queries', + agentBuilderParams: { SystemPrompt: 'You are a research agent.' }, + llmParams: { ModelProvider: 'Bedrock' } + }, + { + useCaseId: 'agent-456', + useCaseType: 'AgentBuilder', + useCaseName: 'Support Agent', + useCaseDescription: 'Handles support queries', + agentBuilderParams: { SystemPrompt: 'You are a support agent.' }, + llmParams: { ModelProvider: 'Bedrock' } + } + ], + memoryEnabled: false, + inError: false + }); + }); + + test('should handle deployment without WorkflowParams', () => { + const mockDeployment = { + UseCaseConfig: { + UseCaseName: 'Test Workflow' + } + }; + + const result = mapWorkflowStepInfoFromDeployment(mockDeployment); + + expect(result).toEqual({ + systemPrompt: + "You are an assistant that routes queries to specialized agents. Analyze the user's request and select the most appropriate agent(s) to handle their query based on each agent's capabilities.", + orchestrationPattern: 'agents-as-tools', + selectedAgents: [], + memoryEnabled: false, + inError: false + }); + }); + + test('should handle null deployment', () => { + const result = mapWorkflowStepInfoFromDeployment(null); + + expect(result).toEqual({ + systemPrompt: + "You are an assistant that routes queries to specialized agents. Analyze the user's request and select the most appropriate agent(s) to handle their query based on each agent's capabilities.", + orchestrationPattern: 'agents-as-tools', + selectedAgents: [], + memoryEnabled: false, + inError: false }); }); }); @@ -612,8 +1045,8 @@ describe('createDeployRequestPayload', () => { describe('createBedrockLlmParams', () => { it('should map UI inference type to API inference type correctly', () => { const modelStepInfo = { - bedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START_MODELS, - modelName: 'amazon.titan-text-express-v1' + bedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILES, + inferenceProfileId: 'us.amazon.titan-text-express-v1' }; const result = createBedrockLlmParams(modelStepInfo); @@ -621,8 +1054,8 @@ describe('createBedrockLlmParams', () => { expect(result).toEqual({ ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', - ModelId: 'amazon.titan-text-express-v1' + BedrockInferenceType: 'INFERENCE_PROFILE', + InferenceProfileId: 'us.amazon.titan-text-express-v1' } }); }); @@ -680,8 +1113,8 @@ describe('createBedrockLlmParams', () => { it('should include guardrail parameters when enabled', () => { const modelStepInfo = { - bedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START_MODELS, - modelName: 'amazon.titan-text-express-v1', + bedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILES, + inferenceProfileId: 'us.amazon.titan-text-express-v1', enableGuardrails: true, guardrailIdentifier: 'guardrail-123', guardrailVersion: 'DRAFT' @@ -692,8 +1125,8 @@ describe('createBedrockLlmParams', () => { expect(result).toEqual({ ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', - ModelId: 'amazon.titan-text-express-v1', + BedrockInferenceType: 'INFERENCE_PROFILE', + InferenceProfileId: 'us.amazon.titan-text-express-v1', GuardrailIdentifier: 'guardrail-123', GuardrailVersion: 'DRAFT' } @@ -702,8 +1135,8 @@ describe('createBedrockLlmParams', () => { it('should set guardrail parameters to null when disabled in edit mode', () => { const modelStepInfo = { - bedrockInferenceType: BEDROCK_INFERENCE_TYPES.QUICK_START_MODELS, - modelName: 'amazon.titan-text-express-v1', + bedrockInferenceType: BEDROCK_INFERENCE_TYPES.INFERENCE_PROFILES, + inferenceProfileId: 'us.amazon.titan-text-express-v1', enableGuardrails: false }; @@ -712,15 +1145,15 @@ describe('createBedrockLlmParams', () => { expect(result).toEqual({ ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', - ModelId: 'amazon.titan-text-express-v1', + BedrockInferenceType: 'INFERENCE_PROFILE', + InferenceProfileId: 'us.amazon.titan-text-express-v1', GuardrailIdentifier: null, GuardrailVersion: null } }); }); - it('should default to QUICK_START if inference type mapping not found', () => { + it('should default to OTHER_FOUNDATION if inference type mapping not found', () => { const modelStepInfo = { bedrockInferenceType: 'UNKNOWN_TYPE', modelName: 'amazon.titan-text-express-v1' @@ -731,7 +1164,7 @@ describe('createBedrockLlmParams', () => { expect(result).toEqual({ ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', + BedrockInferenceType: 'OTHER_FOUNDATION', ModelId: 'amazon.titan-text-express-v1' } }); @@ -767,7 +1200,7 @@ describe('createUpdateRequestPayload', () => { Verbose: false, ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', + BedrockInferenceType: 'OTHER_FOUNDATION', GuardrailIdentifier: 'fake-guardrail', GuardrailVersion: 'DRAFT' }, @@ -806,10 +1239,11 @@ describe('createUpdateRequestPayload', () => { UseCaseType: 'Text', UseCaseDescription: 'test use case description', DeployUI: true, - ExistingRestApiId: "a1b2c3", + ExistingRestApiId: 'a1b2c3', FeedbackParams: { FeedbackEnabled: false - } + }, + ProvisionedConcurrencyValue: 0 }); }); @@ -837,7 +1271,7 @@ describe('createUpdateRequestPayload', () => { Verbose: false, ModelProvider: 'Bedrock', BedrockLlmParams: { - BedrockInferenceType: 'QUICK_START', + BedrockInferenceType: 'OTHER_FOUNDATION', GuardrailIdentifier: null, GuardrailVersion: null }, @@ -876,10 +1310,11 @@ describe('createUpdateRequestPayload', () => { UseCaseType: 'Text', UseCaseDescription: 'test use case description', DeployUI: true, - ExistingRestApiId: "a1b2c3", + ExistingRestApiId: 'a1b2c3', FeedbackParams: { FeedbackEnabled: false - } + }, + ProvisionedConcurrencyValue: 0 }); }); @@ -906,10 +1341,97 @@ describe('createUpdateRequestPayload', () => { FeedbackParams: { FeedbackEnabled: false }, + ProvisionedConcurrencyValue: 0, UseCaseType: 'Agent', UseCaseDescription: 'test use case description', DeployUI: true, - ExistingRestApiId: "a1b2c3", + ExistingRestApiId: 'a1b2c3' + }); + }); + + it('should create valid update request payload for MCP Server use case with DeployUI forced to false', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.MCP_SERVER; + formDataCopy.useCase.deployUI = true; // Set to true to test override + formDataCopy.mcpServer = { + creationMethod: 'gateway', + targets: [ + { + targetName: 'test-target-update', + targetDescription: 'Updated test target description', + targetType: 'openApiSchema', + uploadedSchemaKey: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json', + outboundAuth: { + authType: 'API_KEY', + providerArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-target-update-auth-bearer' + } + } + ] + }; + + const payload = createUpdateRequestPayload(formDataCopy, { + 'RestApiEndpoint': 'https://a1b2c3.execute-api.aws-region.amazonaws.com/prod', + 'RestApiRootResourceId': 'd4e5f5' + }); + + expect(payload).toEqual({ + UseCaseType: 'MCPServer', + UseCaseDescription: 'test use case description', + DeployUI: false, // Should be forced to false regardless of input + ExistingRestApiId: 'a1b2c3', + FeedbackParams: { + FeedbackEnabled: false + }, + MCPParams: { + GatewayParams: { + TargetParams: [ + { + TargetName: 'test-target-update', + TargetDescription: 'Updated test target description', + TargetType: 'openApiSchema', + SchemaUri: 'mcp/schemas/openApiSchema/e9b1801d-2516-40fe-859e-a0c7d81da2f3.json', + OutboundAuthParams: { + OutboundAuthProviderArn: + 'arn:aws:bedrock-agentcore:us-east-1:123456789012:token-vault/test-vault/apikeycredentialprovider/test-target-update-auth-bearer', + OutboundAuthProviderType: 'API_KEY' + } + } + ] + } + } + }); + }); + + it('should create valid update request payload for MCP Server use case with runtime method', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.MCP_SERVER; + formDataCopy.useCase.deployUI = false; + formDataCopy.mcpServer = { + creationMethod: 'runtime', + ecrConfig: { + imageUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/updated-mcp-server:v2' + } + }; + + const payload = createUpdateRequestPayload(formDataCopy, { + 'RestApiEndpoint': 'https://a1b2c3.execute-api.aws-region.amazonaws.com/prod', + 'RestApiRootResourceId': 'd4e5f5' + }); + + expect(payload).toEqual({ + UseCaseType: 'MCPServer', + UseCaseDescription: 'test use case description', + DeployUI: false, + ExistingRestApiId: 'a1b2c3', + FeedbackParams: { + FeedbackEnabled: false + }, + MCPParams: { + RuntimeParams: { + EcrUri: '123456789012.dkr.ecr.us-east-1.amazonaws.com/updated-mcp-server:v2' + } + } }); }); }); @@ -1097,4 +1619,594 @@ describe('When transforming the deployment data into the knowledgebase step of t expect(result.scoreThreshold).toBe(0.0); }); }); + + describe('createAgentBuilderApiParams', () => { + it('should create valid agent builder params with system prompt only', () => { + const agentBuilderStepInfo = { + systemPrompt: 'You are a helpful assistant.' + }; + + expect(createAgentBuilderApiParams(agentBuilderStepInfo)).toEqual({ + AgentParams: { + SystemPrompt: 'You are a helpful assistant.', + MemoryConfig: { + LongTermEnabled: undefined + } + } + }); + }); + + it('should create valid agent builder params with MCP servers', () => { + const agentBuilderStepInfo = { + systemPrompt: 'You are a helpful assistant.', + mcpServers: [ + { useCaseId: 'mcp-server-1', url: 'https://example.com/mcp1', type: 'gateway' }, + { useCaseId: 'mcp-server-2', url: 'https://example.com/mcp2', type: 'runtime' } + ] + }; + + expect(createAgentBuilderApiParams(agentBuilderStepInfo)).toEqual({ + AgentParams: { + SystemPrompt: 'You are a helpful assistant.', + MemoryConfig: { + LongTermEnabled: undefined + }, + MCPServers: [ + { UseCaseId: 'mcp-server-1', Url: 'https://example.com/mcp1', Type: 'gateway' }, + { UseCaseId: 'mcp-server-2', Url: 'https://example.com/mcp2', Type: 'runtime' } + ] + } + }); + }); + + it('should create valid agent builder params with memory configuration', () => { + const agentBuilderStepInfo = { + systemPrompt: 'You are a helpful assistant.', + memoryEnabled: true + }; + + expect(createAgentBuilderApiParams(agentBuilderStepInfo)).toEqual({ + AgentParams: { + SystemPrompt: 'You are a helpful assistant.', + MemoryConfig: { + LongTermEnabled: true + } + } + }); + }); + + it('should create complete agent builder params', () => { + const agentBuilderStepInfo = { + systemPrompt: 'You are a comprehensive assistant.', + mcpServers: [{ useCaseId: 'mcp-server-1', url: 'https://example.com/mcp1', type: 'gateway' }], + memoryEnabled: true + }; + + expect(createAgentBuilderApiParams(agentBuilderStepInfo)).toEqual({ + AgentParams: { + SystemPrompt: 'You are a comprehensive assistant.', + MemoryConfig: { + LongTermEnabled: true + }, + MCPServers: [{ UseCaseId: 'mcp-server-1', Url: 'https://example.com/mcp1', Type: 'gateway' }] + } + }); + }); + + it('should handle empty MCP servers array', () => { + const agentBuilderStepInfo = { + systemPrompt: 'You are a helpful assistant.', + mcpServers: [] + }; + + expect(createAgentBuilderApiParams(agentBuilderStepInfo)).toEqual({ + AgentParams: { + SystemPrompt: 'You are a helpful assistant.', + MemoryConfig: { + LongTermEnabled: undefined + } + } + }); + }); + + it('should handle memory disabled', () => { + const agentBuilderStepInfo = { + systemPrompt: 'You are a helpful assistant.', + memoryEnabled: false + }; + + expect(createAgentBuilderApiParams(agentBuilderStepInfo)).toEqual({ + AgentParams: { + SystemPrompt: 'You are a helpful assistant.', + MemoryConfig: { + LongTermEnabled: false + } + } + }); + }); + + it('should handle undefined memory enabled', () => { + const agentBuilderStepInfo = { + systemPrompt: 'You are a helpful assistant.' + }; + + expect(createAgentBuilderApiParams(agentBuilderStepInfo)).toEqual({ + AgentParams: { + SystemPrompt: 'You are a helpful assistant.', + MemoryConfig: { + LongTermEnabled: undefined + } + } + }); + }); + + it('should handle memory enabled without strategy', () => { + const agentBuilderStepInfo = { + systemPrompt: 'You are a helpful assistant.', + memoryEnabled: true + }; + + expect(createAgentBuilderApiParams(agentBuilderStepInfo)).toEqual({ + AgentParams: { + SystemPrompt: 'You are a helpful assistant.', + MemoryConfig: { + LongTermEnabled: true + } + } + }); + }); + }); +}); + +describe('Workflow API Integration Tests', () => { + it('should create valid workflow deploy request payload', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.WORKFLOW; + formDataCopy.model.MultimodalParams = { MultimodalEnabled: false }; + // Ensure model name is properly set for workflow tests + formDataCopy.model.modelName = 'fake-model'; + formDataCopy.workflow = { + systemPrompt: 'You are a customer support coordinator that routes inquiries to specialized agents.', + orchestrationPattern: 'agents-as-tools', + memoryEnabled: false, + selectedAgents: [ + { useCaseId: 'agent-123', useCaseName: 'Research Agent', useCaseType: 'agent' }, + { useCaseId: 'agent-456', useCaseName: 'Product Agent', useCaseType: 'agent' } + ] + }; + + const payload = createDeployRequestPayload(formDataCopy, { + 'RestApiEndpoint': 'https://a1b2c3.execute-api.aws-region.amazonaws.com/prod', + 'RestApiRootResourceId': 'd4e5f5' + }); + + expect(payload).toEqual({ + UseCaseName: 'test-use-case', + UseCaseType: 'Workflow', + UseCaseDescription: 'test use case description', + DeployUI: true, + ExistingRestApiId: 'a1b2c3', + FeedbackParams: { + FeedbackEnabled: false + }, + LlmParams: { + Streaming: true, + Verbose: false, + ModelProvider: 'Bedrock', + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: 'fake-model' + }, + ModelParams: { + 'fake-param': { + Value: '1', + Type: 'integer' + }, + 'fake-param2': { + Value: '0.9', + Type: 'float' + } + }, + Temperature: 0.1, + RAGEnabled: false, + MultimodalParams: { + MultimodalEnabled: false + } + }, + WorkflowParams: { + SystemPrompt: 'You are a customer support coordinator that routes inquiries to specialized agents.', + OrchestrationPattern: 'agents-as-tools', + MemoryConfig: { + LongTermEnabled: false + }, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-123', + UseCaseType: 'agent', + UseCaseName: 'Research Agent', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + }, + { + UseCaseId: 'agent-456', + UseCaseType: 'agent', + UseCaseName: 'Product Agent', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + } + ] + } + } + }); + }); + + it('should create valid workflow update request payload', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.WORKFLOW; + formDataCopy.model.MultimodalParams = { MultimodalEnabled: false }; + // Ensure model name is properly set for workflow tests + formDataCopy.model.modelName = 'fake-model'; + formDataCopy.workflow = { + systemPrompt: 'Updated workflow coordinator prompt.', + orchestrationPattern: 'agents-as-tools', + memoryEnabled: true, + selectedAgents: [{ useCaseId: 'agent-789', useCaseName: 'Updated Agent', useCaseType: 'agent' }] + }; + + const payload = createUpdateRequestPayload(formDataCopy, { + 'RestApiEndpoint': 'https://a1b2c3.execute-api.aws-region.amazonaws.com/prod', + 'RestApiRootResourceId': 'd4e5f5' + }); + + expect(payload).toEqual({ + UseCaseType: 'Workflow', + UseCaseDescription: 'test use case description', + DeployUI: true, + ExistingRestApiId: 'a1b2c3', + FeedbackParams: { + FeedbackEnabled: false + }, + LlmParams: { + Streaming: true, + Verbose: false, + ModelProvider: 'Bedrock', + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + GuardrailIdentifier: null, + GuardrailVersion: null, + ModelId: 'fake-model' + }, + ModelParams: { + 'fake-param': { + Value: '1', + Type: 'integer' + }, + 'fake-param2': { + Value: '0.9', + Type: 'float' + } + }, + Temperature: 0.1, + RAGEnabled: undefined, + MultimodalParams: { + MultimodalEnabled: false + } + }, + WorkflowParams: { + SystemPrompt: 'Updated workflow coordinator prompt.', + OrchestrationPattern: 'agents-as-tools', + MemoryConfig: { + LongTermEnabled: true + }, + AgentsAsToolsParams: { + Agents: [ + { + UseCaseId: 'agent-789', + UseCaseType: 'agent', + UseCaseName: 'Updated Agent', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + } + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + } + ] + } + } + }); + }); + + it('should create workflow payload with maximum agents', () => { + const selectedAgents = Array.from({ length: 10 }, (_, i) => ({ + useCaseId: `agent-${i + 1}`, + useCaseName: `Agent ${i + 1}`, + useCaseType: 'agent' + })); + + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.WORKFLOW; + formDataCopy.workflow = { + systemPrompt: 'Multi-agent coordinator.', + orchestrationPattern: 'agents-as-tools', + selectedAgents + }; + + const payload = createDeployRequestPayload(formDataCopy, {}); + + expect(payload.WorkflowParams.AgentsAsToolsParams.Agents).toHaveLength(10); + expect(payload.WorkflowParams.AgentsAsToolsParams.Agents[0]).toEqual({ + UseCaseId: 'agent-1', + UseCaseType: 'agent', + UseCaseName: 'Agent 1', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + }); + expect(payload.WorkflowParams.AgentsAsToolsParams.Agents[9]).toEqual({ + UseCaseId: 'agent-10', + UseCaseType: 'agent', + UseCaseName: 'Agent 10', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + }); + }); + + it('should create workflow payload with mixed agent and workflow types', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.WORKFLOW; + formDataCopy.workflow = { + systemPrompt: 'Mixed coordinator.', + orchestrationPattern: 'agents-as-tools', + selectedAgents: [ + { useCaseId: 'agent-123', useCaseName: 'Research Agent', useCaseType: 'agent' }, + { useCaseId: 'workflow-456', useCaseName: 'Analysis Workflow', useCaseType: 'workflow' }, + { useCaseId: 'agent-789', useCaseName: 'Report Agent', useCaseType: 'agent' } + ] + }; + + const payload = createDeployRequestPayload(formDataCopy, {}); + + expect(payload.WorkflowParams.AgentsAsToolsParams.Agents).toEqual([ + { + UseCaseId: 'agent-123', + UseCaseType: 'agent', + UseCaseName: 'Research Agent', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + }, + { + UseCaseId: 'workflow-456', + UseCaseType: 'workflow', + UseCaseName: 'Analysis Workflow', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + }, + { + UseCaseId: 'agent-789', + UseCaseType: 'agent', + UseCaseName: 'Report Agent', + UseCaseDescription: undefined, + AgentBuilderParams: { + MemoryConfig: { + LongTermEnabled: false + }, + SystemPrompt: '' + }, + LlmParams: { + BedrockLlmParams: { + BedrockInferenceType: 'OTHER_FOUNDATION', + ModelId: '' + }, + ModelParams: {}, + ModelProvider: 'Bedrock', + RAGEnabled: undefined, + Streaming: false, + Temperature: 0, + Verbose: false + } + } + ]); + }); + + it('should handle workflow payload with empty selected agents', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.WORKFLOW; + formDataCopy.workflow = { + systemPrompt: 'Empty coordinator.', + orchestrationPattern: 'agents-as-tools', + selectedAgents: [] + }; + + const payload = createDeployRequestPayload(formDataCopy, {}); + + expect(payload.WorkflowParams.AgentsAsToolsParams.Agents).toEqual([]); + }); + + it('should handle workflow payload with long system prompt', () => { + const longPrompt = + 'You are a comprehensive customer support coordinator that analyzes customer inquiries, determines the appropriate specialized agents to handle each request, and orchestrates their collaboration to provide complete solutions. '.repeat( + 10 + ); + + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.WORKFLOW; + formDataCopy.workflow = { + systemPrompt: longPrompt, + orchestrationPattern: 'agents-as-tools', + selectedAgents: [{ useCaseId: 'agent-123', useCaseName: 'Support Agent', useCaseType: 'agent' }] + }; + + const payload = createDeployRequestPayload(formDataCopy, {}); + + expect(payload.WorkflowParams.SystemPrompt).toBe(longPrompt); + expect(payload.WorkflowParams.SystemPrompt.length).toBeGreaterThan(1000); + }); + + it('should handle MCP Server payload when model step is undefined', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.MCP_SERVER; + formDataCopy.mcpServer = { + creationMethod: 'gateway', + targets: [ + { + targetName: 'test-target', + targetDescription: 'Test target description', + targetType: 'lambda', + lambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function' + } + ] + }; + + // Remove model step to simulate MCP Server use case where model step is not present + delete formDataCopy.model; + + // This should not throw an error when accessing stepsInfo.model.multimodalEnabled + expect(() => { + const payload = createDeployRequestPayload(formDataCopy, {}); + expect(payload).toBeDefined(); + expect(payload.UseCaseType).toBe('MCPServer'); + expect(payload.DeployUI).toBe(false); + }).not.toThrow(); + }); + + it('should handle update request payload when model step is undefined', () => { + const formDataCopy = cloneDeep(sampleDeployUseCaseFormData); + formDataCopy.useCase.useCaseType = USECASE_TYPES.MCP_SERVER; + formDataCopy.mcpServer = { + creationMethod: 'gateway', + targets: [ + { + targetName: 'test-target', + targetDescription: 'Test target description', + targetType: 'lambda', + lambdaArn: 'arn:aws:lambda:us-east-1:123456789012:function:test-function' + } + ] + }; + + // Remove model step to simulate MCP Server use case where model step is not present + delete formDataCopy.model; + + // This should not throw an error when accessing stepsInfo.model.multimodalEnabled + expect(() => { + const payload = createUpdateRequestPayload(formDataCopy, {}); + expect(payload).toBeDefined(); + expect(payload.UseCaseType).toBe('MCPServer'); + expect(payload.DeployUI).toBe(false); + }).not.toThrow(); + }); }); diff --git a/source/ui-deployment/src/components/common/Notifications.tsx b/source/ui-deployment/src/components/common/Notifications.tsx new file mode 100644 index 00000000..7a20b8c9 --- /dev/null +++ b/source/ui-deployment/src/components/common/Notifications.tsx @@ -0,0 +1,46 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import React from 'react'; +import { Flashbar, FlashbarProps, Button } from '@cloudscape-design/components'; +import { NotificationItem } from '../../hooks/useNotifications'; + +export interface NotificationsProps { + notifications: NotificationItem[]; + onDismiss: (id: string) => void; + stackItems?: boolean; + maxItems?: number; +} + +export const Notifications: React.FC = ({ + notifications, + onDismiss, + stackItems = false, + maxItems +}) => { + const displayNotifications = maxItems ? notifications.slice(-maxItems) : notifications; + + const flashbarItems: FlashbarProps.MessageDefinition[] = displayNotifications.map((notification) => ({ + type: notification.type, + header: notification.header, + content: notification.content, + dismissible: notification.dismissible ?? true, + dismissLabel: 'Dismiss notification', + onDismiss: () => onDismiss(notification.id), + loading: notification.loading, + action: notification.action ? ( + + ) : undefined, + id: notification.id + })); + + if (flashbarItems.length === 0) { + return null; + } + + return ; +}; + +export default Notifications; diff --git a/source/ui-deployment/src/components/common/index.ts b/source/ui-deployment/src/components/common/index.ts new file mode 100644 index 00000000..6695be64 --- /dev/null +++ b/source/ui-deployment/src/components/common/index.ts @@ -0,0 +1,27 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { useNotifications } from '../../hooks/useNotifications'; +export type { NotificationItem, NotificationOptions } from '../../hooks/useNotifications'; + +export { Notifications } from './Notifications'; +export type { NotificationsProps } from './Notifications'; + +export { + createApiErrorNotification, + createValidationErrorNotification, + createSuccessWithActionNotification, + createProgressNotification, + createNetworkErrorNotification, + createPermissionErrorNotification, + createTimeoutNotification, + createBulkOperationNotification +} from '../../utils/notificationHelpers'; + +import { useNotifications } from '../../hooks/useNotifications'; +import { Notifications } from './Notifications'; + +export default { + useNotifications, + Notifications +}; diff --git a/source/ui-deployment/src/components/commons/__tests__/notifications.test.tsx b/source/ui-deployment/src/components/commons/__tests__/notifications.test.tsx new file mode 100644 index 00000000..6d7933df --- /dev/null +++ b/source/ui-deployment/src/components/commons/__tests__/notifications.test.tsx @@ -0,0 +1,142 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import React from 'react'; +import { render, screen } from '@testing-library/react'; +import { describe, test, expect, vi } from 'vitest'; +import { Notifications } from '../notifications'; +import { DEPLOYMENT_STATUS_NOTIFICATION } from '@/utils/constants'; + +describe('Notifications', () => { + const mockOnSuccessButtonAction = vi.fn(); + + afterEach(() => { + vi.clearAllMocks(); + }); + + test('renders success notification', () => { + render( + + ); + + expect(screen.getByText('Use case deployment request submitted successfully. You can view the deployment status in the deployment dashboard.')).toBeInTheDocument(); + }); + + test('renders failure notification', () => { + render( + + ); + + expect(screen.getByText('Failed to deploy use case. Please contact your administrator for support.')).toBeInTheDocument(); + }); + + test('renders pending notification', () => { + render( + + ); + + expect(screen.getByText('Deployment request is in progress..')).toBeInTheDocument(); + }); + + test('renders schema upload pending notification for single file', () => { + render( + + ); + + expect(screen.getByText('MCP schema file is being uploaded...')).toBeInTheDocument(); + expect(screen.getByText('Uploading Schema File')).toBeInTheDocument(); + }); + + test('renders schema upload pending notification for multiple files', () => { + render( + + ); + + expect(screen.getByText('MCP schema files are being uploaded...')).toBeInTheDocument(); + expect(screen.getByText('Uploading Schema Files')).toBeInTheDocument(); + }); + + test('renders schema upload failure notification for single file', () => { + render( + + ); + + expect(screen.getByText('MCP schema file upload failed. Please check your file and try again.')).toBeInTheDocument(); + expect(screen.getByText('Schema Upload Failed')).toBeInTheDocument(); + }); + + test('renders schema upload failure notification for multiple files', () => { + render( + + ); + + expect(screen.getByText('MCP schema files upload failed. Please check your files and try again.')).toBeInTheDocument(); + expect(screen.getByText('Schema Upload Failed')).toBeInTheDocument(); + }); + + test('renders custom schema upload error message', () => { + const customErrorMessage = 'MCP schema file upload failed for: test-file.json'; + + render( + + ); + + expect(screen.getByText(customErrorMessage)).toBeInTheDocument(); + }); + + test('handles zero file count gracefully', () => { + render( + + ); + + // Should default to plural form for 0 files + expect(screen.getByText('MCP schema files are being uploaded...')).toBeInTheDocument(); + }); + + test('does not render notification when status is empty', () => { + const { container } = render( + + ); + + // Should render empty flashbar + expect(container.firstChild).toBeInTheDocument(); + expect(screen.queryByText('Use case deployed successfully')).not.toBeInTheDocument(); + }); +}); \ No newline at end of file diff --git a/source/ui-deployment/src/components/commons/deploy-confirmation-modal.jsx b/source/ui-deployment/src/components/commons/deploy-confirmation-modal.jsx index 968dca8f..bf6276fc 100644 --- a/source/ui-deployment/src/components/commons/deploy-confirmation-modal.jsx +++ b/source/ui-deployment/src/components/commons/deploy-confirmation-modal.jsx @@ -7,7 +7,14 @@ import { useContext } from 'react'; import { INTERNAL_USER_GENAI_POLICY_URL, LEGAL_DISCLAIMER, deploymentActionText } from '../../utils/constants'; -export function ConfirmDeployModal({ visible, onDiscard, onConfirm, deploymentAction, isThirdPartyProvider }) { +export function ConfirmDeployModal({ + visible, + onDiscard, + onConfirm, + deploymentAction, + isThirdPartyProvider, + modelData +}) { const { state: { runtimeConfig } } = useContext(HomeContext); diff --git a/source/ui-deployment/src/components/commons/full-page-header.tsx b/source/ui-deployment/src/components/commons/full-page-header.tsx index 42a3ca1d..9e935723 100644 --- a/source/ui-deployment/src/components/commons/full-page-header.tsx +++ b/source/ui-deployment/src/components/commons/full-page-header.tsx @@ -5,7 +5,8 @@ import { Button, Header, HeaderProps, SpaceBetween } from '@cloudscape-design/co import React, { useContext, useEffect } from 'react'; import { useNavigate } from 'react-router-dom'; import HomeContext from '../../contexts/home.context'; -import { CFN_STACK_STATUS_INDICATOR, DEPLOYMENT_ACTIONS, USECASE_TYPE_ROUTE } from '../../utils/constants'; +import { CFN_STACK_STATUS_INDICATOR, DEPLOYMENT_ACTIONS } from '../../utils/constants'; +import { getUseCaseRoute } from '../../utils/utils'; import { statusIndicatorTypeSelector } from '../dashboard/deployments'; import { InfoLink } from './info-link'; @@ -38,12 +39,10 @@ export function FullPageHeader({ dispatch: homeDispatch } = useContext(HomeContext); - type UseCaseType = keyof typeof USECASE_TYPE_ROUTE; - const navigateWizardDestination = - USECASE_TYPE_ROUTE[selectedDeployment.UseCaseType?.toUpperCase() as UseCaseType] ?? USECASE_TYPE_ROUTE.TEXT; + const navigateWizardDestination = getUseCaseRoute(selectedDeployment.UseCaseType); function handleOnDeploymentIdClick() { - navigate(`/deployment-details/${selectedDeployment.UseCaseId}`); + navigate(`/deployment-details/${selectedDeployment.UseCaseType}/${selectedDeployment.UseCaseId}`); } function handleEditDeploymentClick() { diff --git a/source/ui-deployment/src/components/commons/notifications.tsx b/source/ui-deployment/src/components/commons/notifications.tsx index e3fd6521..be3094de 100644 --- a/source/ui-deployment/src/components/commons/notifications.tsx +++ b/source/ui-deployment/src/components/commons/notifications.tsx @@ -1,13 +1,22 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { useId, useState } from 'react'; +import { useId, useState, useEffect } from 'react'; import Flashbar, { FlashbarProps } from '@cloudscape-design/components/flashbar'; import { Button } from '@cloudscape-design/components'; import { DEPLOYMENT_STATUS_NOTIFICATION } from '../../utils/constants'; -function useNotifications(onSuccessButtonAction: any, status = '') { +function useNotifications(onSuccessButtonAction: any, status = '', schemaUploadErrorMessage = '', fileCount = 0) { const [isDismissed, setDismissed] = useState(false); + const [lastStatus, setLastStatus] = useState(status); + + // Reset dismissed state when status changes + useEffect(() => { + if (status !== lastStatus) { + setDismissed(false); + setLastStatus(status); + } + }, [status, lastStatus]); const notifications: Array = []; const notificationId = useId(); @@ -45,6 +54,33 @@ function useNotifications(onSuccessButtonAction: any, status = '') { dismissible: false, id: notificationId }); + } else if (status === DEPLOYMENT_STATUS_NOTIFICATION.SCHEMA_UPLOAD_PENDING && !isDismissed) { + const isPlural = fileCount !== 1; + const fileText = isPlural ? 'files' : 'file'; + notifications.push({ + type: 'info', + loading: true, + content: `MCP schema ${fileText} ${isPlural ? 'are' : 'is'} being uploaded...`, + header: `Uploading Schema ${isPlural ? 'Files' : 'File'}`, + statusIconAriaLabel: 'info', + dismissLabel: 'Dismiss message', + dismissible: false, + id: notificationId + }); + } else if (status === DEPLOYMENT_STATUS_NOTIFICATION.SCHEMA_UPLOAD_FAILURE && !isDismissed) { + const errorContent = + schemaUploadErrorMessage || + `MCP schema ${fileCount === 1 ? 'file' : 'files'} upload failed. Please check your ${fileCount === 1 ? 'file' : 'files'} and try again.`; + notifications.push({ + type: 'error', + content: errorContent, + header: 'Schema Upload Failed', + statusIconAriaLabel: 'error', + dismissLabel: 'Dismiss message', + dismissible: true, + onDismiss: () => setDismissed(true), + id: notificationId + }); } return notifications; @@ -53,9 +89,16 @@ function useNotifications(onSuccessButtonAction: any, status = '') { export interface NotificationsProps { status: string; onSuccessButtonAction: any; + schemaUploadErrorMessage?: string; + fileCount?: number; } -export function Notifications({ status, onSuccessButtonAction }: NotificationsProps) { - const notifications = useNotifications(onSuccessButtonAction, status); +export function Notifications({ + status, + onSuccessButtonAction, + schemaUploadErrorMessage, + fileCount = 0 +}: NotificationsProps) { + const notifications = useNotifications(onSuccessButtonAction, status, schemaUploadErrorMessage, fileCount); return ; } diff --git a/source/ui-deployment/src/components/dashboard/DashboardView.jsx b/source/ui-deployment/src/components/dashboard/DashboardView.jsx index 979b6ea7..fb3b0ba5 100644 --- a/source/ui-deployment/src/components/dashboard/DashboardView.jsx +++ b/source/ui-deployment/src/components/dashboard/DashboardView.jsx @@ -236,7 +236,8 @@ export default function DashboardView() { const [toolsOpen, setToolsOpen] = useState(false); const handleOnDeploymentIdClick = (deploymentItem) => { - navigate(`/deployment-details/${deploymentItem.UseCaseId}`); + const useCaseType = deploymentItem.UseCaseType ?? USECASE_TYPES.TEXT; + navigate(`/deployment-details/${useCaseType}/${deploymentItem.UseCaseId}`); }; const createDetailsPageLink = (item) => { diff --git a/source/ui-deployment/src/components/useCaseDetails/UseCaseView.jsx b/source/ui-deployment/src/components/useCaseDetails/UseCaseView.jsx index 2fa78253..c81b334c 100644 --- a/source/ui-deployment/src/components/useCaseDetails/UseCaseView.jsx +++ b/source/ui-deployment/src/components/useCaseDetails/UseCaseView.jsx @@ -1,10 +1,18 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -import { createRef, useState, useContext, useEffect, useRef } from 'react'; +import { createRef, useState, useContext, useEffect, useRef, useMemo } from 'react'; import { useNavigate, useParams } from 'react-router-dom'; -import { Alert, AppLayout, Button, ContentLayout, SpaceBetween, StatusIndicator, Tabs } from '@cloudscape-design/components'; +import { + Alert, + AppLayout, + Button, + ContentLayout, + SpaceBetween, + StatusIndicator, + Tabs +} from '@cloudscape-design/components'; import { Navigation, Notifications } from '../commons/common-components'; import { appLayoutAriaLabels } from '../../i18n-strings'; @@ -12,12 +20,8 @@ import { ToolsContent } from '../../utils/tools-content'; import HomeContext from '../../contexts/home.context'; import { parseStackName } from '../commons/table-config'; import { DeleteDeploymentModal, onDeleteConfirm } from '../commons/delete-modal'; -import { - CFN_STACK_STATUS_INDICATOR, - DEPLOYMENT_ACTIONS, - USECASE_TYPE_ROUTE, - USECASE_TYPES -} from '../../utils/constants'; +import { CFN_STACK_STATUS_INDICATOR, DEPLOYMENT_ACTIONS, USECASE_TYPES } from '../../utils/constants'; +import { getUseCaseRoute } from '../../utils/utils'; import { statusIndicatorTypeSelector } from '../dashboard/deployments'; import { useUseCaseDetailsQuery } from '@/hooks/useQueries'; @@ -29,15 +33,22 @@ import { Prompt } from './prompt/Prompt'; import { PageHeader } from './layout/PageHeader'; import { Breadcrumbs } from './layout/Breadcrumbs'; import { Agent } from './agent/Agent'; +import { Targets } from './targets'; +import { Gateway } from './gateway'; +import { Runtime } from './runtime'; +import { MCPs } from './mcps'; +import { AgentDetails } from './agentDetails'; +import { WorkflowDetails } from './workflowDetails'; +import { WorkflowOrchestration } from './workflowOrchestration'; export default function UseCaseView() { - const { useCaseId } = useParams(); // Get useCaseId from URL params + const { useCaseId, useCaseType } = useParams(); // Get useCaseId and useCaseType from URL params const { data: apiResponse, isLoading, isSuccess, error - } = useUseCaseDetailsQuery(useCaseId, { + } = useUseCaseDetailsQuery(useCaseId, useCaseType, { refetchOnWindowFocus: false }); @@ -54,7 +65,9 @@ export default function UseCaseView() { const hasUpdatedRef = useRef(false); - const selectedDeployment = isSuccess ? mapApiResponseToSelectedDeployment(apiResponse) : null; + const selectedDeployment = useMemo(() => { + return isSuccess ? mapApiResponseToSelectedDeployment(apiResponse) : null; + }, [isSuccess, apiResponse]); // Update context when data is successfully loaded useEffect(() => { @@ -67,10 +80,10 @@ export default function UseCaseView() { } }, [isSuccess, selectedDeployment, homeDispatch]); - // Reset the ref when the ID changes + // Reset the ref when the ID or type changes useEffect(() => { hasUpdatedRef.current = false; - }, [useCaseId]); + }, [useCaseId, useCaseType]); // Handle loading state if (isLoading) { @@ -94,11 +107,14 @@ export default function UseCaseView() { // Handle case where data is not available after loading if (!selectedDeployment) { - return
No deployment details found for ID: {useCaseId}
; + return ( +
+ No deployment details found for ID: {useCaseId} (Type: {useCaseType}) +
+ ); } - const navigateDestination = - USECASE_TYPE_ROUTE[selectedDeployment.UseCaseType?.toUpperCase()] ?? USECASE_TYPE_ROUTE.TEXT; + const navigateDestination = getUseCaseRoute(selectedDeployment.UseCaseType); const onDeleteInit = () => setShowDeleteModal(true); const onDeleteDiscard = () => setShowDeleteModal(false); @@ -147,6 +163,98 @@ export default function UseCaseView() { ]; } + if (selectedDeployment.UseCaseType === USECASE_TYPES.AGENT_BUILDER) { + tabs = [ + { + label: 'Agent Details', + id: 'agentDetails', + content: ( + + ), + key: 'agentDetails' + }, + { + label: 'Tools and Resources', + id: 'mcps', + content: , + key: 'mcps' + }, + { + label: 'Model', + id: 'model', + content: , + key: 'model' + } + ]; + } + + if (selectedDeployment.UseCaseType === USECASE_TYPES.WORKFLOW) { + tabs = [ + { + label: 'Workflow Configuration', + id: 'workflowDetails', + content: ( + + ), + key: 'workflowDetails' + }, + { + label: 'Multi-Agent Orchestration', + id: 'workflowOrchestration', + content: ( + + ), + key: 'workflowOrchestration' + }, + { + label: 'Model', + id: 'model', + content: , + key: 'model' + } + ]; + } + + if (selectedDeployment.UseCaseType === USECASE_TYPES.MCP_SERVER && selectedDeployment.MCPParams?.GatewayParams) { + tabs = [ + { + label: 'Gateway', + id: 'gateway', + content: ( + + ), + key: 'gateway' + }, + { + label: 'Targets', + id: 'targets', + content: ( + + ), + key: 'targets' + } + ]; + } + + if (selectedDeployment.UseCaseType === USECASE_TYPES.MCP_SERVER && selectedDeployment.MCPParams?.RuntimeParams) { + tabs = [ + { + label: 'Runtime', + id: 'runtime', + content: ( + + ), + key: 'runtime' + } + ]; + } + const onEditClickAction = () => { homeDispatch({ field: 'selectedDeployment', @@ -176,6 +284,7 @@ export default function UseCaseView() { }; const currentDeploymentStatus = statusIndicatorTypeSelector(selectedDeployment.status); + const isEditEnabled = currentDeploymentStatus === CFN_STACK_STATUS_INDICATOR.SUCCESS || currentDeploymentStatus === CFN_STACK_STATUS_INDICATOR.WARNING; diff --git a/source/ui-deployment/src/components/useCaseDetails/agentDetails/AgentDetails.tsx b/source/ui-deployment/src/components/useCaseDetails/agentDetails/AgentDetails.tsx new file mode 100644 index 00000000..d4782fb8 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/agentDetails/AgentDetails.tsx @@ -0,0 +1,53 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { InfoLink } from '@/components/commons'; +import { Container, Header, SpaceBetween } from '@cloudscape-design/components'; +import { MemoryDetails } from '../memory/MemoryDetails'; +import { SystemPromptDetails } from '../systemPrompt/SystemPromptDetails'; + +export interface AgentDetailsProps { + loadHelpPanelContent: (index: number) => void; + selectedDeployment: any; +} + +export function AgentDetails({ loadHelpPanelContent, selectedDeployment }: AgentDetailsProps) { + return ( + + loadHelpPanelContent(1)} + ariaLabel={'Information about Memory Configuration.'} + /> + } + > + Memory Configuration + + } + > + + + loadHelpPanelContent(1)} + ariaLabel={'Information about System Prompt.'} + /> + } + > + System Prompt + + } + > + + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/agentDetails/index.ts b/source/ui-deployment/src/components/useCaseDetails/agentDetails/index.ts new file mode 100644 index 00000000..d50ce6a9 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/agentDetails/index.ts @@ -0,0 +1,4 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { AgentDetails } from './AgentDetails'; diff --git a/source/ui-deployment/src/components/useCaseDetails/gateway/Gateway.tsx b/source/ui-deployment/src/components/useCaseDetails/gateway/Gateway.tsx new file mode 100644 index 00000000..a81a81cc --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/gateway/Gateway.tsx @@ -0,0 +1,35 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { InfoLink } from '@/components/commons'; +import { Container, Header, SpaceBetween } from '@cloudscape-design/components'; +import { GatewayDetails } from './GatewayDetails'; + +export interface GatewayProps { + loadHelpPanelContent: (index: number) => void; + selectedDeployment: any; +} + +export function Gateway({ loadHelpPanelContent, selectedDeployment }: GatewayProps) { + return ( + + loadHelpPanelContent(1)} + ariaLabel={'Information about Agentcore Gateway.'} + /> + } + > + Gateway Configuration + + } + > + + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/gateway/GatewayDetails.tsx b/source/ui-deployment/src/components/useCaseDetails/gateway/GatewayDetails.tsx new file mode 100644 index 00000000..27a06ddd --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/gateway/GatewayDetails.tsx @@ -0,0 +1,28 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ColumnLayout, SpaceBetween } from '@cloudscape-design/components'; +import { ValueWithLabel } from '../../../utils/ValueWithLabel'; + +export interface GatewayDetailsProps { + selectedDeployment: any; +} + +export function GatewayDetails({ selectedDeployment }: GatewayDetailsProps) { + const gatewayParams = selectedDeployment?.MCPParams?.GatewayParams; + + return ( + + + + {gatewayParams?.GatewayId || 'N/A'} + + + + + {gatewayParams?.GatewayUrl || 'N/A'} + + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/gateway/index.ts b/source/ui-deployment/src/components/useCaseDetails/gateway/index.ts new file mode 100644 index 00000000..087ffa6c --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/gateway/index.ts @@ -0,0 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { GatewayDetails } from './GatewayDetails'; +export { Gateway } from './Gateway'; diff --git a/source/ui-deployment/src/components/useCaseDetails/general/GeneralConfig.tsx b/source/ui-deployment/src/components/useCaseDetails/general/GeneralConfig.tsx index da0f5d51..38884c9f 100644 --- a/source/ui-deployment/src/components/useCaseDetails/general/GeneralConfig.tsx +++ b/source/ui-deployment/src/components/useCaseDetails/general/GeneralConfig.tsx @@ -165,8 +165,25 @@ export const GeneralConfig = ({ selectedDeployment, runtimeConfig }: Partial )} + {'ProvisionedConcurrencyValue' in (selectedDeployment ?? {}) && ( + + {selectedDeployment.ProvisionedConcurrencyValue > 0 + ? `Enabled (${selectedDeployment.ProvisionedConcurrencyValue})` + : 'Disabled'} + + )} + + {selectedDeployment.UseCaseType === USECASE_TYPES.MCP_SERVER && ( + + {selectedDeployment.MCPParams?.GatewayParams ? 'Gateway' : 'Runtime'} + + + )} + {'FeedbackEnabled' in (selectedDeployment.FeedbackParams ?? {}) && ( - {getBooleanString(selectedDeployment.FeedbackParams.FeedbackEnabled)} + + {getBooleanString(selectedDeployment.FeedbackParams.FeedbackEnabled)} + )} ); diff --git a/source/ui-deployment/src/components/useCaseDetails/mcps/MCPItem.tsx b/source/ui-deployment/src/components/useCaseDetails/mcps/MCPItem.tsx new file mode 100644 index 00000000..7ca2a814 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/mcps/MCPItem.tsx @@ -0,0 +1,43 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ColumnLayout, Container, Link } from '@cloudscape-design/components'; +import { ValueWithLabel } from '../../../utils/ValueWithLabel'; + +interface MCPItemProps { + mcpServer: any; + index: number; +} + +export const MCPItem = ({ mcpServer, index }: MCPItemProps) => { + const handleLinkClick = (e: any) => { + e.preventDefault(); + const url = `/deployment-details/MCPServer/${mcpServer.UseCaseId}`; + window.open(url, '_blank'); + }; + + return ( + + + + {mcpServer.Type || 'N/A'} + + + {mcpServer.UseCaseName || 'N/A'} + + + {mcpServer.UseCaseId ? ( + + {mcpServer.UseCaseId} + + ) : ( + 'N/A' + )} + + + {mcpServer.Url || 'N/A'} + + + + ); +}; diff --git a/source/ui-deployment/src/components/useCaseDetails/mcps/MCPs.tsx b/source/ui-deployment/src/components/useCaseDetails/mcps/MCPs.tsx new file mode 100644 index 00000000..8abf78ea --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/mcps/MCPs.tsx @@ -0,0 +1,53 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { InfoLink } from '@/components/commons'; +import { Container, Header, SpaceBetween } from '@cloudscape-design/components'; +import { MCPsList } from './MCPsList'; +import { ToolsList } from './ToolsList'; + +export interface MCPsProps { + loadHelpPanelContent: (index: number) => void; + selectedDeployment: any; +} + +export function MCPs({ loadHelpPanelContent, selectedDeployment }: MCPsProps) { + return ( + + loadHelpPanelContent(1)} + ariaLabel={'Information about Tools and Resources.'} + /> + } + > + MCP Servers + + } + > + + + loadHelpPanelContent(2)} + ariaLabel={'Information about Strands Tools.'} + /> + } + > + Strands Tools + + } + > + + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/mcps/MCPsList.tsx b/source/ui-deployment/src/components/useCaseDetails/mcps/MCPsList.tsx new file mode 100644 index 00000000..e7ba8853 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/mcps/MCPsList.tsx @@ -0,0 +1,31 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Box, SpaceBetween } from '@cloudscape-design/components'; +import { MCPItem } from './MCPItem'; + +export interface MCPsListProps { + selectedDeployment: any; +} + +export function MCPsList({ selectedDeployment }: MCPsListProps) { + const mcpServers = selectedDeployment?.AgentBuilderParams?.MCPServers || []; + + if (mcpServers.length === 0) { + return ( + + + No MCP servers configured for this agent. + + + ); + } + + return ( + + {mcpServers.map((mcpServer: any, index: number) => ( + + ))} + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/mcps/ToolItem.tsx b/source/ui-deployment/src/components/useCaseDetails/mcps/ToolItem.tsx new file mode 100644 index 00000000..6c7dc025 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/mcps/ToolItem.tsx @@ -0,0 +1,21 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ValueWithLabel } from '../../../utils/ValueWithLabel'; + +interface Tool { + ToolId: string; +} + +interface ToolItemProps { + tool: Tool; + index: number; +} + +export const ToolItem = ({ tool, index }: ToolItemProps) => { + return ( + + {tool?.ToolId || 'N/A'} + + ); +}; diff --git a/source/ui-deployment/src/components/useCaseDetails/mcps/ToolsList.tsx b/source/ui-deployment/src/components/useCaseDetails/mcps/ToolsList.tsx new file mode 100644 index 00000000..ab4f88f8 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/mcps/ToolsList.tsx @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Box, ColumnLayout, SpaceBetween } from '@cloudscape-design/components'; +import { ToolItem } from './ToolItem'; + +export interface ToolsListProps { + selectedDeployment: any; +} + +export function ToolsList({ selectedDeployment }: ToolsListProps) { + const tools = selectedDeployment?.AgentBuilderParams?.Tools || []; + + if (tools.length === 0) { + return ( + + + No Strands Tools configured for this agent. + + + ); + } + + return ( + + + {tools.map((tool: any, index: number) => ( + + ))} + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/mcps/index.ts b/source/ui-deployment/src/components/useCaseDetails/mcps/index.ts new file mode 100644 index 00000000..c4efc2fc --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/mcps/index.ts @@ -0,0 +1,7 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { MCPs } from './MCPs'; +export { MCPsList } from './MCPsList'; +export { ToolsList } from './ToolsList'; +export { ToolItem } from './ToolItem'; diff --git a/source/ui-deployment/src/components/useCaseDetails/memory/Memory.tsx b/source/ui-deployment/src/components/useCaseDetails/memory/Memory.tsx new file mode 100644 index 00000000..7654d271 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/memory/Memory.tsx @@ -0,0 +1,35 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { InfoLink } from '@/components/commons'; +import { Container, Header, SpaceBetween } from '@cloudscape-design/components'; +import { MemoryDetails } from './MemoryDetails'; + +export interface MemoryProps { + loadHelpPanelContent: (index: number) => void; + selectedDeployment: any; +} + +export function Memory({ loadHelpPanelContent, selectedDeployment }: MemoryProps) { + return ( + + loadHelpPanelContent(1)} + ariaLabel={'Information about Memory Configuration.'} + /> + } + > + Memory Configuration + + } + > + + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/memory/MemoryDetails.tsx b/source/ui-deployment/src/components/useCaseDetails/memory/MemoryDetails.tsx new file mode 100644 index 00000000..65b51de2 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/memory/MemoryDetails.tsx @@ -0,0 +1,30 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Box, ColumnLayout } from '@cloudscape-design/components'; +import { ValueWithLabel } from '../../../utils/ValueWithLabel'; + +export interface MemoryDetailsProps { + selectedDeployment: any; +} + +export function MemoryDetails({ selectedDeployment }: MemoryDetailsProps) { + const memoryConfig = selectedDeployment?.AgentBuilderParams?.MemoryConfig; + + if (!memoryConfig) { + return ( + + No memory configuration + This agent has no memory configuration set. + + ); + } + + return ( + + + {memoryConfig.LongTermEnabled !== undefined ? (memoryConfig.LongTermEnabled ? 'Yes' : 'No') : 'N/A'} + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/memory/index.ts b/source/ui-deployment/src/components/useCaseDetails/memory/index.ts new file mode 100644 index 00000000..aaa226a5 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/memory/index.ts @@ -0,0 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { Memory } from './Memory'; +export { MemoryDetails } from './MemoryDetails'; diff --git a/source/ui-deployment/src/components/useCaseDetails/model/BedrockDetails.tsx b/source/ui-deployment/src/components/useCaseDetails/model/BedrockDetails.tsx index 8df0539a..23aa5e4d 100644 --- a/source/ui-deployment/src/components/useCaseDetails/model/BedrockDetails.tsx +++ b/source/ui-deployment/src/components/useCaseDetails/model/BedrockDetails.tsx @@ -24,10 +24,10 @@ export const BedrockDetails = ({ selectedDeployment }: Partial = { - 'QUICK_START': 'Quick Start Models', - 'OTHER_FOUNDATION': 'Other Foundation Models', + 'OTHER_FOUNDATION': 'Foundation Models', 'INFERENCE_PROFILE': 'Inference Profiles', - 'PROVISIONED': 'Provisioned Models' + 'PROVISIONED': 'Provisioned Models', + 'QUICK_START': 'Foundation Models' // Legacy mapping for backward compatibility }; return ( @@ -42,6 +42,7 @@ export const BedrockDetails = ({ selectedDeployment }: Partial{bedrockParams.ModelId} )} diff --git a/source/ui-deployment/src/components/useCaseDetails/model/ModelDetails.tsx b/source/ui-deployment/src/components/useCaseDetails/model/ModelDetails.tsx index be0a9b63..7885a766 100644 --- a/source/ui-deployment/src/components/useCaseDetails/model/ModelDetails.tsx +++ b/source/ui-deployment/src/components/useCaseDetails/model/ModelDetails.tsx @@ -2,12 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 import { ColumnLayout, SpaceBetween, StatusIndicator } from '@cloudscape-design/components'; -import { BEDROCK_MODEL_PROVIDER_NAME, SAGEMAKER_MODEL_PROVIDER_NAME } from '../../../utils/constants'; +import { BEDROCK_MODEL_PROVIDER_NAME, SAGEMAKER_MODEL_PROVIDER_NAME, MULTIMODAL_SUPPORTED_USE_CASES } from '../../../utils/constants'; import { BedrockDetails } from './BedrockDetails'; import { SageMakerDetails } from './SageMakerDetails'; import { FormattedModelParams } from './FormattedModelParams'; import { ValueWithLabel } from '../../../utils/ValueWithLabel'; import { BaseDetailsContainerProps } from '../types'; +import { getBooleanString } from '@/components/wizard/utils'; /** * Renders the model details section of the deployment details view. @@ -58,6 +59,11 @@ export const ModelDetails = ({ selectedDeployment }: Partial {modelComponent} + {MULTIMODAL_SUPPORTED_USE_CASES.includes(selectedDeployment.UseCaseType) && ( + + {getBooleanString(selectedDeployment.LlmParams.MultimodalParams?.MultimodalEnabled)} + + )} {modelParams.length > 0 && ( {modelParams.slice(0, Math.ceil(modelParams.length / 2))} diff --git a/source/ui-deployment/src/components/useCaseDetails/runtime/Runtime.tsx b/source/ui-deployment/src/components/useCaseDetails/runtime/Runtime.tsx new file mode 100644 index 00000000..072a3257 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/runtime/Runtime.tsx @@ -0,0 +1,35 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { InfoLink } from '@/components/commons'; +import { Container, Header, SpaceBetween } from '@cloudscape-design/components'; +import { RuntimeDetails } from './RuntimeDetails'; + +export interface RuntimeProps { + loadHelpPanelContent: (index: number) => void; + selectedDeployment: any; +} + +export function Runtime({ loadHelpPanelContent, selectedDeployment }: RuntimeProps) { + return ( + + loadHelpPanelContent(1)} + ariaLabel={'Information about Runtime Configuration.'} + /> + } + > + Runtime Configuration + + } + > + + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/runtime/RuntimeDetails.tsx b/source/ui-deployment/src/components/useCaseDetails/runtime/RuntimeDetails.tsx new file mode 100644 index 00000000..2a570df9 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/runtime/RuntimeDetails.tsx @@ -0,0 +1,37 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ColumnLayout, SpaceBetween } from '@cloudscape-design/components'; +import { ValueWithLabel } from '../../../utils/ValueWithLabel'; + +export interface RuntimeDetailsProps { + selectedDeployment: any; +} + +export function RuntimeDetails({ selectedDeployment }: RuntimeDetailsProps) { + const runtimeParams = selectedDeployment?.MCPParams?.RuntimeParams; + + return ( + + + + {runtimeParams?.RuntimeUrl || 'N/A'} + + + {runtimeParams?.RuntimeId || 'N/A'} + + + {runtimeParams?.RuntimeArn || 'N/A'} + + + + + {runtimeParams?.RuntimeName || 'N/A'} + + + {runtimeParams?.EcrUri || 'N/A'} + + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/runtime/index.ts b/source/ui-deployment/src/components/useCaseDetails/runtime/index.ts new file mode 100644 index 00000000..116c1ae2 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/runtime/index.ts @@ -0,0 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { Runtime } from './Runtime'; +export { RuntimeDetails } from './RuntimeDetails'; diff --git a/source/ui-deployment/src/components/useCaseDetails/systemPrompt/SystemPrompt.tsx b/source/ui-deployment/src/components/useCaseDetails/systemPrompt/SystemPrompt.tsx new file mode 100644 index 00000000..8bed212e --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/systemPrompt/SystemPrompt.tsx @@ -0,0 +1,35 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { InfoLink } from '@/components/commons'; +import { Container, Header, SpaceBetween } from '@cloudscape-design/components'; +import { SystemPromptDetails } from './SystemPromptDetails'; + +export interface SystemPromptProps { + loadHelpPanelContent: (index: number) => void; + selectedDeployment: any; +} + +export function SystemPrompt({ loadHelpPanelContent, selectedDeployment }: SystemPromptProps) { + return ( + + loadHelpPanelContent(1)} + ariaLabel={'Information about System Prompt.'} + /> + } + > + System Prompt + + } + > + + + + ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/systemPrompt/SystemPromptDetails.tsx b/source/ui-deployment/src/components/useCaseDetails/systemPrompt/SystemPromptDetails.tsx new file mode 100644 index 00000000..96c31dbf --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/systemPrompt/SystemPromptDetails.tsx @@ -0,0 +1,30 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Box } from '@cloudscape-design/components'; +import { ValueWithLabel } from '../../../utils/ValueWithLabel'; + +export interface SystemPromptDetailsProps { + selectedDeployment: any; +} + +export function SystemPromptDetails({ selectedDeployment }: SystemPromptDetailsProps) { + const systemPrompt = selectedDeployment?.AgentBuilderParams?.SystemPrompt; + + if (!systemPrompt) { + return ( + + No system prompt + This agent has no system prompt configured. + + ); + } + + return ( + + +
{systemPrompt}
+
+
+ ); +} diff --git a/source/ui-deployment/src/components/useCaseDetails/systemPrompt/index.ts b/source/ui-deployment/src/components/useCaseDetails/systemPrompt/index.ts new file mode 100644 index 00000000..a1f68b0f --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/systemPrompt/index.ts @@ -0,0 +1,5 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { SystemPrompt } from './SystemPrompt'; +export { SystemPromptDetails } from './SystemPromptDetails'; diff --git a/source/ui-deployment/src/components/useCaseDetails/targets/TargetItem.tsx b/source/ui-deployment/src/components/useCaseDetails/targets/TargetItem.tsx new file mode 100644 index 00000000..b23e7064 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/targets/TargetItem.tsx @@ -0,0 +1,27 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Badge, ColumnLayout, Container, Header } from '@cloudscape-design/components'; +import { ValueWithLabel } from '../../../utils/ValueWithLabel'; + +interface TargetItemProps { + target: any; + index: number; +} + +export const TargetItem = ({ target, index }: TargetItemProps) => { + return ( + {target.TargetName || `Target ${index + 1}`}} + data-testid={`target-item-${index}`} + > + + {target.TargetId || "" } + + {target.TargetType || 'Unknown'} + + {target.TargetDescription || '-'} + + + ); +}; diff --git a/source/ui-deployment/src/components/useCaseDetails/targets/Targets.tsx b/source/ui-deployment/src/components/useCaseDetails/targets/Targets.tsx new file mode 100644 index 00000000..041a114a --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/targets/Targets.tsx @@ -0,0 +1,31 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { InfoLink } from '@/components/commons'; +import { Container, Header } from '@cloudscape-design/components'; +import { TargetsList } from './TargetsList'; +import { BaseDetailsContainerProps } from '../types'; +import { ErrorBoundary } from '@/components/commons/ErrorBoundary'; + +export const Targets = ({ loadHelpPanelContent, selectedDeployment }: BaseDetailsContainerProps) => ( + loadHelpPanelContent(1)} + ariaLabel={'Information about MCP server targets.'} + /> + } + > + Targets + + } + data-testid="targets-details-container" + > + + + + +); diff --git a/source/ui-deployment/src/components/useCaseDetails/targets/TargetsList.tsx b/source/ui-deployment/src/components/useCaseDetails/targets/TargetsList.tsx new file mode 100644 index 00000000..cf3a3f54 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/targets/TargetsList.tsx @@ -0,0 +1,28 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { Box, SpaceBetween } from '@cloudscape-design/components'; +import { TargetItem } from './TargetItem'; +import { BaseDetailsContainerProps } from '../types'; + +export const TargetsList = ({ selectedDeployment }: Partial) => { + // Get targets from the deployment object + const targets = selectedDeployment?.MCPParams?.GatewayParams?.TargetParams || []; + + if (!targets || targets.length === 0) { + return ( + + No targets configured + This MCP server deployment has no targets configured yet. + + ); + } + + return ( + + {targets.map((target: any, index: number) => ( + + ))} + + ); +}; diff --git a/source/ui-deployment/src/components/useCaseDetails/targets/index.ts b/source/ui-deployment/src/components/useCaseDetails/targets/index.ts new file mode 100644 index 00000000..581061cb --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/targets/index.ts @@ -0,0 +1,6 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { Targets } from './Targets'; +export { TargetItem } from './TargetItem'; +export { TargetsList } from './TargetsList'; diff --git a/source/ui-deployment/src/components/useCaseDetails/workflowDetails/WorkflowDetails.jsx b/source/ui-deployment/src/components/useCaseDetails/workflowDetails/WorkflowDetails.jsx new file mode 100644 index 00000000..13a6ea96 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/workflowDetails/WorkflowDetails.jsx @@ -0,0 +1,58 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ColumnLayout, Container, Header, SpaceBetween, Box } from '@cloudscape-design/components'; +import { ORCHESTRATION_PATTERNS } from '@/utils/constants'; +import { getBooleanString } from '../../wizard/utils'; +import { ValueWithLabel } from '@/utils/ValueWithLabel'; + +export const WorkflowDetails = ({ loadHelpPanelContent, selectedDeployment }) => { + const workflowParams = selectedDeployment.WorkflowParams || {}; + const orchestrationPattern = ORCHESTRATION_PATTERNS.get(workflowParams.OrchestrationPattern); + const memoryEnabled = workflowParams.MemoryConfig?.LongTermEnabled; + const selectedAgents = workflowParams.AgentsAsToolsParams?.Agents || []; + + return ( + + {/* Client Agent Configuration */} + + Client agent configuration + + } + data-testid="workflow-client-agent-container" + > + + + + {workflowParams.SystemPrompt || 'No system prompt configured'} + + + + {getBooleanString(memoryEnabled)} + + + + {/* Multi-Agent Configuration */} + + Multi-agent configuration + + } + data-testid="workflow-multiagent-container" + > + + + {orchestrationPattern?.name || workflowParams.OrchestrationPattern || 'Not configured'} + + + {selectedAgents.length || 0} + + + + ); +}; + +export default WorkflowDetails; diff --git a/source/ui-deployment/src/components/useCaseDetails/workflowDetails/index.js b/source/ui-deployment/src/components/useCaseDetails/workflowDetails/index.js new file mode 100644 index 00000000..1f0d4079 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/workflowDetails/index.js @@ -0,0 +1,4 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { WorkflowDetails } from './WorkflowDetails'; diff --git a/source/ui-deployment/src/components/useCaseDetails/workflowOrchestration/WorkflowOrchestration.jsx b/source/ui-deployment/src/components/useCaseDetails/workflowOrchestration/WorkflowOrchestration.jsx new file mode 100644 index 00000000..462bcce1 --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/workflowOrchestration/WorkflowOrchestration.jsx @@ -0,0 +1,85 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { ColumnLayout, Container, Header, SpaceBetween, Box } from '@cloudscape-design/components'; +import { ORCHESTRATION_PATTERNS } from '@/utils/constants'; +import { ExternalLink } from '@/components/commons/external-link'; +import { ValueWithLabel } from '@/utils/ValueWithLabel'; + +export const WorkflowOrchestration = ({ loadHelpPanelContent, selectedDeployment }) => { + const workflowParams = selectedDeployment.WorkflowParams || {}; + const orchestrationPattern = ORCHESTRATION_PATTERNS.get(workflowParams.OrchestrationPattern); + const selectedAgents = workflowParams.AgentsAsToolsParams?.Agents || []; + + return ( + + {/* Orchestration Pattern */} + + Orchestration pattern + + } + data-testid="workflow-orchestration-pattern-container" + > + + + {orchestrationPattern?.name || workflowParams.OrchestrationPattern || 'Not configured'} + + + {orchestrationPattern?.description && ( + {orchestrationPattern.description} + )} + + + + {/* Selected Agents */} + {selectedAgents.length > 0 && ( + + Selected agents ({selectedAgents.length}) + + } + data-testid="workflow-selected-agents-container" + > + + {selectedAgents.map((agent, index) => ( + + + + + {agent.UseCaseName || `Agent ${index + 1}`} + + + + {agent.UseCaseDescription || 'No description is available.'} + + + + ))} + + + )} + + {selectedAgents.length === 0 && ( + + Selected agents + + } + data-testid="workflow-no-agents-container" + > + + No agents have been selected for this workflow. + + + )} + + ); +}; + +export default WorkflowOrchestration; diff --git a/source/ui-deployment/src/components/useCaseDetails/workflowOrchestration/index.js b/source/ui-deployment/src/components/useCaseDetails/workflowOrchestration/index.js new file mode 100644 index 00000000..cca347ae --- /dev/null +++ b/source/ui-deployment/src/components/useCaseDetails/workflowOrchestration/index.js @@ -0,0 +1,4 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export { WorkflowOrchestration } from './WorkflowOrchestration'; diff --git a/source/ui-deployment/src/components/wizard/AgentBuilder/AgentBuilder.tsx b/source/ui-deployment/src/components/wizard/AgentBuilder/AgentBuilder.tsx new file mode 100644 index 00000000..3ccc9442 --- /dev/null +++ b/source/ui-deployment/src/components/wizard/AgentBuilder/AgentBuilder.tsx @@ -0,0 +1,77 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import React from 'react'; +import { StepContentProps } from '../interfaces/Steps'; +import { SpaceBetween } from '@cloudscape-design/components'; +import SystemPrompt from './SystemPrompt'; +import Memory from './Memory'; +import Tools from './Tools'; +import { DEFAULT_AGENT_SYSTEM_PROMPT } from '@/utils/constants'; + +const AgentBuilder = ({ info: { agentBuilder }, onChange, setHelpPanelContent }: StepContentProps) => { + const [, setNumFieldsInError] = React.useState(0); + + // System Prompt states + const [defaultSystemPrompt, setDefaultSystemPrompt] = React.useState(DEFAULT_AGENT_SYSTEM_PROMPT); + const [systemPromptInError, setSystemPromptInError] = React.useState(false); + + // Tools states + const [toolsInError, setToolsInError] = React.useState(false); + + // Set the global error state of this wizard step based on errors propagated from sub components + React.useEffect(() => { + onChange({ + inError: systemPromptInError || toolsInError + }); + }, [systemPromptInError, toolsInError]); + + React.useEffect(() => { + setDefaultSystemPrompt(DEFAULT_AGENT_SYSTEM_PROMPT); + + // Only initialize if values are truly undefined (not set in steps-config) + if (agentBuilder.memoryEnabled === undefined) { + onChange({ memoryEnabled: false }); + } + + if (agentBuilder.mcpServers === undefined) { + onChange({ mcpServers: [] }); + } + + if (agentBuilder.tools === undefined) { + onChange({ tools: [] }); + } + }, []); + + return ( + + + + + + ); +}; + +export default AgentBuilder; diff --git a/source/ui-deployment/src/components/wizard/AgentBuilder/Memory.tsx b/source/ui-deployment/src/components/wizard/AgentBuilder/Memory.tsx new file mode 100644 index 00000000..50755a2b --- /dev/null +++ b/source/ui-deployment/src/components/wizard/AgentBuilder/Memory.tsx @@ -0,0 +1,101 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import React from 'react'; +import { Box, Container, FormField, Header, RadioGroup, SpaceBetween } from '@cloudscape-design/components'; +import { BaseFormComponentProps } from '../interfaces'; +import { InfoLink } from '@/components/commons'; +import { IG_DOCS } from '@/utils/constants'; + +export interface MemoryProps extends BaseFormComponentProps { + memoryEnabled: boolean; + 'data-testid'?: string; +} + +export const Memory = (props: MemoryProps) => { + const handleMemoryChange = (detail: { value: string }) => { + props.onChangeFn({ memoryEnabled: detail.value === 'yes' }); + }; + + return ( + Memory management} + data-testid={props['data-testid']} + > + + props.setHelpPanelContent!(memoryInfoPanel)} + data-testid="memory-info-link" + /> + } + description="Enable your agent to retain information across multiple conversations" + data-testid="memory-form-field" + > + handleMemoryChange(detail)} + value={props.memoryEnabled ? 'yes' : 'no'} + items={[ + { + value: 'yes', + label: 'Yes', + description: 'Store conversation data for extended periods to improve context retention' + }, + { + value: 'no', + label: 'No', + description: "Don't retain conversation history between sessions" + } + ]} + data-testid="memory-radio-group" + /> + + + + ); +}; + +export default Memory; + +const memoryInfoPanel = { + title: 'Memory Management', + content: ( +
+ + Memory management controls how your AI agent handles conversation history and context retention across + multiple interactions. + + Long-term Memory: +
    +
  • + Enabled: Agent retains context from previous conversations, improving personalization and + continuity +
  • +
  • + Disabled: Each conversation starts fresh with no memory of previous interactions +
  • +
+ Benefits of Long-term Memory: +
    +
  • Improved context awareness across sessions
  • +
  • Better personalization based on user preferences
  • +
  • Continuity in ongoing projects or discussions
  • +
  • Enhanced user experience through remembered context
  • +
+ Considerations: +
    +
  • Memory storage may impact response time
  • +
  • Consider privacy implications of storing conversation data
  • +
  • Memory capacity may have limits depending on your configuration
  • +
+
+ ), + links: [ + { + href: IG_DOCS.AGENT_USE_CASE, + text: 'Agent use case documentation' + } + ] +}; diff --git a/source/ui-deployment/src/components/wizard/AgentBuilder/SystemPrompt.tsx b/source/ui-deployment/src/components/wizard/AgentBuilder/SystemPrompt.tsx new file mode 100644 index 00000000..b56408c1 --- /dev/null +++ b/source/ui-deployment/src/components/wizard/AgentBuilder/SystemPrompt.tsx @@ -0,0 +1,165 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import React from 'react'; +import { + Box, + Button, + Container, + FormField, + Header, + InputProps, + SpaceBetween, + Textarea +} from '@cloudscape-design/components'; +import { BaseFormComponentProps } from '../interfaces'; +import { InfoLink } from '@/components/commons'; +import { IG_DOCS, MAX_AGENT_SYSTEM_PROMPT_LENGTH, DEFAULT_AGENT_SYSTEM_PROMPT } from '@/utils/constants'; +import { ConfirmUnsavedChangesModal } from '@/components/commons/confirm-unsaved-changes-modal'; + +export interface SystemPromptProps extends BaseFormComponentProps { + defaultSystemPrompt?: string; + systemPrompt: string; + setNumFieldsInError: React.Dispatch>; + setSystemPromptInError: React.Dispatch>; + headerTitle?: string; + 'data-testid'?: string; +} + +export const SystemPrompt = (props: SystemPromptProps) => { + const [modalVisible, setModalVisible] = React.useState(false); + + const validateSystemPrompt = (prompt: string): string => { + if (!prompt || prompt.trim().length === 0) { + return 'System prompt is required'; + } + + if (prompt.length > MAX_AGENT_SYSTEM_PROMPT_LENGTH) { + return `System prompt is too long. Character count: ${prompt.length}/${MAX_AGENT_SYSTEM_PROMPT_LENGTH}`; + } + + return ''; + }; + + const promptError = validateSystemPrompt(props.systemPrompt); + + // Propagate error to parent component on any changes to the error messages + React.useEffect(() => { + props.setSystemPromptInError(promptError.length > 0); + }, [promptError]); + + const handleSystemPromptChange = (detail: InputProps.ChangeDetail) => { + props.onChangeFn({ systemPrompt: detail.value }); + }; + + const handleResetClick = () => { + props.onChangeFn({ systemPrompt: props.defaultSystemPrompt }); + setModalVisible(false); + }; + + return ( + + + +
+ } + data-testid="system-prompt-header" + > + {props.headerTitle ?? 'Prompt'} + + } + data-testid={props['data-testid']} + > + + props.setHelpPanelContent!(systemPromptInfoPanel)} + data-testid="system-prompt-info-link" + /> + } + errorText={promptError} + stretch={true} + description="Define the behavior and personality of your AI agent. This prompt will guide how the agent responds to user interactions." + data-testid="system-prompt-form-field" + > +