diff --git a/.github/workflows/codebuild-ci.yml b/.github/workflows/codebuild-ci.yml index 609c81b0340..c366e538217 100644 --- a/.github/workflows/codebuild-ci.yml +++ b/.github/workflows/codebuild-ci.yml @@ -142,56 +142,9 @@ jobs: ./gradlew collectDelegatedArtifactSizeMetrics -PpullRequest=$PULL_REQUEST - name: Analyze Artifact Size Metrics run: ./gradlew analyzeArtifactSizeMetrics - - name: Show Results - uses: actions/github-script@v7 - with: - script: | - const getComments = - `query { - repository(owner:"${context.repo.owner}", name:"${context.repo.repo}"){ - pullRequest(number: ${context.issue.number ?? process.env.SDK_PR}) { - id - comments(last:100) { - nodes { - id - body - author { - login - } - isMinimized - } - } - } - } - }` - - const response = await github.graphql(getComments) - const comments = response.repository.pullRequest.comments.nodes - - const mutations = comments - .filter(comment => comment.author.login == 'github-actions' && !comment.isMinimized && comment.body.startsWith('Affected Artifacts')) - .map(comment => - github.graphql( - `mutation { - minimizeComment(input:{subjectId:"${comment.id}", classifier:OUTDATED}){ - clientMutationId - } - }` - ) - ) - await Promise.all(mutations) - const fs = require('node:fs') - const comment = fs.readFileSync('build/reports/metrics/artifact-analysis.md', 'utf8') - - const writeComment = - `mutation { - addComment(input:{body:"""${comment}""", subjectId:"${response.repository.pullRequest.id}"}){ - clientMutationId - } - }` - - await github.graphql(writeComment) + - name: Show Results + uses: awslabs/aws-kotlin-repo-tools/.github/actions/artifact-size-metrics/show-results@main - name: Evaluate Result if: ${{ !contains(github.event.pull_request.labels.*.name, 'acknowledge-artifact-size-increase') }} diff --git a/.github/workflows/merge-main.yml b/.github/workflows/merge-main.yml index 560876bdf3f..1b354b2ad72 100644 --- a/.github/workflows/merge-main.yml +++ b/.github/workflows/merge-main.yml @@ -1,11 
+1,11 @@ name: Merge main on: - schedule: - - cron: "0 7 * * 1-5" # At 07:00 UTC (00:00 PST, 03:00 EST), Monday through Friday + push: + branches: [ main ] workflow_dispatch: jobs: - test: + merge: runs-on: ubuntu-latest steps: - name: Merge main diff --git a/.github/workflows/sync-mirror.yml b/.github/workflows/sync-mirror.yml new file mode 100644 index 00000000000..ffdb5731f9c --- /dev/null +++ b/.github/workflows/sync-mirror.yml @@ -0,0 +1,20 @@ +name: Sync Mirror + +on: + push: + branches: [ main ] + workflow_dispatch: + +jobs: + git-sync: + # Only sync when pushing to source repo + if: github.repository == 'awslabs/aws-sdk-kotlin' + runs-on: ubuntu-latest + steps: + - name: git-sync + uses: wei/git-sync@v3 + with: + source_repo: "https://aws-sdk-kotlin-ci:${{ secrets.CI_USER_PAT }}@github.com/awslabs/aws-sdk-kotlin.git" + source_branch: "main" + destination_repo: "https://aws-sdk-kotlin-ci:${{ secrets.CI_USER_PAT }}@github.com/awslabs/private-aws-sdk-kotlin-staging.git" + destination_branch: "main" \ No newline at end of file diff --git a/.github/workflows/update-canary.yml b/.github/workflows/update-canary.yml new file mode 100644 index 00000000000..959c8f19c69 --- /dev/null +++ b/.github/workflows/update-canary.yml @@ -0,0 +1,76 @@ +# After every GitHub release, verify that the Maven artifacts are available, then kick off +# a canary deployment with the latest version of the SDK. 
+name: Update Canary +on: + release: + types: [ published ] + +jobs: + update-canary: + runs-on: ubuntu-latest + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + + + - name: Verify artifact is available on Maven + shell: bash + # Maven can take up to 2 hours after the release has succeeded to publish our artifacts + # FIXME Track execution duration over time and see if this can be shortened + timeout-minutes: 120 + run: | + TAG="${{ github.event.release.tag_name }}" + VERSION="${TAG#v}" + MAVEN_URL="https://repo.maven.apache.org/maven2/aws/sdk/kotlin/s3/${VERSION}/" + + echo "Checking for an artifact at $MAVEN_URL" + + while true; do + STATUS=$(curl -i -s -o /dev/null -w "%{http_code}" "$MAVEN_URL") + echo "Status: $STATUS" + + if [[ "$STATUS" == "200" ]]; then + echo "Artifact is available at $MAVEN_URL" + exit 0 + fi + + sleep 30 + done + + - name: Update canary + shell: bash + timeout-minutes: 15 + run: | + set -euo pipefail + + TAG="${{ github.event.release.tag_name }}" + EXECUTION_NAME="update-canary-${TAG}" + STATE_MACHINE_ARN="arn:aws:states:us-west-2:${{ secrets.CI_USER }}:stateMachine:DeployLatestSdkVersion" + + echo "Starting step function: $EXECUTION_NAME" + EXECUTION_ARN=$(aws stepfunctions start-execution \ + --state-machine-arn "$STATE_MACHINE_ARN" \ + --name "$EXECUTION_NAME" \ + --input '{}' \ + --query 'executionArn' \ + --output text) + + echo "Waiting for step function to complete..." 
+ + while true; do + STATUS=$(aws stepfunctions describe-execution --execution-arn "$EXECUTION_ARN" --query 'status' --output text) + echo "Status: $STATUS" + + if [[ "$STATUS" == "SUCCEEDED" ]]; then + echo "Step Function completed successfully" + exit 0 + elif [[ "$STATUS" == "FAILED" || "$STATUS" == "TIMED_OUT" || "$STATUS" == "ABORTED" ]]; then + echo "Step Function failed with status: $STATUS" + exit 1 + fi + + sleep 10 + done \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 53d85dee33c..b1999bbdec6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,203 @@ # Changelog +## [1.4.63] - 04/14/2025 + +### Features +* (**entityresolution**) This is to add new metrics to our GetIdMappingJob API and also update uniqueId naming for batchDeleteUniqueIds API to be more accurate +* (**taxsettings**) Indonesia SOR Tax Registration Launch + +## [1.4.62] - 04/11/2025 + +### Features +* (**connectcontactlens**) Making sentiment optional for ListRealtimeContactAnalysisSegments Response depending on conversational analytics configuration +* (**datazone**) Raise hard limit of authorized principals per SubscriptionTarget from 10 to 20. +* (**detective**) Add support for Detective DualStack endpoints +* (**marketplaceentitlementservice**) Add support for Marketplace Entitlement Service dual-stack endpoints for CN and GOV regions +* (**marketplacemetering**) Add support for Marketplace Metering Service dual-stack endpoints for CN regions +* (**pcs**) Changed the minimum length of clusterIdentifier, computeNodeGroupIdentifier, and queueIdentifier to 3. +* (**verifiedpermissions**) Adds deletion protection support to policy stores. Deletion protection is disabled by default, can be enabled via the CreatePolicyStore or UpdatePolicyStore APIs, and is visible in GetPolicyStore. + +### Documentation +* (**dynamodb**) Doc only update for API descriptions. 
+ +## [1.4.61] - 04/10/2025 + +### Features +* (**applicationautoscaling**) Application Auto Scaling now supports horizontal scaling for Elasticache Memcached self-designed clusters using target tracking scaling policies and scheduled scaling. +* (**elasticache**) AWS ElastiCache SDK now supports using MemcachedUpgradeConfig parameter with ModifyCacheCluster API to enable updating Memcached cache node types. Please refer to updated AWS ElastiCache public documentation for detailed information on API usage and implementation. +* (**m2**) Introduce three new APIs: CreateDataSetExportTask, GetDataSetExportTask and ListDataSetExportHistory. Add support for batch restart for Blu Age applications. +* (**medialive**) AWS Elemental MediaLive / Features : Add support for CMAF Ingest CaptionLanguageMappings, TimedMetadataId3 settings, and Link InputResolution. +* (**qbusiness**) Adds functionality to enable/disable a new Q Business Hallucination Reduction feature. If enabled, Q Business will detect and attempt to remove Hallucinations from certain Chat requests. +* (**quicksight**) Add support to analysis and sheet level highlighting in QuickSight. + +## [1.4.60] - 04/09/2025 + +### Features +* (**controlcatalog**) The GetControl API now surfaces a control's Severity, CreateTime, and Identifier for a control's Implementation. The ListControls API now surfaces a control's Behavior, Severity, CreateTime, and Identifier for a control's Implementation. +* (**glue**) The TableOptimizer APIs in AWS Glue now return the DpuHours field in each TableOptimizerRun, providing clients visibility to the DPU-hours used for billing in managed Apache Iceberg table compaction optimization. 
+* (**groundstation**) Support tagging Agents and adjust input field validations +* (**transfer**) This launch includes 2 enhancements to SFTP connectors user-experience: 1) Customers can self-serve concurrent connections setting for their connectors, and 2) Customers can discover the public host key of remote servers using their SFTP connectors. + +### Documentation +* (**dynamodb**) Documentation update for secondary indexes and Create_Table. + +## [1.4.59] - 04/08/2025 + +### Features +* (**bedrockruntime**) This release introduces our latest bedrock runtime API, InvokeModelWithBidirectionalStream. The API supports both input and output streams and is supported by only HTTP2.0. +* (**costexplorer**) This release supports Pagination traits on Cost Anomaly Detection APIs. +* (**costoptimizationhub**) This release adds resource type "MemoryDbReservedInstances" and resource type "DynamoDbReservedCapacity" to the GetRecommendation, ListRecommendations, and ListRecommendationSummaries APIs to support new MemoryDB and DynamoDB RI recommendations. +* (**iotfleetwise**) This release adds the option to update the strategy of state templates already associated to a vehicle, without the need to remove and re-add them. +* (**storagegateway**) Added new ActiveDirectoryStatus value, ListCacheReports paginator, and support for longer pagination tokens. +* (**taxsettings**) Uzbekistan Launch on TaxSettings Page + +### Documentation +* (**securityhub**) Documentation updates for AWS Security Hub. + +## [1.4.58] - 04/08/2025 + +## [1.4.57] - 04/07/2025 + +⚠️ **IMPORTANT**: This version should not be used, as it depends on an unreleased snapshot version and will not work correctly. Please use version 1.4.58 or later. + +### Features +* (**bedrock**) New options for how to handle harmful content detected by Amazon Bedrock Guardrails. +* (**bedrockruntime**) New options for how to handle harmful content detected by Amazon Bedrock Guardrails. 
+* (**codebuild**) AWS CodeBuild now offers an enhanced debugging experience. +* (**glue**) Add input validations for multiple Glue APIs +* (**medialive**) AWS Elemental MediaLive now supports SDI inputs to MediaLive Anywhere Channels in workflows that use AWS SDKs. +* (**personalize**) Add support for eventsConfig for CreateSolution, UpdateSolution, DescribeSolution, DescribeSolutionVersion. Add support for GetSolutionMetrics to return weighted NDCG metrics when eventsConfig is enabled for the solution. +* (**transfer**) This launch enables customers to manage contents of their remote directories, by deleting old files or moving files to archive folders in remote servers once they have been retrieved. Customers will be able to automate the process using event-driven architecture. + +## [1.4.56] - 04/04/2025 + +### Features +* (**eventbridge**) Amazon EventBridge adds support for customer-managed keys on Archives and validations for two fields: eventSourceArn and kmsKeyIdentifier. + +### Documentation +* (**directoryservicedata**) Doc only update - fixed broken links. +* (**ec2**) Doc-only updates for Amazon EC2 +* (**s3control**) Updated max size of Prefixes parameter of Scope data type. + +## [1.4.55] - 04/03/2025 + +### Features +* (**bedrockagent**) Added optional "customMetadataField" for Amazon Aurora knowledge bases, allowing single-column metadata. Also added optional "textIndexName" for MongoDB Atlas knowledge bases, enabling hybrid search support. +* (**chimesdkvoice**) Added FOC date as an attribute of PhoneNumberOrder, added AccessDeniedException as a possible return type of ValidateE911Address +* (**mailmanager**) Add support for Dual_Stack and PrivateLink types of IngressPoint. For configuration requests, SES Mail Manager will now accept both IPv4/IPv6 dual-stack endpoints and AWS PrivateLink VPC endpoints for email receiving. +* (**route53**) Added us-gov-east-1 and us-gov-west-1 as valid Latency Based Routing regions for change-resource-record-sets. 
+* (**sagemaker**) Adds support for i3en, m7i, r7i instance types for SageMaker Hyperpod +* (**sesv2**) This release enables customers to provide attachments in the SESv2 SendEmail and SendBulkEmail APIs. +* (**transcribe**) This Feature Adds Support for the "zh-HK" Locale for Batch Operations + +### Documentation +* (**opensearch**) Improve descriptions for various API commands and data types. + +## [1.4.54] - 04/02/2025 + +### Features +* (**applicationsignals**) Application Signals now supports creating Service Level Objectives on service dependencies. Users can now create or update SLOs on discovered service dependencies to monitor their standard application metrics. +* (**codebuild**) This release adds support for environment type WINDOWS_SERVER_2022_CONTAINER in ProjectEnvironment +* (**lexmodelsv2**) Release feature of errorlogging for lex bot, customer can config this feature in bot version to generate log for error exception which helps debug +* (**medialive**) Added support for SMPTE 2110 inputs when running a channel in a MediaLive Anywhere cluster. This feature enables ingestion of SMPTE 2110-compliant video, audio, and ancillary streams by reading SDP files that AWS Elemental MediaLive can retrieve from a network source. + +### Documentation +* (**ecr**) Fix for customer issues related to AWS account ID and size limitation for token. +* (**ecs**) This is an Amazon ECS documentation only update to address various tickets. + +## [1.4.53] - 04/01/2025 + +### Features +* (**cleanrooms**) This release adds support for updating the analytics engine of a collaboration. +* (**sagemaker**) Added tagging support for SageMaker notebook instance lifecycle configurations + +## [1.4.52] - 03/31/2025 + +### Features +* (**bedrockruntime**) Add Prompt Caching support to Converse and ConverseStream APIs +* (**deadline**) With this release you can use a new field to specify the search term match type. 
Search term match types currently support fuzzy and contains matching. +* (**ec2**) Release VPC Route Server, a new feature allowing dynamic routing in VPCs. +* (**eks**) Add support for updating RemoteNetworkConfig for hybrid nodes on EKS UpdateClusterConfig API +* (**marketplaceentitlementservice**) Add support for Marketplace Entitlement Service dual-stack endpoints. +* (**outposts**) Enabling Asset Level Capacity Management feature, which allows customers to create a Capacity Task for a single Asset on their active Outpost. +* (**s3**) Amazon S3 adds support for S3 Access Points for directory buckets in AWS Dedicated Local Zones +* (**s3control**) Amazon S3 adds support for S3 Access Points for directory buckets in AWS Dedicated Local Zones +* (**sesv2**) Add dual-stack support to global endpoints. +* (**transfer**) Add WebAppEndpointPolicy support for WebApps + +## [1.4.51] - 03/28/2025 + +### Features +* (**apigateway**) Adds support for setting the IP address type to allow dual-stack or IPv4 address types to invoke your APIs or domain names. +* (**apigatewayv2**) Adds support for setting the IP address type to allow dual-stack or IPv4 address types to invoke your APIs or domain names. +* (**bedrockruntime**) Launching Multi-modality Content Filter for Amazon Bedrock Guardrails. +* (**codebuild**) This release adds support for cacheNamespace in ProjectCache +* (**marketplacemetering**) Add support for Marketplace Metering Service dual-stack endpoints. +* (**networkmanager**) Add support for NetworkManager Dualstack endpoints. +* (**paymentcryptography**) The service adds support for transferring AES-256 and other keys between the service and other service providers and HSMs. This feature uses ECDH to derive a one-time key transport key to enable these secure key exchanges. 
+* (**quicksight**) RLS permission dataset with userAs: RLS_RULES flag, Q in QuickSight/Threshold Alerts/Schedules/Snapshots in QS embedding, toggle dataset refresh email alerts via API, transposed table with options: column width, type and index, toggle Q&A on dashboards, Oracle Service Name when creating data source. +* (**sagemaker**) TransformAmiVersion for Batch Transform and SageMaker Search Service Aggregate Search API Extension + +### Documentation +* (**ecs**) This is an Amazon ECS documentation only release that addresses tickets. + +## [1.4.50] - 03/27/2025 + +### Features +* (**batch**) This release will enable two features: Firelens log driver, and Execute Command on Batch jobs on ECS. Both features will be passed through to ECS. +* (**bcmpricingcalculator**) Added standaloneAccountRateTypeSelections for GetPreferences and UpdatePreferences APIs. Added STALE enum value to status attribute in GetBillScenario and UpdateBillScenario APIs. +* (**bedrockagentruntime**) bedrock flow now support node action trace. +* (**cloudformation**) Adding support for the new parameter "ScanFilters" in the CloudFormation StartResourceScan API. When this parameter is included, the StartResourceScan API will initiate a scan limited to the resource types specified by the parameter. +* (**datazone**) This release adds new action type of Create Listing Changeset for the Metadata Enforcement Rule feature. +* (**eks**) Added support for BOTTLEROCKET FIPS AMIs to AMI types in US regions. +* (**gamelift**) Amazon GameLift Servers add support for additional instance types. +* (**iam**) Update IAM dual-stack endpoints for BJS, IAD and PDT partitions +* (**sagemaker**) add: recovery mode for SageMaker Studio apps +* (**ssooidc**) This release adds AwsAdditionalDetails in the CreateTokenWithIAM API response. + +## [1.4.49] - 03/26/2025 + +### Features +* (**arczonalshift**) Add new shiftType field for ARC zonal shifts. 
+* (**directconnect**) With this release, AWS Direct Connect allows you to tag your Direct Connect gateways. Tags are metadata that you can create and use to manage your Direct Connect gateways. For more information about tagging, see AWS Tagging Strategies. +* (**mediaconvert**) This release adds a configurable Quality Level setting for the top rendition of Auto ABR jobs +* (**mediatailor**) Add support for log filtering which allow customers to filter out selected event types from logs. +* (**polly**) Added support for the new voice - Jihye (ko-KR). Jihye is available as a Neural voice only. +* (**wafv2**) This release adds the ability to associate an AWS WAF v2 web ACL with an AWS Amplify App. + +### Documentation +* (**rds**) Add note about the Availability Zone where RDS restores the DB cluster for the RestoreDBClusterToPointInTime operation. + +## [1.4.48] - 03/25/2025 + +### Features +* (**bedrockagent**) Adding support for Amazon OpenSearch Managed clusters as a vector database in Knowledge Bases for Amazon Bedrock +* (**eks**) Added support to override upgrade-blocking readiness checks via force flag when updating a cluster. +* (**gameliftstreams**) Minor updates to improve developer experience. +* (**keyspaces**) Removing replication region limitation for Amazon Keyspaces Multi-Region Replication APIs. +* (**marketplaceentitlementservice**) This release enhances the GetEntitlements API to support new filter CUSTOMER_AWS_ACCOUNT_ID in request and CustomerAWSAccountId field in response. +* (**marketplacemetering**) This release enhances the BatchMeterUsage API to support new field CustomerAWSAccountId in request and response and making CustomerIdentifier optional. CustomerAWSAccountId or CustomerIdentifier must be provided in request but not both. 
+* (**sagemaker**) This release adds support for customer-managed KMS keys in Amazon SageMaker Partner AI Apps +* (**workspacesthinclient**) Deprecate tags field in Get API responses + +## [1.4.47] - 03/24/2025 + +### Features +* (**iotwireless**) Mark EutranCid under LteNmr optional. +* (**pcs**) ClusterName/ClusterIdentifier, ComputeNodeGroupName/ComputeNodeGroupIdentifier, and QueueName/QueueIdentifier can now have 10 characters, and a minimum of 3 characters. The TagResource API action can now return ServiceQuotaExceededException. +* (**ssm**) This release adds the AvailableSecurityUpdatesComplianceStatus field to patch baseline operations, as well as the AvailableSecurityUpdateCount and InstancesWithAvailableSecurityUpdates to patch state operations. Applies to Windows Server managed nodes only. + +### Documentation +* (**qconnect**) Provides the correct value for supported model ID. + +## [1.4.46] - 03/21/2025 + +### Features +* (**bedrock**) A CustomModelUnit(CMU) is an abstract view of the hardware utilization that Bedrock needs to host a single copy of your custom imported model. Bedrock determines the number of CMUs that a model copy needs when you import the custom model. You can use CMUs to estimate the cost of inference. +* (**datazone**) Add support for overriding selection of default AWS IAM Identity Center instance as part of Amazon DataZone domain APIs. +* (**route53recoverycontrolconfig**) Adds dual-stack (IPv4 and IPv6) endpoint support for route53-recovery-control-config operations, opt-in dual-stack addresses for cluster endpoints, and UpdateCluster API to update the network-type of clusters between IPv4 and dual-stack. +* (**sagemaker**) This release does the following: 1.) Adds DurationHours as a required field to the SearchTrainingPlanOfferings action in the SageMaker AI API; 2.) Adds support for G6e instance types for SageMaker AI inference optimization jobs.
+ ## [1.4.45] - 03/20/2025 ### Features diff --git a/codegen/aws-sdk-codegen/src/main/kotlin/aws/sdk/kotlin/codegen/ModuleDocumentationIntegration.kt b/codegen/aws-sdk-codegen/src/main/kotlin/aws/sdk/kotlin/codegen/ModuleDocumentationIntegration.kt index 268da668c20..ec92f4b1621 100644 --- a/codegen/aws-sdk-codegen/src/main/kotlin/aws/sdk/kotlin/codegen/ModuleDocumentationIntegration.kt +++ b/codegen/aws-sdk-codegen/src/main/kotlin/aws/sdk/kotlin/codegen/ModuleDocumentationIntegration.kt @@ -112,8 +112,7 @@ class ModuleDocumentationIntegration( ?.value appendLine("## Code Examples") - append("To see full code examples, see the ${title ?: sdkId} examples in the AWS code example library. ") - appendLine("See $codeExampleLink") + appendLine("Explore code examples for ${title ?: sdkId} in the AWS code example library.") appendLine() } } diff --git a/codegen/aws-sdk-codegen/src/main/resources/aws/sdk/kotlin/codegen/endpoints.json b/codegen/aws-sdk-codegen/src/main/resources/aws/sdk/kotlin/codegen/endpoints.json index afc1fc0a6f7..b7170b04caf 100644 --- a/codegen/aws-sdk-codegen/src/main/resources/aws/sdk/kotlin/codegen/endpoints.json +++ b/codegen/aws-sdk-codegen/src/main/resources/aws/sdk/kotlin/codegen/endpoints.json @@ -1625,6 +1625,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "api-fips.sagemaker.ca-central-1.amazonaws.com", @@ -1662,6 +1663,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -2219,178 +2221,256 @@ "applicationinsights" : { "endpoints" : { "af-south-1" : { - "credentialScope" : { - "region" : "af-south-1" - }, - "hostname" : "applicationinsights.af-south-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-east-1" : { - "credentialScope" : { - "region" : "ap-east-1" - }, - 
"hostname" : "applicationinsights.ap-east-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-1" : { - "credentialScope" : { - "region" : "ap-northeast-1" - }, - "hostname" : "applicationinsights.ap-northeast-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-2" : { - "credentialScope" : { - "region" : "ap-northeast-2" - }, - "hostname" : "applicationinsights.ap-northeast-2.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-3" : { - "credentialScope" : { - "region" : "ap-northeast-3" - }, - "hostname" : "applicationinsights.ap-northeast-3.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-south-1" : { - "credentialScope" : { - "region" : "ap-south-1" - }, - "hostname" : "applicationinsights.ap-south-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-south-2" : { - "credentialScope" : { - "region" : "ap-south-2" - }, - "hostname" : "applicationinsights.ap-south-2.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-1" : { - "credentialScope" : { - "region" : "ap-southeast-1" - }, - "hostname" : "applicationinsights.ap-southeast-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-2" : { - "credentialScope" : { - "region" : "ap-southeast-2" - }, - "hostname" : "applicationinsights.ap-southeast-2.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-3" : { - 
"credentialScope" : { - "region" : "ap-southeast-3" - }, - "hostname" : "applicationinsights.ap-southeast-3.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-4" : { - "credentialScope" : { - "region" : "ap-southeast-4" - }, - "hostname" : "applicationinsights.ap-southeast-4.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ca-central-1" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "hostname" : "applicationinsights.ca-central-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "applicationinsights-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "applicationinsights.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ca-west-1" : { - "credentialScope" : { - "region" : "ca-west-1" - }, - "hostname" : "applicationinsights.ca-west-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "applicationinsights-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "applicationinsights.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-central-1" : { - "credentialScope" : { - "region" : "eu-central-1" - }, - "hostname" : "applicationinsights.eu-central-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-central-2" : { - "credentialScope" : { - "region" : "eu-central-2" - }, - "hostname" : "applicationinsights.eu-central-2.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-north-1" : { - "credentialScope" : { - "region" : "eu-north-1" - }, - "hostname" : 
"applicationinsights.eu-north-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-south-1" : { - "credentialScope" : { - "region" : "eu-south-1" - }, - "hostname" : "applicationinsights.eu-south-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-south-2" : { - "credentialScope" : { - "region" : "eu-south-2" - }, - "hostname" : "applicationinsights.eu-south-2.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-1" : { - "credentialScope" : { - "region" : "eu-west-1" - }, - "hostname" : "applicationinsights.eu-west-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-2" : { - "credentialScope" : { - "region" : "eu-west-2" - }, - "hostname" : "applicationinsights.eu-west-2.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-3" : { + "variants" : [ { + "hostname" : "applicationinsights.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-ca-central-1" : { "credentialScope" : { - "region" : "eu-west-3" + "region" : "ca-central-1" }, - "hostname" : "applicationinsights.eu-west-3.amazonaws.com" + "deprecated" : true, + "hostname" : "applicationinsights-fips.ca-central-1.amazonaws.com" }, - "il-central-1" : { + "fips-ca-west-1" : { "credentialScope" : { - "region" : "il-central-1" + "region" : "ca-west-1" }, - "hostname" : "applicationinsights.il-central-1.amazonaws.com" + "deprecated" : true, + "hostname" : "applicationinsights-fips.ca-west-1.amazonaws.com" }, - "me-central-1" : { + "fips-us-east-1" : { "credentialScope" : { - "region" : "me-central-1" + "region" : "us-east-1" }, - "hostname" : "applicationinsights.me-central-1.amazonaws.com" + 
"deprecated" : true, + "hostname" : "applicationinsights-fips.us-east-1.amazonaws.com" }, - "me-south-1" : { + "fips-us-east-2" : { "credentialScope" : { - "region" : "me-south-1" + "region" : "us-east-2" }, - "hostname" : "applicationinsights.me-south-1.amazonaws.com" + "deprecated" : true, + "hostname" : "applicationinsights-fips.us-east-2.amazonaws.com" }, - "sa-east-1" : { + "fips-us-west-1" : { "credentialScope" : { - "region" : "sa-east-1" + "region" : "us-west-1" }, - "hostname" : "applicationinsights.sa-east-1.amazonaws.com" + "deprecated" : true, + "hostname" : "applicationinsights-fips.us-west-1.amazonaws.com" }, - "us-east-1" : { + "fips-us-west-2" : { "credentialScope" : { - "region" : "us-east-1" + "region" : "us-west-2" }, - "hostname" : "applicationinsights.us-east-1.amazonaws.com" + "deprecated" : true, + "hostname" : "applicationinsights-fips.us-west-2.amazonaws.com" + }, + "il-central-1" : { + "variants" : [ { + "hostname" : "applicationinsights.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "applicationinsights.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "applicationinsights.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "applicationinsights.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "applicationinsights-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "applicationinsights-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "applicationinsights.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "hostname" : "applicationinsights.us-east-2.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights-fips.us-east-2.amazonaws.com", + "tags" : [ 
"fips" ] + }, { + "hostname" : "applicationinsights-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "applicationinsights.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "us-west-1" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "hostname" : "applicationinsights.us-west-1.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "applicationinsights-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "applicationinsights.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "us-west-2" : { - "credentialScope" : { - "region" : "us-west-2" - }, - "hostname" : "applicationinsights.us-west-2.amazonaws.com" + "variants" : [ { + "hostname" : "applicationinsights-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "applicationinsights-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "applicationinsights.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] } } }, @@ -2720,34 +2800,174 @@ }, "appsync" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "af-south-1" : { + "variants" : [ { + "hostname" : "appsync.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : 
"appsync.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "appsync.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "appsync.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "appsync.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "appsync.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "appsync.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "appsync.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "appsync.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "appsync.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "appsync.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "appsync.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "appsync.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "appsync.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "appsync.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "appsync.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "appsync.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" 
: "appsync.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "appsync.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "appsync.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "il-central-1" : { + "variants" : [ { + "hostname" : "appsync.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "appsync.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "appsync.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "appsync.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "appsync.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "appsync.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "appsync.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "appsync.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + } } }, "apptest" : { @@ -2828,17 +3048,38 @@ "us-east-1" : { "variants" : [ { "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { + "tags" : [ "fips" ] } ] }, + "us-east-1-fips" : { + "deprecated" : true + }, "us-east-2" : { "variants" : [ { "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { + "tags" : [ "fips" ] } ] }, + "us-east-2-fips" : { + "deprecated" : true + }, "us-west-2" : { "variants" : [ { "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { + "tags" : [ "fips" ] } ] + }, + "us-west-2-fips" : { + "deprecated" : true } } }, @@ -4047,6 +4288,9 @@ "variants" : [ { "hostname" : "cleanrooms-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { 
+ "hostname" : "cleanrooms-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "cleanrooms.us-east-1.api.aws", "tags" : [ "dualstack" ] @@ -4056,6 +4300,9 @@ "variants" : [ { "hostname" : "cleanrooms-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cleanrooms-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "cleanrooms.us-east-2.api.aws", "tags" : [ "dualstack" ] @@ -4065,6 +4312,9 @@ "variants" : [ { "hostname" : "cleanrooms-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cleanrooms-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "cleanrooms.us-west-2.api.aws", "tags" : [ "dualstack" ] @@ -5858,20 +6108,66 @@ "protocols" : [ "https" ] }, "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "comprehend.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "comprehend.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "comprehend.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "comprehend.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "comprehend.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "comprehend-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "comprehend-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "comprehend.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "comprehend.eu-central-1.api.aws", + "tags" : [ "dualstack" 
] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "comprehend.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "comprehend.eu-west-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -5904,18 +6200,36 @@ "variants" : [ { "hostname" : "comprehend-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "comprehend-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "comprehend.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "comprehend-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "comprehend-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "comprehend.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "comprehend-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "comprehend-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "comprehend.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -7931,6 +8245,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "ds-fips.ca-central-1.amazonaws.com", @@ -7996,6 +8311,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -9859,6 +10175,7 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-south-1" : { }, @@ -9937,7 +10254,12 @@ } }, "endpoints" : { - "us-east-1" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "entitlement-marketplace.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + } } }, "es" : { @@ 
-10185,29 +10507,144 @@ }, "events" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ap-southeast-7" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "events.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "events.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "events.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "events.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "events.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "events.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "events.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "events.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "events.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "events.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "events.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + 
"variants" : [ { + "hostname" : "events.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "events.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "events.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "events.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "events.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "events.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "events.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "events.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "events.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "events.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "events.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "events.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -10236,33 +10673,82 @@ "deprecated" : true, "hostname" : "events-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "mx-central-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "events.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "events.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : 
[ { + "hostname" : "events.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "events.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "events.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "events-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "events-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "events.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "events-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "events-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "events.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "events-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "events-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "events.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "events-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "events-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "events.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -11912,6 +12398,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { "variants" : [ { @@ -12049,6 +12536,9 @@ "variants" : [ { "hostname" : "iam-fips.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "iam.global.api.aws", + "tags" : [ "dualstack" ] } ] }, "aws-global-fips" : { @@ -13682,6 +14172,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : 
"kinesisanalytics-fips.ca-central-1.amazonaws.com", @@ -13747,6 +14238,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -14295,6 +14787,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "lakeformation.ca-central-1.api.aws", @@ -14401,6 +14899,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "lakeformation.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "lakeformation.sa-east-1.api.aws", @@ -14707,6 +15211,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -14748,6 +15253,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -14873,6 +15379,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -14914,6 +15421,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -15873,18 +16381,78 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-4" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "mediapackagev2-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "mediapackagev2-fips.ca-central-1.amazonaws.com" + }, + 
"fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "mediapackagev2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "mediapackagev2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "mediapackagev2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "mediapackagev2-fips.us-west-2.amazonaws.com" + }, "me-central-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "mediapackagev2-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "mediapackagev2-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "mediapackagev2-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "mediapackagev2-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "mediastore" : { @@ -16006,34 +16574,174 @@ } }, "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" 
: { }, - "us-west-2" : { } + "af-south-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "metering-marketplace.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "metering-marketplace.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + 
"variants" : [ { + "hostname" : "metering-marketplace.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "metering-marketplace.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "metering-marketplace.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "metering-marketplace.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "il-central-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "metering-marketplace.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "metering-marketplace.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + } } }, "metrics.sagemaker" : { @@ -16423,6 +17131,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, 
"ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -16464,6 +17173,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -16626,6 +17336,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "network-firewall-fips.ca-central-1.amazonaws.com", @@ -16679,6 +17390,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -16716,6 +17428,12 @@ "variants" : [ { "hostname" : "networkmanager-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "networkmanager-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "networkmanager.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "fips-aws-global" : { @@ -16953,6 +17671,12 @@ }, "hostname" : "oidc.ap-southeast-4.amazonaws.com" }, + "ap-southeast-5" : { + "credentialScope" : { + "region" : "ap-southeast-5" + }, + "hostname" : "oidc.ap-southeast-5.amazonaws.com" + }, "ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -17180,6 +17904,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "sa-east-1" : { }, @@ -17802,6 +18527,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "polly.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "polly-fips.ca-central-1.amazonaws.com", @@ -18015,6 +18746,12 @@ }, "hostname" : "portal.sso.ap-southeast-4.amazonaws.com" }, + "ap-southeast-5" : { + "credentialScope" : { + "region" : "ap-southeast-5" + }, + "hostname" : "portal.sso.ap-southeast-5.amazonaws.com" + }, "ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -19373,15 +20110,46 @@ }, 
"rekognition" : { "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "rekognition.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "rekognition.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "rekognition.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "rekognition.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "rekognition.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "rekognition-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rekognition-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rekognition.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-central-1-fips" : { @@ -19391,11 +20159,36 @@ "deprecated" : true, "hostname" : "rekognition-fips.ca-central-1.amazonaws.com" }, - "eu-central-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "il-central-1" : { }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "rekognition.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "rekognition.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "rekognition.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "rekognition.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "il-central-1" : { + "variants" : [ { + "hostname" : "rekognition.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, 
"rekognition-fips.ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -19485,6 +20278,12 @@ "variants" : [ { "hostname" : "rekognition-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rekognition-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rekognition.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-1-fips" : { @@ -19498,6 +20297,12 @@ "variants" : [ { "hostname" : "rekognition-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rekognition-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rekognition.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2-fips" : { @@ -19511,6 +20316,12 @@ "variants" : [ { "hostname" : "rekognition-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rekognition-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rekognition.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1-fips" : { @@ -19524,6 +20335,12 @@ "variants" : [ { "hostname" : "rekognition-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rekognition-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rekognition.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2-fips" : { @@ -19672,6 +20489,8 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "resource-explorer-2-fips.ca-central-1.amazonaws.com", @@ -19743,6 +20562,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -20069,12 +20889,18 @@ }, "ca-central-1" : { "variants" : [ { + "hostname" : "route53profiles-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : 
"route53profiles.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { + "hostname" : "route53profiles-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "route53profiles.ca-west-1.api.aws", "tags" : [ "dualstack" ] } ] @@ -20153,24 +20979,36 @@ }, "us-east-1" : { "variants" : [ { + "hostname" : "route53profiles-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "route53profiles.us-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { + "hostname" : "route53profiles-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "route53profiles.us-east-2.api.aws", "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { + "hostname" : "route53profiles-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "route53profiles.us-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { + "hostname" : "route53profiles-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "route53profiles.us-west-2.api.aws", "tags" : [ "dualstack" ] } ] @@ -20182,23 +21020,94 @@ "protocols" : [ "https" ] }, "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ap-southeast-7" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "route53resolver.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "route53resolver.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "route53resolver.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + 
"hostname" : "route53resolver.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "route53resolver.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "route53resolver.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "route53resolver.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "route53resolver.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "route53resolver.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "route53resolver.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "route53resolver.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "route53resolver.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "route53resolver.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "route53resolver-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "route53resolver-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "route53resolver.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-central-1-fips" : { @@ -20212,6 +21121,12 @@ "variants" : [ { "hostname" : "route53resolver-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "route53resolver-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "route53resolver.ca-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1-fips" : { @@ 
-20221,23 +21136,94 @@ "deprecated" : true, "hostname" : "route53resolver-fips.ca-west-1.amazonaws.com" }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "mx-central-1" : { }, - "sa-east-1" : { }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "route53resolver.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "route53resolver.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "route53resolver.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "route53resolver.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "route53resolver.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "route53resolver.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "route53resolver.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "route53resolver.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "il-central-1" : { + "variants" : [ { + "hostname" : "route53resolver.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "route53resolver.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "route53resolver.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "route53resolver.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + 
"hostname" : "route53resolver.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "route53resolver-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "route53resolver-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "route53resolver.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-1-fips" : { @@ -20251,6 +21237,12 @@ "variants" : [ { "hostname" : "route53resolver-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "route53resolver-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "route53resolver.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2-fips" : { @@ -20264,6 +21256,12 @@ "variants" : [ { "hostname" : "route53resolver-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "route53resolver-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "route53resolver.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1-fips" : { @@ -20277,6 +21275,12 @@ "variants" : [ { "hostname" : "route53resolver-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "route53resolver-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "route53resolver.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2-fips" : { @@ -21942,24 +22946,36 @@ "variants" : [ { "hostname" : "securitylake-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "securitylake-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "securitylake-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "securitylake-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "securitylake-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : 
"securitylake-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "securitylake-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "securitylake-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] } ] } } @@ -22891,36 +23907,138 @@ }, "sms-voice" : { "endpoints" : { - "af-south-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "sms-voice.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "sms-voice.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "sms-voice.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "sms-voice.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "sms-voice.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "sms-voice.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "sms-voice.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "sms-voice.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "sms-voice.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "sms-voice.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "sms-voice-fips.ca-central-1.amazonaws.com", "tags" : [ 
"fips" ] + }, { + "hostname" : "sms-voice-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "sms-voice.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { "hostname" : "sms-voice-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sms-voice-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "sms-voice.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "sms-voice.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "sms-voice.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "sms-voice.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "sms-voice.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "sms-voice.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "sms-voice.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "sms-voice.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "sms-voice.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -22963,32 +24081,76 @@ "deprecated" : true, "hostname" : "sms-voice-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : 
"sms-voice.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "sms-voice.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "sms-voice.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "sms-voice.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "sms-voice-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sms-voice-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "sms-voice.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "sms-voice-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sms-voice-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "sms-voice.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "sms-voice-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sms-voice-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "sms-voice.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "sms-voice-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sms-voice-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "sms-voice.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -23689,39 +24851,150 @@ "sslCommonName" : "{region}.queue.{dnsSuffix}" }, "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - 
"ap-southeast-7" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "sqs.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "sqs.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "sqs.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "sqs.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "sqs.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "sqs.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "sqs.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "sqs.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "sqs.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "sqs.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "sqs.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "sqs.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "sqs.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "sqs-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sqs.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { "hostname" : "sqs-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sqs.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + 
"eu-central-1" : { + "variants" : [ { + "hostname" : "sqs.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "sqs.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "sqs.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "sqs.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "sqs.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "sqs.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "sqs.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "sqs.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -23764,34 +25037,71 @@ "deprecated" : true, "hostname" : "sqs-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "mx-central-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "sqs.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "sqs.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "sqs.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "sqs.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "sqs.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, 
"us-east-1" : { "sslCommonName" : "queue.{dnsSuffix}", "variants" : [ { "hostname" : "sqs-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sqs.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "sqs-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sqs.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "sqs-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sqs.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "sqs-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sqs.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -24467,6 +25777,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -25661,21 +26972,72 @@ }, "transcribestreaming" : { "endpoints" : { - "af-south-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "transcribestreaming.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "transcribestreaming.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } 
] + }, "ca-central-1" : { "variants" : [ { "hostname" : "transcribestreaming-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "transcribestreaming-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "transcribestreaming.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "transcribestreaming.eu-west-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -25704,23 +27066,46 @@ "deprecated" : true, "hostname" : "transcribestreaming-fips.us-west-2.amazonaws.com" }, - "sa-east-1" : { }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "transcribestreaming-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "transcribestreaming-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "transcribestreaming.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "transcribestreaming-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "transcribestreaming-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "transcribestreaming.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "transcribestreaming-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "transcribestreaming-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + 
}, { + "hostname" : "transcribestreaming.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -27767,16 +29152,16 @@ "applicationinsights" : { "endpoints" : { "cn-north-1" : { - "credentialScope" : { - "region" : "cn-north-1" - }, - "hostname" : "applicationinsights.cn-north-1.amazonaws.com.cn" + "variants" : [ { + "hostname" : "applicationinsights.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] }, "cn-northwest-1" : { - "credentialScope" : { - "region" : "cn-northwest-1" - }, - "hostname" : "applicationinsights.cn-northwest-1.amazonaws.com.cn" + "variants" : [ { + "hostname" : "applicationinsights.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] } } }, @@ -27798,8 +29183,18 @@ }, "appsync" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "appsync.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "appsync.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "arc-zonal-shift" : { @@ -28265,7 +29660,11 @@ "region" : "cn-northwest-1" }, "hostname" : "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", - "protocols" : [ "https" ] + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "entitlement-marketplace.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] } } }, @@ -28287,8 +29686,18 @@ }, "events" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "events.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "events.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "firehose" : { @@ -28885,8 +30294,18 @@ "protocols" : [ "https" ] }, "endpoints" : { - "cn-north-1" : { }, - 
"cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "route53resolver.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "route53resolver.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "runtime.sagemaker" : { @@ -29120,8 +30539,18 @@ "sslCommonName" : "{region}.queue.{dnsSuffix}" }, "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "sqs.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "sqs.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "ssm" : { @@ -29244,8 +30673,18 @@ }, "transcribestreaming" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "transcribestreaming.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "transfer" : { @@ -29835,17 +31274,43 @@ }, "applicationinsights" : { "endpoints" : { - "us-gov-east-1" : { + "fips-us-gov-east-1" : { "credentialScope" : { "region" : "us-gov-east-1" }, - "hostname" : "applicationinsights.us-gov-east-1.amazonaws.com" + "deprecated" : true, + "hostname" : "applicationinsights-fips.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1" : { + "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" }, - "hostname" : "applicationinsights.us-gov-west-1.amazonaws.com" + "deprecated" : true, + "hostname" : "applicationinsights-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "applicationinsights-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : 
"applicationinsights-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "applicationinsights.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "applicationinsights-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "applicationinsights-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "applicationinsights.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] } } }, @@ -30490,6 +31955,12 @@ "variants" : [ { "hostname" : "comprehend-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "comprehend-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "comprehend.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -30823,8 +32294,14 @@ "endpoints" : { "us-gov-east-1" : { "variants" : [ { + "hostname" : "dlm-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "dlm.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-east-1-fips" : { @@ -30836,8 +32313,14 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "dlm-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "dlm.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1-fips" : { @@ -32617,8 +34100,18 @@ } }, "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "metering-marketplace.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + } } }, "metrics.sagemaker" : { @@ -32810,6 +34303,12 @@ "variants" : [ { "hostname" : 
"networkmanager.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "networkmanager.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { + "hostname" : "networkmanager.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "fips-aws-us-gov-global" : { @@ -33275,6 +34774,12 @@ "variants" : [ { "hostname" : "rekognition-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rekognition-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rekognition.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1-fips" : { @@ -33426,12 +34931,18 @@ "endpoints" : { "us-gov-east-1" : { "variants" : [ { + "hostname" : "route53profiles-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "route53profiles.us-gov-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "route53profiles-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "route53profiles.us-gov-west-1.api.aws", "tags" : [ "dualstack" ] } ] @@ -33444,6 +34955,12 @@ "variants" : [ { "hostname" : "route53resolver.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "route53resolver.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + }, { + "hostname" : "route53resolver.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-gov-east-1-fips" : { @@ -33454,6 +34971,12 @@ "variants" : [ { "hostname" : "route53resolver.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "route53resolver.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { + "hostname" : "route53resolver.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-gov-west-1-fips" : { @@ -33740,6 +35263,9 @@ "variants" : [ { "hostname" : "securitylake.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "securitylake.us-gov-east-1.api.aws", + "tags" : [ 
"dualstack", "fips" ] } ] }, "us-gov-east-1-fips" : { @@ -33753,6 +35279,9 @@ "variants" : [ { "hostname" : "securitylake.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "securitylake.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] }, "us-gov-west-1-fips" : { @@ -34058,12 +35587,24 @@ "variants" : [ { "hostname" : "sms-voice-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sms-voice-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "sms-voice.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "sms-voice-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "sms-voice-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "sms-voice.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -34088,12 +35629,24 @@ "variants" : [ { "hostname" : "snowball-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "snowball-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -34137,19 +35690,33 @@ } ] }, "endpoints" : { - "us-gov-east-1" : { + "fips-us-gov-east-1" : { "credentialScope" : { "region" : "us-gov-east-1" }, + "deprecated" : true, "hostname" : "sqs.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1" : { + "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" }, - "hostname" : "sqs.us-gov-west-1.amazonaws.com", + "deprecated" : true, + "hostname" : "sqs.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + 
"variants" : [ { + "hostname" : "sqs.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { "protocols" : [ "http", "https" ], - "sslCommonName" : "{region}.queue.{dnsSuffix}" + "sslCommonName" : "{region}.queue.{dnsSuffix}", + "variants" : [ { + "hostname" : "sqs.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] } } }, @@ -34570,12 +36137,24 @@ "variants" : [ { "hostname" : "transcribestreaming-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "transcribestreaming-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "transcribestreaming.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "transcribestreaming-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "transcribestreaming-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "transcribestreaming.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -35372,10 +36951,33 @@ }, "glacier" : { "endpoints" : { + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-iso-east-1.c2s.ic.gov" + }, + "fips-us-iso-west-1" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-iso-west-1.c2s.ic.gov" + }, "us-iso-east-1" : { - "protocols" : [ "http", "https" ] + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "glacier-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] }, - "us-iso-west-1" : { } + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "glacier-fips.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + } } }, "glue" : { @@ -35505,7 +37107,8 @@ }, "oam" : { "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "organizations" : { @@ -35813,10 +37416,33 @@ }, "sqs" : { "endpoints" 
: { + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "sqs.us-iso-east-1.c2s.ic.gov" + }, + "fips-us-iso-west-1" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "sqs.us-iso-west-1.c2s.ic.gov" + }, "us-iso-east-1" : { - "protocols" : [ "http", "https" ] + "protocols" : [ "http", "https" ], + "variants" : [ { + "hostname" : "sqs.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] }, - "us-iso-west-1" : { } + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "sqs.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + } } }, "ssm" : { @@ -36145,6 +37771,11 @@ } } }, + "codebuild" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "codedeploy" : { "endpoints" : { "us-isob-east-1" : { } @@ -36331,7 +37962,19 @@ }, "glacier" : { "endpoints" : { - "us-isob-east-1" : { } + "fips-us-isob-east-1" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-isob-east-1.sc2s.sgov.gov" + }, + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "glacier-fips.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + } } }, "health" : { @@ -36647,7 +38290,19 @@ "sslCommonName" : "{region}.queue.{dnsSuffix}" }, "endpoints" : { - "us-isob-east-1" : { } + "fips-us-isob-east-1" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "sqs.us-isob-east-1.sc2s.sgov.gov" + }, + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "sqs.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + } } }, "ssm" : { @@ -36793,7 +38448,491 @@ "description" : "EU ISOE West" } }, - "services" : { } + "services" : { + "access-analyzer" : { + "endpoints" : { + "eu-isoe-west-1" : { + "variants" : [ { + "hostname" : "access-analyzer.eu-isoe-west-1.api.cloud-aws.adc-e.uk", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "acm" : { + "endpoints" : { + 
"eu-isoe-west-1" : { } + } + }, + "acm-pca" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "api.ecr" : { + "endpoints" : { + "eu-isoe-west-1" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "hostname" : "api.ecr.eu-isoe-west-1.cloud.adc-e.uk" + } + } + }, + "api.pricing" : { + "defaults" : { + "credentialScope" : { + "service" : "pricing" + } + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "appconfig" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "appconfigdata" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "application-autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "arc-zonal-shift" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "athena" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "autoscaling" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "batch" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "budgets" : { + "endpoints" : { + "aws-iso-e-global" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "hostname" : "budgets.global.cloud.adc-e.uk" + }, + "eu-isoe-west-1" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "hostname" : "budgets.global.cloud.adc-e.uk" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-e-global" + }, + "cloudcontrolapi" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "cloudformation" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "cloudtrail" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "codedeploy" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "compute-optimizer" : { + "endpoints" : { + "eu-isoe-west-1" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "hostname" : "compute-optimizer.eu-isoe-west-1.cloud.adc-e.uk" + } + } + }, + "config" : 
{ + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "cost-optimization-hub" : { + "endpoints" : { + "eu-isoe-west-1" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "hostname" : "cost-optimization-hub.eu-isoe-west-1.cloud.adc-e.uk" + } + } + }, + "directconnect" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "dlm" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "ds" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "dynamodb" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "ebs" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "ec2" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "ecs" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "elasticache" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "elasticfilesystem" : { + "endpoints" : { + "eu-isoe-west-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.eu-isoe-west-1.cloud.adc-e.uk", + "tags" : [ "fips" ] + } ] + }, + "fips-eu-isoe-west-1" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + } + }, + "elasticloadbalancing" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "elasticmapreduce" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "emr-serverless" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "es" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "events" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "firehose" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "glue" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "kms" : { + "endpoints" : { + 
"ProdFips" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-isoe-west-1.cloud.adc-e.uk" + }, + "eu-isoe-west-1" : { + "variants" : [ { + "hostname" : "kms-fips.eu-isoe-west-1.cloud.adc-e.uk", + "tags" : [ "fips" ] + } ] + }, + "eu-isoe-west-1-fips" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "deprecated" : true, + "hostname" : "kms-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + } + }, + "lakeformation" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "lambda" : { + "endpoints" : { + "eu-isoe-west-1" : { + "variants" : [ { + "hostname" : "lambda.eu-isoe-west-1.api.cloud-aws.adc-e.uk", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "logs" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "monitoring" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "oam" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "organizations" : { + "endpoints" : { + "aws-iso-e-global" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "hostname" : "organizations.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-e-global" + }, + "pi" : { + "endpoints" : { + "eu-isoe-west-1" : { + "protocols" : [ "https" ] + } + } + }, + "pipes" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "ram" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "rbin" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "rds" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "redshift" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "resource-groups" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "route53" : { + "endpoints" : { + "aws-iso-e-global" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "hostname" : "route53.cloud.adc-e.uk" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-e-global" + }, + "route53resolver" : { + "endpoints" : { + "eu-isoe-west-1" : 
{ } + } + }, + "s3" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "signatureVersions" : [ "s3v4" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "savingsplans" : { + "endpoints" : { + "aws-iso-e-global" : { + "credentialScope" : { + "region" : "eu-isoe-west-1" + }, + "hostname" : "savingsplans.cloud.adc-e.uk" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-e-global" + }, + "scheduler" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "servicecatalog" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "servicediscovery" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "servicequotas" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "sns" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "sqs" : { + "defaults" : { + "protocols" : [ "http", "https" ], + "sslCommonName" : "{region}.queue.{dnsSuffix}" + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "ssm" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "states" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "streams.dynamodb" : { + "defaults" : { + "credentialScope" : { + "service" : "dynamodb" + }, + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "sts" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "swf" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "synthetics" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "tagging" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, + "xray" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + } + } }, { "defaults" : { "hostname" : "{service}.{region}.{dnsSuffix}", @@ -37426,6 +39565,12 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-iso-f-global" }, + "route53profiles" : { + "endpoints" : { + "us-isof-east-1" : { }, + "us-isof-south-1" : { } + } + }, 
"route53resolver" : { "endpoints" : { "us-isof-east-1" : { }, @@ -37468,16 +39613,8 @@ }, "secretsmanager" : { "endpoints" : { - "us-isof-east-1" : { - "variants" : [ { - "tags" : [ "dualstack" ] - } ] - }, - "us-isof-south-1" : { - "variants" : [ { - "tags" : [ "dualstack" ] - } ] - } + "us-isof-east-1" : { }, + "us-isof-south-1" : { } } }, "servicediscovery" : { @@ -37510,8 +39647,32 @@ "sslCommonName" : "{region}.queue.{dnsSuffix}" }, "endpoints" : { - "us-isof-east-1" : { }, - "us-isof-south-1" : { } + "fips-us-isof-east-1" : { + "credentialScope" : { + "region" : "us-isof-east-1" + }, + "deprecated" : true, + "hostname" : "sqs.us-isof-east-1.csp.hci.ic.gov" + }, + "fips-us-isof-south-1" : { + "credentialScope" : { + "region" : "us-isof-south-1" + }, + "deprecated" : true, + "hostname" : "sqs.us-isof-south-1.csp.hci.ic.gov" + }, + "us-isof-east-1" : { + "variants" : [ { + "hostname" : "sqs.us-isof-east-1.csp.hci.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-isof-south-1" : { + "variants" : [ { + "hostname" : "sqs.us-isof-south-1.csp.hci.ic.gov", + "tags" : [ "fips" ] + } ] + } } }, "ssm" : { @@ -37599,6 +39760,27 @@ } } } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "amazonaws.eu", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "dnsSuffix" : "amazonaws.eu", + "partition" : "aws-eusc", + "partitionName" : "AWS EUSC", + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", + "regions" : { + "eusc-de-east-1" : { + "description" : "EU (Germany)" + } + }, + "services" : { } } ], "version" : 3 } \ No newline at end of file diff --git a/codegen/aws-sdk-codegen/src/main/resources/aws/sdk/kotlin/codegen/partitions.json b/codegen/aws-sdk-codegen/src/main/resources/aws/sdk/kotlin/codegen/partitions.json index e19224f1b86..a2bfa6ead49 100644 --- 
a/codegen/aws-sdk-codegen/src/main/resources/aws/sdk/kotlin/codegen/partitions.json +++ b/codegen/aws-sdk-codegen/src/main/resources/aws/sdk/kotlin/codegen/partitions.json @@ -208,6 +208,9 @@ }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { + "aws-iso-e-global" : { + "description" : "AWS ISOE (Europe) global region" + }, "eu-isoe-west-1" : { "description" : "EU ISOE West" } @@ -234,6 +237,22 @@ "description" : "US ISOF SOUTH" } } + }, { + "id" : "aws-eusc", + "outputs" : { + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "amazonaws.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", + "regions" : { + "eusc-de-east-1" : { + "description" : "EU (Germany)" + } + } } ], "version" : "1.1" } \ No newline at end of file diff --git a/codegen/aws-sdk-codegen/src/test/kotlin/aws/sdk/kotlin/codegen/ModuleDocumentationIntegrationTest.kt b/codegen/aws-sdk-codegen/src/test/kotlin/aws/sdk/kotlin/codegen/ModuleDocumentationIntegrationTest.kt index d3911776bb7..b928f93b420 100644 --- a/codegen/aws-sdk-codegen/src/test/kotlin/aws/sdk/kotlin/codegen/ModuleDocumentationIntegrationTest.kt +++ b/codegen/aws-sdk-codegen/src/test/kotlin/aws/sdk/kotlin/codegen/ModuleDocumentationIntegrationTest.kt @@ -61,7 +61,7 @@ class ModuleDocumentationIntegrationTest { .shouldContainOnlyOnceWithDiff( """ ## Code Examples - To see full code examples, see the Test Service examples in the AWS code example library. 
See https://example.com + Explore code examples for Test Service in the AWS code example library """.trimIndent(), ) } diff --git a/codegen/sdk/aws-models/api-gateway.json b/codegen/sdk/aws-models/api-gateway.json index f744b3430f4..899f1988ccb 100644 --- a/codegen/sdk/aws-models/api-gateway.json +++ b/codegen/sdk/aws-models/api-gateway.json @@ -2688,7 +2688,7 @@ "endpointConfiguration": { "target": "com.amazonaws.apigateway#EndpointConfiguration", "traits": { - "smithy.api#documentation": "
The endpoint configuration of this DomainName showing the endpoint types of the domain name.
" + "smithy.api#documentation": "The endpoint configuration of this DomainName showing the endpoint types and IP address types of the domain name.
" } }, "tags": { @@ -3030,7 +3030,7 @@ "endpointConfiguration": { "target": "com.amazonaws.apigateway#EndpointConfiguration", "traits": { - "smithy.api#documentation": "The endpoint configuration of this RestApi showing the endpoint types of the API.
" + "smithy.api#documentation": "The endpoint configuration of this RestApi showing the endpoint types and IP address types of the API.
" } }, "policy": { @@ -5100,7 +5100,7 @@ "endpointConfiguration": { "target": "com.amazonaws.apigateway#EndpointConfiguration", "traits": { - "smithy.api#documentation": "The endpoint configuration of this DomainName showing the endpoint types of the domain name.
" + "smithy.api#documentation": "The endpoint configuration of this DomainName showing the endpoint types and IP address types of the domain name.
" } }, "domainNameStatus": { @@ -5285,6 +5285,12 @@ "smithy.api#documentation": "A list of endpoint types of an API (RestApi) or its custom domain name (DomainName). For an edge-optimized API and its custom domain name, the endpoint type is \"EDGE\". For a regional API and its custom domain name, the endpoint type is REGIONAL. For a private API, the endpoint type is PRIVATE.
The IP address types that can invoke an API (RestApi) or a DomainName. Use ipv4 to allow only IPv4 addresses to\n invoke an API or DomainName, or use dualstack to allow both IPv4 and IPv6 addresses to invoke an API or a DomainName. For the\n PRIVATE endpoint type, only dualstack is supported.
The endpoint configuration to indicate the types of endpoints an API (RestApi) or its custom domain name (DomainName) has.
" + "smithy.api#documentation": "The endpoint configuration to indicate the types of endpoints an API (RestApi) or its custom domain name (DomainName) has and the IP address types that can invoke it.
" } }, "com.amazonaws.apigateway#EndpointType": { @@ -9078,6 +9084,23 @@ "smithy.api#documentation": "The integration type. The valid value is HTTP for integrating an API method with an HTTP backend; AWS with any Amazon Web Services service endpoints; MOCK for testing without actually invoking the backend; HTTP_PROXY for integrating with the HTTP proxy integration; AWS_PROXY for integrating with the Lambda proxy integration.
The endpoint configuration of this RestApi showing the endpoint types of the API.
" + "smithy.api#documentation": "The endpoint configuration of this RestApi showing the endpoint types and IP address types of the API.
" } }, "policy": { diff --git a/codegen/sdk/aws-models/apigatewayv2.json b/codegen/sdk/aws-models/apigatewayv2.json index 6af15a673fc..fbcbe821b33 100644 --- a/codegen/sdk/aws-models/apigatewayv2.json +++ b/codegen/sdk/aws-models/apigatewayv2.json @@ -139,6 +139,13 @@ "smithy.api#jsonName": "importInfo" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke the API.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "Name": { "target": "com.amazonaws.apigatewayv2#StringWithLengthBetween1And128", "traits": { @@ -1876,6 +1883,13 @@ "smithy.api#jsonName": "disableExecuteApiEndpoint" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke the API.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "Name": { "target": "com.amazonaws.apigatewayv2#StringWithLengthBetween1And128", "traits": { @@ -2008,6 +2022,13 @@ "smithy.api#jsonName": "importInfo" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke the API.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "Name": { "target": "com.amazonaws.apigatewayv2#StringWithLengthBetween1And128", "traits": { @@ -4767,6 +4788,13 @@ "smithy.api#jsonName": "hostedZoneId" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke the domain name. Use ipv4 to allow only IPv4 addresses to invoke your domain name, or use dualstack to allow both IPv4 and IPv6 addresses to invoke your domain name.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "SecurityPolicy": { "target": "com.amazonaws.apigatewayv2#SecurityPolicy", "traits": { @@ -5230,6 +5258,13 @@ "smithy.api#jsonName": "importInfo" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke the API.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "Name": { "target": "com.amazonaws.apigatewayv2#StringWithLengthBetween1And128", "traits": { @@ -7650,6 +7685,13 @@ "smithy.api#jsonName": "importInfo" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke the API.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "Name": { "target": "com.amazonaws.apigatewayv2#StringWithLengthBetween1And128", "traits": { @@ -7977,6 +8019,26 @@ "smithy.api#documentation": "Represents an API method integration type.
" } }, + "com.amazonaws.apigatewayv2#IpAddressType": { + "type": "enum", + "members": { + "ipv4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ipv4" + } + }, + "dualstack": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "dualstack" + } + } + }, + "traits": { + "smithy.api#documentation": "The IP address types that can invoke your API or domain name.
" + } + }, "com.amazonaws.apigatewayv2#JWTConfiguration": { "type": "structure", "members": { @@ -8350,6 +8412,13 @@ "smithy.api#jsonName": "importInfo" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke the API.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "Name": { "target": "com.amazonaws.apigatewayv2#StringWithLengthBetween1And128", "traits": { @@ -9302,6 +9371,13 @@ "smithy.api#jsonName": "disableExecuteApiEndpoint" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke your API or domain name.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "Name": { "target": "com.amazonaws.apigatewayv2#StringWithLengthBetween1And128", "traits": { @@ -9416,6 +9492,13 @@ "smithy.api#jsonName": "importInfo" } }, + "IpAddressType": { + "target": "com.amazonaws.apigatewayv2#IpAddressType", + "traits": { + "smithy.api#documentation": "The IP address types that can invoke the API.
", + "smithy.api#jsonName": "ipAddressType" + } + }, "Name": { "target": "com.amazonaws.apigatewayv2#StringWithLengthBetween1And128", "traits": { diff --git a/codegen/sdk/aws-models/application-auto-scaling.json b/codegen/sdk/aws-models/application-auto-scaling.json index 152d7e04d7d..3441b2e3ae3 100644 --- a/codegen/sdk/aws-models/application-auto-scaling.json +++ b/codegen/sdk/aws-models/application-auto-scaling.json @@ -150,7 +150,7 @@ "name": "application-autoscaling" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "With Application Auto Scaling, you can configure automatic scaling for the following\n resources:
\nAmazon AppStream 2.0 fleets
\nAmazon Aurora Replicas
\nAmazon Comprehend document classification and entity recognizer endpoints
\nAmazon DynamoDB tables and global secondary indexes throughput capacity
\nAmazon ECS services
\nAmazon ElastiCache for Redis clusters (replication groups)
\nAmazon EMR clusters
\nAmazon Keyspaces (for Apache Cassandra) tables
\nLambda function provisioned concurrency
\nAmazon Managed Streaming for Apache Kafka broker storage
\nAmazon Neptune clusters
\nAmazon SageMaker endpoint variants
\nAmazon SageMaker inference components
\nAmazon SageMaker serverless endpoint provisioned concurrency
\nSpot Fleets (Amazon EC2)
\nPool of WorkSpaces
\nCustom resources provided by your own applications or services
\nTo learn more about Application Auto Scaling, see the Application Auto Scaling User\n Guide.
\n\n API Summary\n
\nThe Application Auto Scaling service API includes three key sets of actions:
\nRegister and manage scalable targets - Register Amazon Web Services or custom resources as scalable\n targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and\n retrieve information on existing scalable targets.
\nConfigure and manage automatic scaling - Define scaling policies to dynamically scale\n your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions,\n and retrieve your recent scaling activity history.
\nSuspend and resume scaling - Temporarily suspend and later resume automatic scaling by\n calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can\n suspend and resume (individually or in combination) scale-out activities that are\n triggered by a scaling policy, scale-in activities that are triggered by a scaling policy,\n and scheduled scaling.
\nWith Application Auto Scaling, you can configure automatic scaling for the following\n resources:
\nAmazon AppStream 2.0 fleets
\nAmazon Aurora Replicas
\nAmazon Comprehend document classification and entity recognizer endpoints
\nAmazon DynamoDB tables and global secondary indexes throughput capacity
\nAmazon ECS services
\nAmazon ElastiCache replication groups (Redis OSS and Valkey) and Memcached clusters
\nAmazon EMR clusters
\nAmazon Keyspaces (for Apache Cassandra) tables
\nLambda function provisioned concurrency
\nAmazon Managed Streaming for Apache Kafka broker storage
\nAmazon Neptune clusters
\nAmazon SageMaker endpoint variants
\nAmazon SageMaker inference components
\nAmazon SageMaker serverless endpoint provisioned concurrency
\nSpot Fleets (Amazon EC2)
\nPool of WorkSpaces
\nCustom resources provided by your own applications or services
\nTo learn more about Application Auto Scaling, see the Application Auto Scaling User\n Guide.
\n\n API Summary\n
\nThe Application Auto Scaling service API includes three key sets of actions:
\nRegister and manage scalable targets - Register Amazon Web Services or custom resources as scalable\n targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and\n retrieve information on existing scalable targets.
\nConfigure and manage automatic scaling - Define scaling policies to dynamically scale\n your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions,\n and retrieve your recent scaling activity history.
\nSuspend and resume scaling - Temporarily suspend and later resume automatic scaling by\n calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can\n suspend and resume (individually or in combination) scale-out activities that are\n triggered by a scaling policy, scale-in activities that are triggered by a scaling policy,\n and scheduled scaling.
\nThe identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scaling activity.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scaling activity.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n If you specify a scalable dimension, you must also specify a resource ID.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
Creates or updates a scaling policy for an Application Auto Scaling scalable target.
\nEach scalable target is identified by a service namespace, resource ID, and scalable\n dimension. A scaling policy applies to the scalable target identified by those three\n attributes. You cannot create a scaling policy until you have registered the resource as a\n scalable target.
\nMultiple scaling policies can be in force at the same time for the same scalable target.\n You can have one or more target tracking scaling policies, one or more step scaling\n policies, or both. However, there is a chance that multiple policies could conflict,\n instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives\n precedence to the policy that provides the largest capacity for both scale out and scale\n in. For example, if one policy increases capacity by 3, another policy increases capacity\n by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest\n calculated capacity (200% of 10 = 20) and scales out to 30.
\nWe recommend caution, however, when using target tracking scaling policies with step\n scaling policies because conflicts between these policies can cause undesirable behavior.\n For example, if the step scaling policy initiates a scale-in activity before the target\n tracking policy is ready to scale in, the scale-in activity will not be blocked. After the\n scale-in activity completes, the target tracking policy could instruct the scalable target\n to scale out again.
\nFor more information, see Target tracking scaling policies and Step scaling policies in the Application Auto Scaling User Guide.
\nIf a scalable target is deregistered, the scalable target is no longer available to\n use scaling policies. Any scaling policies that were specified for the scalable target\n are deleted.
\nCreates or updates a scaling policy for an Application Auto Scaling scalable target.
\nEach scalable target is identified by a service namespace, resource ID, and scalable\n dimension. A scaling policy applies to the scalable target identified by those three\n attributes. You cannot create a scaling policy until you have registered the resource as a\n scalable target.
\nMultiple scaling policies can be in force at the same time for the same scalable target.\n You can have one or more target tracking scaling policies, one or more step scaling\n policies, or both. However, there is a chance that multiple policies could conflict,\n instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives\n precedence to the policy that provides the largest capacity for both scale out and scale\n in. For example, if one policy increases capacity by 3, another policy increases capacity\n by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest\n calculated capacity (200% of 10 = 20) and scales out to 30.
\nWe recommend caution, however, when using target tracking scaling policies with step\n scaling policies because conflicts between these policies can cause undesirable behavior.\n For example, if the step scaling policy initiates a scale-in activity before the target\n tracking policy is ready to scale in, the scale-in activity will not be blocked. After the\n scale-in activity completes, the target tracking policy could instruct the scalable target\n to scale out again.
\nFor more information, see Target tracking scaling policies, Step scaling policies, and Predictive scaling policies \n in the Application Auto Scaling User Guide.
\nIf a scalable target is deregistered, the scalable target is no longer available to\n use scaling policies. Any scaling policies that were specified for the scalable target\n are deleted.
\nThe identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scaling policy type. This parameter is required if you are creating a scaling\n policy.
\nThe following policy types are supported:
\n\n TargetTrackingScaling—Not supported for Amazon EMR.
\n StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or\n Neptune.
For more information, see Target\n tracking scaling policies and Step scaling policies in the Application Auto Scaling User Guide.
" + "smithy.api#documentation": "The scaling policy type. This parameter is required if you are creating a scaling\n policy.
\nThe following policy types are supported:
\n\n TargetTrackingScaling—Not supported for Amazon EMR.
\n StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or\n Neptune.
\n PredictiveScaling—Only supported for Amazon ECS.
For more information, see Target\n tracking scaling policies, Step scaling policies, and Predictive scaling policies \n in the Application Auto Scaling User Guide.
" } }, "StepScalingPolicyConfiguration": { @@ -3315,14 +3327,14 @@ "ResourceId": { "target": "com.amazonaws.applicationautoscaling#ResourceIdMaxLen1600", "traits": { - "smithy.api#documentation": "The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scheduled action.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository (https://github.com/aws/aws-auto-scaling-custom-resource).
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource that is associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository (https://github.com/aws/aws-auto-scaling-custom-resource).
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource that is associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository (https://github.com/aws/aws-auto-scaling-custom-resource).
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository (https://github.com/aws/aws-auto-scaling-custom-resource).
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scalable target.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository (https://github.com/aws/aws-auto-scaling-custom-resource).
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension associated with the scalable target.\n This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scaling activity.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scaling activity.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scaling policy type.
\nThe following policy types are supported:
\n\n TargetTrackingScaling—Not supported for Amazon EMR
\n StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or\n Neptune.
\n PredictiveScaling—Only supported for Amazon ECS
The scaling policy type.
\nThe following policy types are supported:
\n\n TargetTrackingScaling—Not supported for Amazon EMR
\n StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or\n Neptune.
\n PredictiveScaling—Only supported for Amazon ECS
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The identifier of the resource associated with the scaling policy.\n This string consists of the resource type and unique identifier.
\nECS service - The resource type is service and the unique identifier is the cluster name \n and service name. Example: service/my-cluster/my-service.
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the \n Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID.\n Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name.\n Example: fleet/sample-fleet.
DynamoDB table - The resource type is table and the unique identifier is the table name. \n Example: table/my-table.
DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. \n Example: table/my-table/index/my-table-index.
Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name.\n Example: cluster:my-db-cluster.
SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information\n is available in our GitHub\n repository.
Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.
Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. \n Example: function:my-function:prod or function:my-function:1.
Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. \n Example: keyspace/mykeyspace/table/mytable.
Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. \n Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name.\n Example: replication-group/mycluster.
Amazon ElastiCache cache cluster - The resource type is cache-cluster and the unique identifier is the cache cluster name.\n Example: cache-cluster/mycluster.
Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster.
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID.\n Example: endpoint/my-end-point/variant/KMeansClustering.
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID.\n Example: inference-component/my-inference-component.
Pool of WorkSpaces - The resource type is workspacespool and the unique identifier is the pool ID. \n Example: workspacespool/wspool-123456.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
\n\n ecs:service:DesiredCount - The task count of an ECS service.
\n elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.
\n ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet.
\n appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet.
\n dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.
\n dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.
\n dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.
\n dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.
\n rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.
\n sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant.
\n custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.
\n comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.
\n comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint.
\n lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.
\n cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.
\n cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.
\n kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.
\n elasticache:cache-cluster:Nodes - The number of nodes for an Amazon ElastiCache cache cluster.
\n elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group.
\n elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group.
\n neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster.
\n sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a SageMaker serverless endpoint.
\n sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component.
\n workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the WorkSpaces in the pool.
This is a string-to-string map. It can \n include the following fields.
\n\n Type designates the type of object this is.
\n ResourceType specifies the type of the resource. This field is used only\n when the value of the Type field is Resource or AWS::Resource.
\n Name specifies the name of the object. This is used only if the value of the Type field\n is Service, RemoteService, or AWS::Service.
\n Identifier identifies the resource objects of this resource. \n This is used only if the value of the Type field\n is Resource or AWS::Resource.
\n Environment specifies the location where this object is hosted, or what it belongs to.
The name of the called operation in the dependency.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Identifies the dependency using the DependencyKeyAttributes and DependencyOperationName.
When creating a service dependency SLO, you must specify the KeyAttributes of the service, and the DependencyConfig for the dependency. You can specify the OperationName of the service, from which it calls the dependency. Alternatively, \n you can exclude OperationName and the SLO will monitor all of the service's operations that call the dependency.
Identifies the dependency using the DependencyKeyAttributes and DependencyOperationName.
Use this optional field to only include SLOs with the specified metric source types in the output. Supported types are:
\nService operation
\nService dependency
\nCloudWatch metric
\nThis structure defines the metric that is used as the \"good request\" or \"bad request\"\n value for a request-based SLO. \n This value observed for the metric defined in \n TotalRequestCountMetric is divided by the number found for \n MonitoredRequestCountMetric to determine the percentage of successful requests that \n this SLO tracks.
Identifies the dependency using the DependencyKeyAttributes and DependencyOperationName.
Use this structure to define the metric that you want to use as the \"good request\" or \"bad request\"\n value for a request-based SLO. \n This value observed for the metric defined in \n TotalRequestCountMetric will be divided by the number found for \n MonitoredRequestCountMetric to determine the percentage of successful requests that \n this SLO tracks.
Identifies the dependency using the DependencyKeyAttributes and DependencyOperationName.
If this SLO monitors a CloudWatch metric or the result of a CloudWatch metric math expression, \n this structure includes the information about that metric or expression.
", "smithy.api#required": {} } + }, + "DependencyConfig": { + "target": "com.amazonaws.applicationsignals#DependencyConfig", + "traits": { + "smithy.api#documentation": "Identifies the dependency using the DependencyKeyAttributes and DependencyOperationName.
If this SLO monitors a CloudWatch metric or the result of a CloudWatch metric math expression, \n use this structure to specify that metric or expression.
" } + }, + "DependencyConfig": { + "target": "com.amazonaws.applicationsignals#DependencyConfig", + "traits": { + "smithy.api#documentation": "Identifies the dependency using the DependencyKeyAttributes and DependencyOperationName.
Each object in this array defines the length of the look-back window used to calculate one burn rate metric\n for this SLO. The burn rate measures how fast the service is consuming the error budget, relative to the attainment goal of the SLO.
" } + }, + "MetricSourceType": { + "target": "com.amazonaws.applicationsignals#MetricSourceType", + "traits": { + "smithy.api#documentation": "Displays the SLI metric source type for this SLO. Supported types are:
\nService operation
\nService dependency
\nCloudWatch metric
\nIf this service level objective is specific to a single operation, this \n field displays the name of that operation.
" } }, + "DependencyConfig": { + "target": "com.amazonaws.applicationsignals#DependencyConfig", + "traits": { + "smithy.api#documentation": "Identifies the dependency using the DependencyKeyAttributes and DependencyOperationName.
The date and time that this service level objective was created. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.
" } + }, + "EvaluationType": { + "target": "com.amazonaws.applicationsignals#EvaluationType", + "traits": { + "smithy.api#documentation": "Displays whether this is a period-based SLO or a request-based SLO.
" + } + }, + "MetricSourceType": { + "target": "com.amazonaws.applicationsignals#MetricSourceType", + "traits": { + "smithy.api#documentation": "Displays the SLI metric source type for this SLO. Supported types are:
\nService operation
\nService dependency
\nCloudWatch metric
\nThe time (in UTC) when the autoshift ended.
", - "smithy.api#required": {} + "smithy.api#documentation": "The time (in UTC) when the autoshift ended.
" } }, "startTime": { @@ -184,7 +177,7 @@ } }, "traits": { - "smithy.api#documentation": "Information about an autoshift. Amazon Web Services starts an autoshift to temporarily move traffic for a resource \n\t\t\taway from an Availability Zone in an Amazon Web Services Region\n\t\t\twhen Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers.\n\t\t\tYou can configure zonal autoshift in Route 53 ARC for managed resources in your Amazon Web Services account in a Region. \n\t\t\tSupported Amazon Web Services resources are automatically registered with Route 53 ARC.
\nAutoshifts are temporary. When the Availability Zone recovers, Amazon Web Services ends the autoshift, and \n\t\t\ttraffic for the resource is no longer directed to the other Availability Zones in the Region.
\nYou can stop an autoshift for a resource by disabling zonal autoshift.
" + "smithy.api#documentation": "Information about an autoshift. Amazon Web Services starts an autoshift to temporarily move traffic for a resource \n\t\t\taway from an Availability Zone in an Amazon Web Services Region\n\t\t\twhen Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers.\n\t\t\tYou can configure zonal autoshift in ARC for managed resources in your Amazon Web Services account in a Region. \n\t\t\tSupported Amazon Web Services resources are automatically registered with ARC.
\nAutoshifts are temporary. When the Availability Zone recovers, Amazon Web Services ends the autoshift, and \n\t\t\ttraffic for the resource is no longer directed to the other Availability Zones in the Region.
\nYou can stop an autoshift for a resource by disabling zonal autoshift.
" } }, "com.amazonaws.arczonalshift#AutoshiftTriggerResource": { @@ -379,6 +372,12 @@ "traits": { "smithy.api#enumValue": "PracticeConfigurationDoesNotExist" } + }, + "ZONAL_AUTOSHIFT_ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ZonalAutoshiftActive" + } } } }, @@ -393,7 +392,7 @@ } }, "alarmIdentifier": { - "target": "com.amazonaws.arczonalshift#ResourceArn", + "target": "com.amazonaws.arczonalshift#MetricIdentifier", "traits": { "smithy.api#documentation": "The Amazon Resource Name (ARN) for an Amazon CloudWatch alarm that you specify as a control condition for a practice run.
", "smithy.api#required": {} @@ -456,7 +455,7 @@ } ], "traits": { - "smithy.api#documentation": "A practice run configuration for zonal autoshift is required when you enable zonal autoshift.\n\t\t\tA practice run configuration includes specifications for blocked dates and blocked time windows,\n\t\tand for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an \n\t\t\toutcome alarm, to monitor application health during practice runs and, \n\t\t\toptionally, a blocking alarm, to block practice runs from starting.
\nWhen a resource has a practice run configuration, Route 53 ARC starts zonal shifts for the resource\n\t\t\tweekly, to shift traffic for practice runs. Practice runs help you to ensure that \n\t\t\tshifting away traffic from an Availability Zone during an autoshift is safe for your application.
\nFor more information, see \n\t\t\t\n\t\t\t\tConsiderations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.
", + "smithy.api#documentation": "A practice run configuration for zonal autoshift is required when you enable zonal autoshift.\n\t\t\tA practice run configuration includes specifications for blocked dates and blocked time windows,\n\t\tand for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an \n\t\t\toutcome alarm, to monitor application health during practice runs and, \n\t\t\toptionally, a blocking alarm, to block practice runs from starting.
\nWhen a resource has a practice run configuration, ARC starts zonal shifts for the resource\n\t\t\tweekly, to shift traffic for practice runs. Practice runs help you to ensure that \n\t\t\tshifting away traffic from an Availability Zone during an autoshift is safe for your application.
\nFor more information, see \n\t\t\t\n\t\t\t\tConsiderations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/configuration", @@ -477,25 +476,25 @@ "blockedWindows": { "target": "com.amazonaws.arczonalshift#BlockedWindows", "traits": { - "smithy.api#documentation": "Optionally, you can block Route 53 ARC from starting practice runs for specific windows of \n\t\t\tdays and times.
\nThe format for blocked windows is: DAY:HH:SS-DAY:HH:SS. Keep in mind, when you specify dates,\n\t\t\tthat dates and times for practice runs are in UTC. Also, be aware of potential time adjustments \n\t\t\tthat might be required for daylight saving time differences. Separate multiple blocked windows \n\t\t\twith spaces.
\nFor example, say you run business report summaries three days a week. For\n\t\t\tthis scenario, you might set the following recurring days and times as blocked windows, \n\t\t\tfor example: MON-20:30-21:30 WED-20:30-21:30 \n\t\t\t\tFRI-20:30-21:30.
Optionally, you can block ARC from starting practice runs for specific windows of \n\t\t\tdays and times.
\nThe format for blocked windows is: DAY:HH:SS-DAY:HH:SS. Keep in mind, when you specify dates,\n\t\t\tthat dates and times for practice runs are in UTC. Also, be aware of potential time adjustments \n\t\t\tthat might be required for daylight saving time differences. Separate multiple blocked windows \n\t\t\twith spaces.
\nFor example, say you run business report summaries three days a week. For\n\t\t\tthis scenario, you might set the following recurring days and times as blocked windows, \n\t\t\tfor example: MON-20:30-21:30 WED-20:30-21:30 \n\t\t\t\tFRI-20:30-21:30.
Optionally, you can block Route 53 ARC from starting practice runs for a resource\n\t\t\ton specific calendar dates.
\nThe format for blocked dates is: YYYY-MM-DD. Keep in mind, when you specify dates,\n\t\t\tthat dates and times for practice runs are in UTC. Separate multiple blocked \n\t\t\tdates with spaces.
\nFor example, if you have an application update scheduled to launch on May 1, 2024, and \n\t\t\tyou don't want practice runs to shift traffic away at that time, you could set a blocked date \n\t\t\tfor 2024-05-01.
Optionally, you can block ARC from starting practice runs for a resource\n\t\t\ton specific calendar dates.
\nThe format for blocked dates is: YYYY-MM-DD. Keep in mind, when you specify dates,\n\t\t\tthat dates and times for practice runs are in UTC. Separate multiple blocked \n\t\t\tdates with spaces.
\nFor example, if you have an application update scheduled to launch on May 1, 2024, and \n\t\t\tyou don't want practice runs to shift traffic away at that time, you could set a blocked date \n\t\t\tfor 2024-05-01.
An Amazon CloudWatch alarm that you can specify for zonal autoshift \n\t\t\tpractice runs. This alarm blocks Route 53 ARC from starting practice run zonal \n\t\t\tshifts, and ends a practice run that's in progress, when the alarm is in \n\t\t\tan ALARM state.
An Amazon CloudWatch alarm that you can specify for zonal autoshift \n\t\t\tpractice runs. This alarm blocks ARC from starting practice run zonal \n\t\t\tshifts, and ends a practice run that's in progress, when the alarm is in \n\t\t\tan ALARM state.
The outcome alarm for practice runs is a required\n\t\t\tAmazon CloudWatch alarm that you specify that ends a practice run when the \n\t\t\talarm is in an ALARM state.
Configure the alarm to monitor the health of your application \n\t\t\twhen traffic is shifted away from an Availability Zone during each weekly \n\t\t\tpractice run. You should configure the alarm to go into an ALARM state \n\t\t\tif your application is impacted by the zonal shift, and you want to stop the\n\t\t\tzonal shift, to let traffic for the resource return to the Availability Zone.
The outcome alarm for practice runs is a required\n\t\t\tAmazon CloudWatch alarm that you specify that ends a practice run when the \n\t\t\talarm is in an ALARM state.
Configure the alarm to monitor the health of your application \n\t\t\twhen traffic is shifted away from an Availability Zone during each \n\t\t\tpractice run. You should configure the alarm to go into an ALARM state \n\t\t\tif your application is impacted by the zonal shift, and you want to stop the\n\t\t\tzonal shift, to let traffic for the resource return to the Availability Zone.
Returns the status of autoshift observer notification. Autoshift observer\n\t\t\tnotification enables you to be notified, through Amazon EventBridge, when\n\t\t\tthere is an autoshift event for zonal autoshift.
\nIf the status is ENABLED,\n\t\t\tRoute 53 ARC includes all autoshift events when you use the EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tRoute 53 ARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.
For more information, see \n\t\t\t\n\t\t\t\tNotifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide.
", + "smithy.api#documentation": "Returns the status of the autoshift observer notification. Autoshift observer notifications notify you through Amazon EventBridge when there is an autoshift event for zonal autoshift. The status can be ENABLED or DISABLED. When ENABLED, a notification is sent when an autoshift is triggered. When DISABLED, notifications are not sent.\n\t\t
The status of autoshift observer notification. If the status is ENABLED,\n\t\t\tRoute 53 ARC includes all autoshift events when you use the Amazon EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tRoute 53 ARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.
The status of autoshift observer notification. If the status is ENABLED,\n\t\t\tARC includes all autoshift events when you use the Amazon EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.
Get information about a resource that's been registered for zonal shifts with Amazon Route 53 Application Recovery Controller in this Amazon Web Services Region. Resources that are registered for\n \t\tzonal shifts are managed resources in Route 53 ARC. You can start zonal shifts and configure zonal autoshift for managed resources.
\nAt this time, you can only start a zonal shift or configure zonal autoshift for Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
", + "smithy.api#documentation": "Get information about a resource that's been registered for zonal shifts with Amazon Route 53 Application Recovery Controller in this Amazon Web Services Region. Resources that are registered for\n \t\tzonal shifts are managed resources in ARC. You can start zonal shifts and configure zonal autoshift for managed resources.
", "smithy.api#http": { "method": "GET", "uri": "/managedresources/{resourceIdentifier}", @@ -846,7 +845,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns a list of autoshifts for an Amazon Web Services Region. By default, the call returns\n\t\t\tonly ACTIVE autoshifts. Optionally, you can specify the status parameter to return\n\t\t\tCOMPLETED autoshifts.\n\t\t
Returns the autoshifts for an Amazon Web Services Region. By default, the call returns\n\t\t\tonly ACTIVE autoshifts. Optionally, you can specify the status parameter to return\n\t\t\tCOMPLETED autoshifts.\n\t\t
Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the \n\t\t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n\t\t\tcall's NextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the \n\t\t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n\t\t\tcall's nextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the \n\t\t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n\t\t\tcall's NextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the \n\t\t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n\t\t\tcall's nextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the \n \t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n \t\tcall's NextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the \n \t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n \t\tcall's nextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the \n \t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n \t\tcall's NextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the \n \t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n \t\tcall's nextToken response to request the next page of results.
Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region.\n \t\tListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal shifts that Route 53 ARC started on \n \t\tyour behalf for zonal autoshift.
The ListZonalShifts operation does not list autoshifts. For more information about listing\n \t\tautoshifts, see ListAutoshifts.
Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region.\n \t\tListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal shifts that ARC started on \n \t\tyour behalf for zonal autoshift.
For more information about listing\n \t\tautoshifts, see ListAutoshifts.
", "smithy.api#http": { "method": "GET", "uri": "/zonalshifts", @@ -1035,7 +1034,7 @@ "nextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the \n \t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n \t\tcall's NextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the \n \t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n \t\tcall's nextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the \n \t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n \t\tcall's NextToken response to request the next page of results.
Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the \n \t\tprevious request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous \n \t\tcall's nextToken response to request the next page of results.
This status tracks whether a practice run configuration exists for a resource. When you configure\n\t\t\ta practice run for a resource so that a practice run configuration exists, Route 53 ARC sets this value to \n\t\t\tENABLED. If you have not configured a practice run for the resource, or you delete a practice \n\t\t\trun configuration, Route 53 ARC sets the value to DISABLED.
Route 53 ARC updates this status; you can't set a practice run status to ENABLED or \n\t\t\tDISABLED.
This status tracks whether a practice run configuration exists for a resource. When you configure\n\t\t\ta practice run for a resource so that a practice run configuration exists, ARC sets this value to \n\t\t\tENABLED. If you have not configured a practice run for the resource, or you delete a practice \n\t\t\trun configuration, ARC sets the value to DISABLED.
ARC updates this status; you can't set a practice run status to ENABLED or \n\t\t\tDISABLED.
A complex structure for a managed resource in an Amazon Web Services account with information about zonal shifts\n \t\tand autoshifts.
\nA managed resource is a load balancer that has been registered \n \t\twith Route 53 ARC by Elastic Load Balancing. You can start a zonal shift in Route 53 ARC for a managed resource to \n \t\ttemporarily move traffic for the resource away from an Availability Zone in an Amazon Web Services Region.\n \t\tYou can also configure zonal autoshift for a managed resource.
\nAt this time, managed resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
\nA complex structure for a managed resource in an Amazon Web Services account with information about zonal shifts\n \t\tand autoshifts.
\nA managed resource is a load balancer that has been registered \n \t\twith ARC by Elastic Load Balancing. You can start a zonal shift in ARC for a managed resource to \n \t\ttemporarily move traffic for the resource away from an Availability Zone in an Amazon Web Services Region.\n \t\tYou can also configure zonal autoshift for a managed resource.
\nAt this time, managed resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
\nWelcome to the API Reference Guide for zonal shift and zonal autoshift in Amazon Route 53 Application Recovery Controller (Route 53 ARC).
\nYou can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to\n\t\t\thelp your application recover quickly from an impairment in an Availability Zone. For example, \n\t\t\tyou can recover your application from a developer's bad code deployment or from an \n\t\t\tAmazon Web Services infrastructure failure in a single Availability Zone.
\nYou can also configure zonal autoshift for supported load balancer resources. Zonal autoshift \n\t\t\tis a capability in Route 53 ARC where you authorize Amazon Web Services to shift away application resource \n\t\t\ttraffic from an Availability Zone during events, on your behalf, to help reduce your time to recovery.\n\t\t\tAmazon Web Services starts an autoshift when internal telemetry indicates that there is an Availability \n\t\t\tZone impairment that could potentially impact customers.
\nTo help make sure that zonal autoshift is safe for your application, you must \n\t\t\talso configure practice runs when you enable zonal autoshift for a resource. Practice runs start \n\t\t\tweekly zonal shifts for a resource, to shift traffic for the resource away from an Availability Zone. \n\t\t\tPractice runs help you to make sure, on a regular basis, that you have enough capacity in all the \n\t\t\tAvailability Zones in an Amazon Web Services Region for your application to continue to operate normally \n\t\t\twhen traffic for a resource is shifted away from one Availability Zone.
\nBefore you configure practice runs or enable zonal autoshift, we strongly recommend\n\t\t\tthat you prescale your application resource capacity in all Availability Zones in the Region where \n\t\t\tyour application resources are deployed. You should not rely on scaling on demand when an \n\t\t\tautoshift or practice run starts. Zonal autoshift, including practice runs, works independently, \n\t\t\tand does not wait for auto scaling actions to complete. Relying on auto scaling, instead of \n\t\t\tpre-scaling, can result in loss of availability.
\nIf you use auto scaling to handle regular cycles of traffic, we strongly recommend that you configure \n\t\t\t\tthe minimum capacity of your auto scaling to continue operating normally with the loss of an \n\t\t\t\tAvailability Zone.
\nBe aware that Route 53 ARC does not inspect the health of individual resources. Amazon Web Services only starts an \n\t\t\tautoshift when Amazon Web Services telemetry detects that there is an Availability Zone impairment that could \n\t\t\tpotentially impact customers. In some cases, resources might be shifted away that are not experiencing \n\t\t\timpact.
\nFor more information about using zonal shift and zonal autoshift, see the \n\t\t\tAmazon Route 53 Application Recovery Controller \n\t\t\t\tDeveloper Guide.
", + "smithy.api#documentation": "Welcome to the API Reference Guide for zonal shift and zonal autoshift in Amazon Route 53 Application Recovery Controller (ARC).
\nYou can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to\n\t\t\thelp your application recover quickly from an impairment in an Availability Zone. For example, \n\t\t\tyou can recover your application from a developer's bad code deployment or from an \n\t\t\tAmazon Web Services infrastructure failure in a single Availability Zone.
\nYou can also configure zonal autoshift for supported load balancer resources. Zonal autoshift \n\t\t\tis a capability in ARC where you authorize Amazon Web Services to shift away application resource \n\t\t\ttraffic from an Availability Zone during events, on your behalf, to help reduce your time to recovery.\n\t\t\tAmazon Web Services starts an autoshift when internal telemetry indicates that there is an Availability \n\t\t\tZone impairment that could potentially impact customers.
\nTo help make sure that zonal autoshift is safe for your application, you must \n\t\t\talso configure practice runs when you enable zonal autoshift for a resource. Practice runs start \n\t\t\tweekly zonal shifts for a resource, to shift traffic for the resource away from an Availability Zone. \n\t\t\tPractice runs help you to make sure, on a regular basis, that you have enough capacity in all the \n\t\t\tAvailability Zones in an Amazon Web Services Region for your application to continue to operate normally \n\t\t\twhen traffic for a resource is shifted away from one Availability Zone.
\nBefore you configure practice runs or enable zonal autoshift, we strongly recommend\n\t\t\tthat you prescale your application resource capacity in all Availability Zones in the Region where \n\t\t\tyour application resources are deployed. You should not rely on scaling on demand when an \n\t\t\tautoshift or practice run starts. Zonal autoshift, including practice runs, works independently, \n\t\t\tand does not wait for auto scaling actions to complete. Relying on auto scaling, instead of \n\t\t\tpre-scaling, can result in loss of availability.
\nIf you use auto scaling to handle regular cycles of traffic, we strongly recommend that you configure \n\t\t\t\tthe minimum capacity of your auto scaling to continue operating normally with the loss of an \n\t\t\t\tAvailability Zone.
\nBe aware that ARC does not inspect the health of individual resources. Amazon Web Services only starts an \n\t\t\tautoshift when Amazon Web Services telemetry detects that there is an Availability Zone impairment that could \n\t\t\tpotentially impact customers. In some cases, resources might be shifted away that are not experiencing \n\t\t\timpact.
\nFor more information about using zonal shift and zonal autoshift, see the \n\t\t\tAmazon Route 53 Application Recovery Controller \n\t\t\t\tDeveloper Guide.
", "smithy.api#title": "AWS ARC - Zonal Shift", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1927,7 +1930,7 @@ "blockedWindows": { "target": "com.amazonaws.arczonalshift#BlockedWindows", "traits": { - "smithy.api#documentation": "An array of one or more windows of days and times that you can block Route 53 ARC\n\t\t\tfrom starting practice runs for a resource.
\nSpecify the blocked windows in UTC, using the format DAY:HH:MM-DAY:HH:MM, separated by \n\t\t\tspaces. For example, MON:18:30-MON:19:30 TUE:18:30-TUE:19:30.
An array of one or more windows of days and times that you can block ARC\n\t\t\tfrom starting practice runs for a resource.
\nSpecify the blocked windows in UTC, using the format DAY:HH:MM-DAY:HH:MM, separated by \n\t\t\tspaces. For example, MON:18:30-MON:19:30 TUE:18:30-TUE:19:30.
A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice\n\t\trun, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run \n\t\tconfiguration, Route 53 ARC shifts traffic for the resource weekly for practice runs.
\nPractice runs are required for zonal autoshift. The zonal shifts that Route 53 ARC starts for practice runs help you to ensure that \n\t\t\tshifting away traffic from an Availability Zone during an autoshift is safe for your application.
\nYou can update or delete a practice run configuration. Before you delete a practice run configuration, you\n\t\t\tmust disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.
" + "smithy.api#documentation": "A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice\n\t\trun, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run \n\t\tconfiguration, ARC shifts traffic for the resource weekly for practice runs.
\nPractice runs are required for zonal autoshift. The zonal shifts that ARC starts for practice runs help you to ensure that \n\t\t\tshifting away traffic from an Availability Zone during an autoshift is safe for your application.
\nYou can update or delete a practice run configuration. Before you delete a practice run configuration, you\n\t\t\tmust disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.
" } }, "com.amazonaws.arczonalshift#PracticeRunConfigurationResource": { @@ -2039,6 +2042,35 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.arczonalshift#ShiftType": { + "type": "enum", + "members": { + "ZONAL_SHIFT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ZONAL_SHIFT" + } + }, + "PRACTICE_RUN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRACTICE_RUN" + } + }, + "FIS_EXPERIMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FIS_EXPERIMENT" + } + }, + "ZONAL_AUTOSHIFT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ZONAL_AUTOSHIFT" + } + } + } + }, "com.amazonaws.arczonalshift#StartTime": { "type": "timestamp" }, @@ -2071,7 +2103,7 @@ } ], "traits": { - "smithy.api#documentation": "You start a zonal shift to temporarily move load balancer traffic away from an Availability Zone in an Amazon Web Services Region,\n \t\tto help your application recover immediately, for example, from a developer's bad code deployment or from an Amazon Web Services \n \t\tinfrastructure failure in a single Availability Zone. You can start a zonal shift in Route 53 ARC only for managed\n \t\tresources in your Amazon Web Services account in an Amazon Web Services Region. Resources are automatically registered with Route 53 ARC \n \t\tby Amazon Web Services services.
\nAt this time, you can only start a zonal shift for Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
\nWhen you start a zonal shift, traffic for the resource is no longer routed to the Availability Zone. The\n \t\tzonal shift is created immediately in Route 53 ARC. However, it can take a short time, typically up to a few minutes,\n \t\tfor existing, in-progress connections in the Availability Zone to complete.
\nFor more information, see Zonal shift\n \t\tin the Amazon Route 53 Application Recovery Controller Developer Guide.
", + "smithy.api#documentation": "You start a zonal shift to temporarily move load balancer traffic away from an Availability Zone in an Amazon Web Services Region,\n \t\tto help your application recover immediately, for example, from a developer's bad code deployment or from an Amazon Web Services \n \t\tinfrastructure failure in a single Availability Zone. You can start a zonal shift in ARC only for managed\n \t\tresources in your Amazon Web Services account in an Amazon Web Services Region. Resources are automatically registered with ARC \n \t\tby Amazon Web Services services.
\nAt this time, you can only start a zonal shift for Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.
\nWhen you start a zonal shift, traffic for the resource is no longer routed to the Availability Zone. The\n \t\tzonal shift is created immediately in ARC. However, it can take a short time, typically up to a few minutes,\n \t\tfor existing, in-progress connections in the Availability Zone to complete.
\nFor more information, see Zonal shift\n \t\tin the Amazon Route 53 Application Recovery Controller Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/zonalshifts", @@ -2099,7 +2131,7 @@ "expiresIn": { "target": "com.amazonaws.arczonalshift#ExpiresIn", "traits": { - "smithy.api#documentation": "The length of time that you want a zonal shift to be active, which Route 53 ARC converts to an expiry time (expiration time).\n\t\tZonal shifts are temporary. You can set a zonal shift to be active initially for up to three days (72 hours).
\nIf you want to still keep traffic away from an Availability Zone, you can update the \n\t\tzonal shift and set a new expiration. You can also cancel a zonal shift, before it expires, for example, if you're ready to \n\t\trestore traffic to the Availability Zone.
\nTo set a length of time for a zonal shift to be active, specify a whole number, and then one of the following, with no space:
\n\n A lowercase letter m: To specify that the value is in minutes.
\n\n A lowercase letter h: To specify that the value is in hours.
\nFor example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours).
The length of time that you want a zonal shift to be active, which ARC converts to an expiry time (expiration time).\n\t\tZonal shifts are temporary. You can set a zonal shift to be active initially for up to three days (72 hours).
\nIf you want to still keep traffic away from an Availability Zone, you can update the \n\t\tzonal shift and set a new expiration. You can also cancel a zonal shift, before it expires, for example, if you're ready to \n\t\trestore traffic to the Availability Zone.
\nTo set a length of time for a zonal shift to be active, specify a whole number, and then one of the following, with no space:
\n\n A lowercase letter m: To specify that the value is in minutes.
\n\n A lowercase letter h: To specify that the value is in hours.
\nFor example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours).
Update the status of autoshift observer notification. Autoshift observer\n\t\t\tnotification enables you to be notified, through Amazon EventBridge, when\n\t\t\tthere is an autoshift event for zonal autoshift.
\nIf the status is ENABLED,\n\t\t\tRoute 53 ARC includes all autoshift events when you use the EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tRoute 53 ARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.
For more information, see \n\t\t\t\n\t\t\t\tNotifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide.
", + "smithy.api#documentation": "Update the status of autoshift observer notification. Autoshift observer\n\t\t\tnotification enables you to be notified, through Amazon EventBridge, when\n\t\t\tthere is an autoshift event for zonal autoshift.
\nIf the status is ENABLED,\n\t\t\tARC includes all autoshift events when you use the EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.
For more information, see \n\t\t\t\n\t\t\t\tNotifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide.
", "smithy.api#http": { "method": "PUT", "uri": "/autoshift-observer-notification", @@ -2171,7 +2203,7 @@ "status": { "target": "com.amazonaws.arczonalshift#AutoshiftObserverNotificationStatus", "traits": { - "smithy.api#documentation": "The status to set for autoshift observer notification. If the status is ENABLED,\n\t\t\tRoute 53 ARC includes all autoshift events when you use the Amazon EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tRoute 53 ARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.
The status to set for autoshift observer notification. If the status is ENABLED,\n\t\t\tARC includes all autoshift events when you use the Amazon EventBridge pattern\n\t\t\tAutoshift In Progress. When the status is DISABLED,\n\t\t\tARC includes only autoshift events for autoshifts when one or more of your\n\t\t\tresources is included in the autoshift.
Add, change, or remove windows of days and times for when you can, optionally,\n\t\t\tblock Route 53 ARC from starting a practice run for a resource.
\nThe format for blocked windows is: DAY:HH:SS-DAY:HH:SS. Keep in mind, when you specify dates,\n\t\t\tthat dates and times for practice runs are in UTC. Also, be aware of potential time adjustments \n\t\t\tthat might be required for daylight saving time differences. Separate multiple blocked windows \n\t\t\twith spaces.
\nFor example, say you run business report summaries three days a week. For\n\t\t\tthis scenario, you might set the following recurring days and times as blocked windows, \n\t\t\tfor example: MON-20:30-21:30 WED-20:30-21:30 \n\t\t\t\tFRI-20:30-21:30.
Add, change, or remove windows of days and times for when you can, optionally,\n\t\t\tblock ARC from starting a practice run for a resource.
\nThe format for blocked windows is: DAY:HH:SS-DAY:HH:SS. Keep in mind, when you specify dates,\n\t\t\tthat dates and times for practice runs are in UTC. Also, be aware of potential time adjustments \n\t\t\tthat might be required for daylight saving time differences. Separate multiple blocked windows \n\t\t\twith spaces.
\nFor example, say you run business report summaries three days a week. For\n\t\t\tthis scenario, you might set the following recurring days and times as blocked windows, \n\t\t\tfor example: MON-20:30-21:30 WED-20:30-21:30 \n\t\t\t\tFRI-20:30-21:30.
Update an active zonal shift in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account. You can update a zonal shift to set a new expiration, or \n \tedit or replace the comment for the zonal shift.
", + "smithy.api#documentation": "Update an active zonal shift in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account. You can update a zonal shift to set a new expiration, or \n \t\tedit or replace the comment for the zonal shift.
", "smithy.api#http": { "method": "PATCH", "uri": "/zonalshifts/{zonalShiftId}", @@ -2458,7 +2490,7 @@ "expiresIn": { "target": "com.amazonaws.arczonalshift#ExpiresIn", "traits": { - "smithy.api#documentation": "The length of time that you want a zonal shift to be active, which Route 53 ARC converts to an expiry time (expiration time).\n \t\tZonal shifts are temporary. You can set a zonal shift to be active initially for up to three days (72 hours).
\nIf you want to still keep traffic away from an Availability Zone, you can update the \n \t\tzonal shift and set a new expiration. You can also cancel a zonal shift, before it expires, for example, if you're ready to \n \t\trestore traffic to the Availability Zone.
\nTo set a length of time for a zonal shift to be active, specify a whole number, and then one of the following, with no space:
\n\n A lowercase letter m: To specify that the value is in minutes.
\n\n A lowercase letter h: To specify that the value is in hours.
\nFor example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours).
The length of time that you want a zonal shift to be active, which ARC converts to an expiry time (expiration time).\n \t\tZonal shifts are temporary. You can set a zonal shift to be active initially for up to three days (72 hours).
\nIf you want to still keep traffic away from an Availability Zone, you can update the \n \t\tzonal shift and set a new expiration. You can also cancel a zonal shift, before it expires, for example, if you're ready to \n \t\trestore traffic to the Availability Zone.
\nTo set a length of time for a zonal shift to be active, specify a whole number, and then one of the following, with no space:
\n\n A lowercase letter m: To specify that the value is in minutes.
\n\n A lowercase letter h: To specify that the value is in hours.
\nFor example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours).
The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n\t\t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n\t\t\tto set a new expiration at any time.
\nWhen you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n\t\t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n\t\t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.
", + "smithy.api#documentation": "The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n\t\t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n\t\t\tto set a new expiration at any time.
\nWhen you start a zonal shift, you specify how long you want it to be active, which ARC converts \n\t\t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n\t\t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.
", "smithy.api#required": {} } }, @@ -2673,7 +2729,7 @@ "expiryTime": { "target": "com.amazonaws.arczonalshift#ExpiryTime", "traits": { - "smithy.api#documentation": "The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n \t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n \t\tto set a new expiration at any time.
\nWhen you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n \t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n \t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.
", + "smithy.api#documentation": "The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n \t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n \t\tto set a new expiration at any time.
\nWhen you start a zonal shift, you specify how long you want it to be active, which ARC converts \n \t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n \t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.
", "smithy.api#required": {} } }, @@ -2691,6 +2747,12 @@ "smithy.api#required": {} } }, + "shiftType": { + "target": "com.amazonaws.arczonalshift#ShiftType", + "traits": { + "smithy.api#documentation": "Defines the zonal shift type.
" + } + }, "practiceRunOutcome": { "target": "com.amazonaws.arczonalshift#PracticeRunOutcome", "traits": { @@ -2774,7 +2836,7 @@ "expiryTime": { "target": "com.amazonaws.arczonalshift#ExpiryTime", "traits": { - "smithy.api#documentation": "The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n \t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n \t\tto set a new expiration at any time.
\nWhen you start a zonal shift, you specify how long you want it to be active, which Route 53 ARC converts \n \t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n \t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.
", + "smithy.api#documentation": "The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. \n \t\tYou can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift \n \t\tto set a new expiration at any time.
\nWhen you start a zonal shift, you specify how long you want it to be active, which ARC converts \n \t\tto an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or\n \t\tjust wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.
", "smithy.api#required": {} } }, @@ -2799,6 +2861,12 @@ "smithy.api#required": {} } }, + "shiftType": { + "target": "com.amazonaws.arczonalshift#ShiftType", + "traits": { + "smithy.api#documentation": "Defines the zonal shift type.
" + } + }, "practiceRunOutcome": { "target": "com.amazonaws.arczonalshift#PracticeRunOutcome", "traits": { @@ -2807,7 +2875,7 @@ } }, "traits": { - "smithy.api#documentation": "Lists information about zonal shifts in Amazon Route 53 Application Recovery Controller, including zonal shifts that you start yourself and zonal shifts that Route 53 ARC starts\n \t\ton your behalf for practice runs with zonal autoshift.
\nZonal shifts are temporary, including customer-initiated zonal shifts and the zonal autoshift practice run zonal shifts that\n \t\tRoute 53 ARC starts weekly, on your behalf. A zonal shift that a customer starts can be active for up to three days (72 hours). A\n \t\tpractice run zonal shift has a 30 minute duration.
" + "smithy.api#documentation": "Lists information about zonal shifts in Amazon Route 53 Application Recovery Controller, including zonal shifts that you start yourself and zonal shifts that ARC starts\n \t\ton your behalf for practice runs with zonal autoshift.
\nZonal shifts are temporary, including customer-initiated zonal shifts and the zonal autoshift practice run zonal shifts that\n \t\tARC starts weekly, on your behalf. A zonal shift that a customer starts can be active for up to three days (72 hours). A\n \t\tpractice run zonal shift has a 30 minute duration.
" } }, "com.amazonaws.arczonalshift#ZonalShifts": { diff --git a/codegen/sdk/aws-models/batch.json b/codegen/sdk/aws-models/batch.json index a8548432ef8..08a921d2623 100644 --- a/codegen/sdk/aws-models/batch.json +++ b/codegen/sdk/aws-models/batch.json @@ -1904,7 +1904,7 @@ "tags": { "target": "com.amazonaws.batch#TagsMap", "traits": { - "smithy.api#documentation": "Key-value pair tags to be applied to Amazon EC2 resources that are launched in the compute\n environment. For Batch, these take the form of \"String1\": \"String2\", where\n String1 is the tag key and String2 is the tag value-for example,\n { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your\n Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to\n the compute environment. For more information, see Updating compute environments in the\n Batch User Guide. These tags aren't seen when using the Batch\n ListTagsForResource API operation.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
\nKey-value pair tags to be applied to Amazon EC2 resources that are launched in the compute\n environment. For Batch, these take the form of \"String1\": \"String2\", where\n String1 is the tag key and String2 is the tag value (for example,\n { \"Name\": \"Batch Instance - C4OnDemand\" }). This is helpful for recognizing your\n Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to\n the compute environment. For more information, see Updating compute environments in the\n Batch User Guide. These tags aren't seen when using the Batch\n ListTagsForResource API operation.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
\nKey-value pair tags to be applied to Amazon EC2 resources that are launched in the compute\n environment. For Batch, these take the form of \"String1\": \"String2\", where\n String1 is the tag key and String2 is the tag value-for example,\n { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your\n Batch instances in the Amazon EC2 console. These tags aren't seen when using the Batch\n ListTagsForResource API operation.
When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.
\nThis parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
\nKey-value pair tags to be applied to Amazon EC2 resources that are launched in the compute\n environment. For Batch, these take the form of \"String1\": \"String2\", where\n String1 is the tag key and String2 is the tag value (for example,\n { \"Name\": \"Batch Instance - C4OnDemand\" }). This is helpful for recognizing your\n Batch instances in the Amazon EC2 console. These tags aren't seen when using the Batch\n ListTagsForResource API operation.
When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.
\nThis parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
\nThe private repository authentication credentials to use.
" } + }, + "enableExecuteCommand": { + "target": "com.amazonaws.batch#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether execute command functionality is turned on for this task. If true, execute\n command functionality is turned on all the containers in the task.
The platform configuration for jobs that are running on Fargate resources. Jobs that are\n running on Amazon EC2 resources must not specify this parameter.
" } }, + "enableExecuteCommand": { + "target": "com.amazonaws.batch#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether execute command functionality is turned on for this task. If true, execute\n command functionality is turned on all the containers in the task.
The maximum number of vCPUs for an unmanaged compute environment. This parameter is only\n used for fair share scheduling to reserve vCPU capacity for new share identifiers. If this\n parameter isn't provided for a fair share job queue, no vCPU capacity is reserved.
\nThis parameter is only supported when the type parameter is set to\n UNMANAGED.
The maximum number of vCPUs for an unmanaged compute environment. This parameter is only\n used for fair-share scheduling to reserve vCPU capacity for new share identifiers. If this\n parameter isn't provided for a fair-share job queue, no vCPU capacity is reserved.
\nThis parameter is only supported when the type parameter is set to\n UNMANAGED.
The Amazon Resource Name (ARN) of the fair share scheduling policy. Job queues that don't have a scheduling policy are scheduled in a first-in, first-out (FIFO) model. After a job queue has a scheduling policy, it can be replaced but can't be removed.
\nThe format is\n aws:Partition:batch:Region:Account:scheduling-policy/Name\n .
An example is\n aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy.
A job queue without a scheduling policy is scheduled as a FIFO job queue and can't have a scheduling policy added. Jobs queues with a scheduling policy can have a maximum of 500 active fair share identifiers. When the limit has been reached, submissions of any jobs that add a new fair share identifier fail.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the fair-share scheduling policy. Job queues that don't have a fair-share scheduling policy are scheduled in a first-in, first-out (FIFO) model. After a job queue has a fair-share scheduling policy, it can be replaced but can't be removed.
\nThe format is\n aws:Partition:batch:Region:Account:scheduling-policy/Name\n .
An example is\n aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy.
A job queue without a fair-share scheduling policy is scheduled as a FIFO job queue and can't have a fair-share scheduling policy added. Jobs queues with a fair-share scheduling policy can have a maximum of 500 active share identifiers. When the limit has been reached, submissions of any jobs that add a new share identifier fail.
" } }, "priority": { @@ -3020,14 +3032,14 @@ "target": "com.amazonaws.batch#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The name of the scheduling policy. It can be up to 128 letters long. It can contain\n uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).
", + "smithy.api#documentation": "The name of the fair-share scheduling policy. It can be up to 128 letters long. It can contain\n uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).
", "smithy.api#required": {} } }, "fairsharePolicy": { "target": "com.amazonaws.batch#FairsharePolicy", "traits": { - "smithy.api#documentation": "The fair share policy of the scheduling policy.
" + "smithy.api#documentation": "The fair-share scheduling policy details.
" } }, "tags": { @@ -4311,6 +4323,12 @@ "traits": { "smithy.api#documentation": "A list of data volumes used in a job.
" } + }, + "enableExecuteCommand": { + "target": "com.amazonaws.batch#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether execute command functionality is turned on for this task. If true, execute\n command functionality is turned on all the containers in the task.
A list of volumes that are associated with the job.
" } + }, + "enableExecuteCommand": { + "target": "com.amazonaws.batch#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether execute command functionality is turned on for this task. If true, execute\n command functionality is turned on all the containers in the task.
The overrides for the initContainers defined in the Amazon EKS pod. These containers run before\n application containers, always runs to completion, and must complete successfully before the next\n container starts. These containers are registered with the Amazon EKS Connector agent and persists the\n registration information in the Kubernetes backend data store. For more information, see Init\n Containers in the Kubernetes documentation.
The overrides for the initContainers defined in the Amazon EKS pod. These containers run before\n application containers, always run to completion, and must complete successfully before the next\n container starts. These containers are registered with the Amazon EKS Connector agent and persists the\n registration information in the Kubernetes backend data store. For more information, see Init\n Containers in the Kubernetes documentation.
The amount of time (in seconds) to use to calculate a fair share percentage for each fair\n share identifier in use. A value of zero (0) indicates the default minimum time window (600 seconds).\n The maximum supported value is 604800 (1 week).
\nThe decay allows for more recently run jobs to have more weight than jobs that ran earlier. \n Consider adjusting this number if you have jobs that (on average) run longer than ten minutes, \n or a large difference in job count or job run times between share identifiers, and the allocation\n of resources doesn’t meet your needs.
" + "smithy.api#documentation": "The amount of time (in seconds) to use to calculate a fair-share percentage for each \n share identifier in use. A value of zero (0) indicates the default minimum time window (600 seconds).\n The maximum supported value is 604800 (1 week).
\nThe decay allows for more recently run jobs to have more weight than jobs that ran earlier. \n Consider adjusting this number if you have jobs that (on average) run longer than ten minutes, \n or a large difference in job count or job run times between share identifiers, and the allocation\n of resources doesn't meet your needs.
" } }, "computeReservation": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "A value used to reserve some of the available maximum vCPU for fair share identifiers that\n aren't already used.
\nThe reserved ratio is\n (computeReservation/100)^ActiveFairShares\n \n where \n ActiveFairShares\n is the number of active fair share\n identifiers.
For example, a computeReservation value of 50 indicates that Batch reserves\n 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if\n there are two fair share identifiers. It reserves 12.5% if there are three fair share\n identifiers. A computeReservation value of 25 indicates that Batch should reserve\n 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are\n two fair share identifiers, and 1.56% if there are three fair share identifiers.
The minimum value is 0 and the maximum value is 99.
" + "smithy.api#documentation": "A value used to reserve some of the available maximum vCPU for share identifiers that\n aren't already used.
\nThe reserved ratio is\n (computeReservation/100)^ActiveFairShares\n \n where \n ActiveFairShares\n is the number of active share\n identifiers.
For example, a computeReservation value of 50 indicates that Batch reserves\n 50% of the maximum available vCPU if there's only one share identifier. It reserves 25% if\n there are two share identifiers. It reserves 12.5% if there are three share\n identifiers. A computeReservation value of 25 indicates that Batch should reserve\n 25% of the maximum available vCPU if there's only one share identifier, 6.25% if there are\n two fair share identifiers, and 1.56% if there are three share identifiers.
The minimum value is 0 and the maximum value is 99.
" } }, "shareDistribution": { "target": "com.amazonaws.batch#ShareAttributesList", "traits": { - "smithy.api#documentation": "An array of SharedIdentifier objects that contain the weights for the fair\n share identifiers for the fair share policy. Fair share identifiers that aren't included have a\n default weight of 1.0.
An array of SharedIdentifier objects that contain the weights for the \n share identifiers for the fair-share policy. Share identifiers that aren't included have a\n default weight of 1.0.
The fair share policy for a scheduling policy.
" + "smithy.api#documentation": "The fair-share scheduling policy details.
" } }, "com.amazonaws.batch#FargatePlatformConfiguration": { @@ -5339,6 +5363,54 @@ "smithy.api#documentation": "The platform configuration for jobs that are running on Fargate resources. Jobs that run\n on Amazon EC2 resources must not specify this parameter.
" } }, + "com.amazonaws.batch#FirelensConfiguration": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.batch#FirelensConfigurationType", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The log router to use. The valid values are fluentd or fluentbit.
The options to use when configuring the log router. This field is optional and can be\n used to specify a custom configuration file or to add additional metadata, such as the\n task, task definition, cluster, and container instance details to the log event. If\n specified, the syntax to use is\n \"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}.\n For more information, see Creating a task definition that uses a FireLens configuration\n in the Amazon Elastic Container Service Developer Guide.
The FireLens configuration for the container. This is used to specify and configure a\n log router for container logs. For more information, see Custom log routing in the Amazon Elastic Container Service Developer\n Guide.
" + } + }, + "com.amazonaws.batch#FirelensConfigurationOptionsMap": { + "type": "map", + "key": { + "target": "com.amazonaws.batch#String" + }, + "value": { + "target": "com.amazonaws.batch#String" + } + }, + "com.amazonaws.batch#FirelensConfigurationType": { + "type": "enum", + "members": { + "FLUENTD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "fluentd" + } + }, + "FLUENTBIT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "fluentbit" + } + } + } + }, "com.amazonaws.batch#Float": { "type": "float" }, @@ -5348,7 +5420,7 @@ "jobs": { "target": "com.amazonaws.batch#FrontOfQueueJobSummaryList", "traits": { - "smithy.api#documentation": "The Amazon Resource Names (ARNs) of the first 100 RUNNABLE jobs in a named job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage.
The Amazon Resource Names (ARNs) of the first 100 RUNNABLE jobs in a named job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair-share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage.
The list of the first 100 RUNNABLE jobs in each job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage.
The list of the first 100 RUNNABLE jobs in each job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair-share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage.
The scheduling priority of the job definition. This only affects jobs in job queues with a\n fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority.
" + "smithy.api#documentation": "The scheduling priority of the job definition. This only affects jobs in job queues with a\n fair-share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority.
" } }, "parameters": { @@ -5781,7 +5853,7 @@ "schedulingPriority": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "The scheduling policy of the job definition. This only affects jobs in job queues with a\n fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority.
" + "smithy.api#documentation": "The scheduling policy of the job definition. This only affects jobs in job queues with a\n fair-share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority.
" } }, "attempts": { @@ -6398,7 +6470,7 @@ "maxSwap": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "The total amount of swap memory (in MiB) a container can use. This parameter is translated\n to the --memory-swap option to docker\n run where the value is the sum of the container memory plus the maxSwap\n value. For more information, see \n --memory-swap details in the Docker documentation.
If a maxSwap value of 0 is specified, the container doesn't use\n swap. Accepted values are 0 or any positive integer. If the maxSwap\n parameter is omitted, the container doesn't use the swap configuration for the container instance\n that it's running on. A maxSwap value must be set for the swappiness\n parameter to be used.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't\n provide it for these jobs.
\nThe total amount of swap memory (in MiB) a container can use. This parameter is translated\n to the --memory-swap option to docker\n run where the value is the sum of the container memory plus the maxSwap\n value. For more information, see \n --memory-swap details in the Docker documentation.
If a maxSwap value of 0 is specified, the container doesn't use\n swap. Accepted values are 0 or any positive integer. If the maxSwap\n parameter is omitted, the container doesn't use the swap configuration for the container instance\n on which it runs. A maxSwap value must be set for the swappiness\n parameter to be used.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't\n provide it for these jobs.
\nThe log driver to use for the container. The valid values that are listed for this parameter\n are log drivers that the Amazon ECS container agent can communicate with by default.
\nThe supported log drivers are awslogs, fluentd, gelf,\n json-file, journald, logentries, syslog, and\n splunk.
Jobs that are running on Fargate resources are restricted to the awslogs and\n splunk log drivers.
Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs log driver\n in the Batch User Guide and Amazon CloudWatch Logs logging\n driver in the Docker documentation.
\nSpecifies the Fluentd logging driver. For more information including usage and options,\n see Fluentd logging\n driver in the Docker documentation.
\nSpecifies the Graylog Extended Format (GELF) logging driver. For more information\n including usage and options, see Graylog Extended Format logging\n driver in the Docker documentation.
\nSpecifies the journald logging driver. For more information including usage and options,\n see Journald logging\n driver in the Docker documentation.
\nSpecifies the JSON file logging driver. For more information including usage and options,\n see JSON File\n logging driver in the Docker documentation.
\nSpecifies the Splunk logging driver. For more information including usage and options,\n see Splunk logging\n driver in the Docker documentation.
\nSpecifies the syslog logging driver. For more information including usage and options,\n see Syslog logging\n driver in the Docker documentation.
\nIf you have a custom driver that's not listed earlier that you want to work with the Amazon ECS\n container agent, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to\n work with that driver. We encourage you to submit pull requests for changes that you want to\n have included. However, Amazon Web Services doesn't currently support running modified copies of this\n software.
\nThis parameter requires version 1.18 of the Docker Remote API or greater on your\n container instance. To check the Docker Remote API version on your container instance, log in to your\n container instance and run the following command: sudo docker version | grep \"Server API version\"\n
The log driver to use for the container. The valid values that are listed for this parameter\n are log drivers that the Amazon ECS container agent can communicate with by default.
\nThe supported log drivers are awslogs, fluentd, gelf,\n json-file, journald, logentries, syslog, and\n splunk.
Jobs that are running on Fargate resources are restricted to the awslogs and\n splunk log drivers.
Specifies the firelens logging driver. For more information on configuring Firelens, see\n Send\n Amazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner in the\n Amazon Elastic Container Service Developer Guide.
\nSpecifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs log driver\n in the Batch User Guide and Amazon CloudWatch Logs logging\n driver in the Docker documentation.
\nSpecifies the Fluentd logging driver. For more information including usage and options,\n see Fluentd logging\n driver in the Docker documentation.
\nSpecifies the Graylog Extended Format (GELF) logging driver. For more information\n including usage and options, see Graylog Extended Format logging\n driver in the Docker documentation.
\nSpecifies the journald logging driver. For more information including usage and options,\n see Journald logging\n driver in the Docker documentation.
\nSpecifies the JSON file logging driver. For more information including usage and options,\n see JSON File\n logging driver in the Docker documentation.
\nSpecifies the Splunk logging driver. For more information including usage and options,\n see Splunk logging\n driver in the Docker documentation.
\nSpecifies the syslog logging driver. For more information including usage and options,\n see Syslog logging\n driver in the Docker documentation.
\nIf you have a custom driver that's not listed earlier that you want to work with the Amazon ECS\n container agent, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to\n work with that driver. We encourage you to submit pull requests for changes that you want to\n have included. However, Amazon Web Services doesn't currently support running modified copies of this\n software.
\nThis parameter requires version 1.18 of the Docker Remote API or greater on your\n container instance. To check the Docker Remote API version on your container instance, log in to your\n container instance and run the following command: sudo docker version | grep \"Server API version\"\n
The scheduling priority for jobs that are submitted with this job definition. This only\n affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority\n are scheduled before jobs with a lower scheduling priority.
\nThe minimum supported value is 0 and the maximum supported value is 9999.
" + "smithy.api#documentation": "The scheduling priority for jobs that are submitted with this job definition. This only\n affects jobs in job queues with a fair-share policy. Jobs with a higher scheduling priority\n are scheduled before jobs with a lower scheduling priority.
\nThe minimum supported value is 0 and the maximum supported value is 9999.
" } }, "containerProperties": { @@ -7864,7 +7942,7 @@ "target": "com.amazonaws.batch#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The name of the scheduling policy.
", + "smithy.api#documentation": "The name of the fair-share scheduling policy.
", "smithy.api#required": {} } }, @@ -7879,13 +7957,13 @@ "fairsharePolicy": { "target": "com.amazonaws.batch#FairsharePolicy", "traits": { - "smithy.api#documentation": "The fair share policy for the scheduling policy.
" + "smithy.api#documentation": "The fair-share scheduling policy details.
" } }, "tags": { "target": "com.amazonaws.batch#TagrisTagsMap", "traits": { - "smithy.api#documentation": "The tags that you apply to the scheduling policy to categorize and organize your resources.\n Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services resources in\n Amazon Web Services General Reference.
" + "smithy.api#documentation": "The tags that you apply to the fair-share scheduling policy to categorize and organize your resources.\n Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services resources in\n Amazon Web Services General Reference.
" } } }, @@ -7971,19 +8049,19 @@ "target": "com.amazonaws.batch#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "A fair share identifier or fair share identifier prefix. If the string ends with an asterisk\n (*), this entry specifies the weight factor to use for fair share identifiers that start with\n that prefix. The list of fair share identifiers in a fair share policy can't overlap. For\n example, you can't have one that specifies a shareIdentifier of UserA*\n and another that specifies a shareIdentifier of UserA-1.
There can be no more than 500 fair share identifiers active in a job queue.
\nThe string is limited to 255 alphanumeric characters, and can be followed by an asterisk\n (*).
", + "smithy.api#documentation": "A share identifier or share identifier prefix. If the string ends with an asterisk\n (*), this entry specifies the weight factor to use for share identifiers that start with\n that prefix. The list of share identifiers in a fair-share policy can't overlap. For\n example, you can't have one that specifies a shareIdentifier of UserA*\n and another that specifies a shareIdentifier of UserA-1.
There can be no more than 500 share identifiers active in a job queue.
\nThe string is limited to 255 alphanumeric characters, and can be followed by an asterisk\n (*).
", "smithy.api#required": {} } }, "weightFactor": { "target": "com.amazonaws.batch#Float", "traits": { - "smithy.api#documentation": "The weight factor for the fair share identifier. The default value is 1.0. A lower value has\n a higher priority for compute resources. For example, jobs that use a share identifier with a\n weight factor of 0.125 (1/8) get 8 times the compute resources of jobs that use a share\n identifier with a weight factor of 1.
\nThe smallest supported value is 0.0001, and the largest supported value is 999.9999.
" + "smithy.api#documentation": "The weight factor for the share identifier. The default value is 1.0. A lower value has\n a higher priority for compute resources. For example, jobs that use a share identifier with a\n weight factor of 0.125 (1/8) get 8 times the compute resources of jobs that use a share\n identifier with a weight factor of 1.
\nThe smallest supported value is 0.0001, and the largest supported value is 999.9999.
" } } }, "traits": { - "smithy.api#documentation": "Specifies the weights for the fair share identifiers for the fair share policy. Fair share\n identifiers that aren't included have a default weight of 1.0.
Specifies the weights for the share identifiers for the fair-share policy. Share\n identifiers that aren't included have a default weight of 1.0.
Submits an Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory\n requirements that are specified in the resourceRequirements objects in the job\n definition are the exception. They can't be overridden this way using the memory\n and vcpus parameters. Rather, you must specify updates to job definition\n parameters in a resourceRequirements object that's included in the\n containerOverrides parameter.
Job queues with a scheduling policy are limited to 500 active fair share identifiers at\n a time.
\nJobs that run on Fargate resources can't be guaranteed to run for more than 14 days.\n This is because, after 14 days, Fargate resources might become unavailable and job might be\n terminated.
\nSubmits an Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory\n requirements that are specified in the resourceRequirements objects in the job\n definition are the exception. They can't be overridden this way using the memory\n and vcpus parameters. Rather, you must specify updates to job definition\n parameters in a resourceRequirements object that's included in the\n containerOverrides parameter.
Job queues with a scheduling policy are limited to 500 active share identifiers at\n a time.
\nJobs that run on Fargate resources can't be guaranteed to run for more than 14 days.\n This is because, after 14 days, Fargate resources might become unavailable and job might be\n terminated.
\nThe share identifier for the job. Don't specify this parameter if the job queue doesn't\n have a scheduling policy. If the job queue has a scheduling policy, then this parameter must\n be specified.
\nThis string is limited to 255 alphanumeric characters, and can be followed by an asterisk\n (*).
" + "smithy.api#documentation": "The share identifier for the job. Don't specify this parameter if the job queue doesn't\n have a fair-share scheduling policy. If the job queue has a fair-share scheduling policy, then this parameter must\n be specified.
\nThis string is limited to 255 alphanumeric characters, and can be followed by an asterisk\n (*).
" } }, "schedulingPriorityOverride": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "The scheduling priority for the job. This only affects jobs in job queues with a fair\n share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority. This overrides any scheduling priority in the job definition and works only \n within a single share identifier.
\nThe minimum supported value is 0 and the maximum supported value is 9999.
" + "smithy.api#documentation": "The scheduling priority for the job. This only affects jobs in job queues with a \n fair-share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority. This overrides any scheduling priority in the job definition and works only \n within a single share identifier.
\nThe minimum supported value is 0 and the maximum supported value is 9999.
" } }, "arrayProperties": { @@ -8365,6 +8443,12 @@ "smithy.api#documentation": "If the essential parameter of a container is marked as true, and that container\n fails or stops for any reason, all other containers that are part of the task are stopped. If the\n essential parameter of a container is marked as false, its failure doesn't affect\n the rest of the containers in a task. If this parameter is omitted, a container is assumed to be\n essential.
All jobs must have at least one essential container. If you have an application that's\n composed of multiple containers, group containers that are used for a common purpose into\n components, and separate the different components into multiple task definitions. For more\n information, see Application\n Architecture in the Amazon Elastic Container Service Developer Guide.
" } }, + "firelensConfiguration": { + "target": "com.amazonaws.batch#FirelensConfiguration", + "traits": { + "smithy.api#documentation": "The FireLens configuration for the container. This is used to specify and configure a\n log router for container logs. For more information, see Custom log routing in the Amazon Elastic Container Service Developer\n Guide.
" + } + }, "image": { "target": "com.amazonaws.batch#String", "traits": { @@ -8525,6 +8609,12 @@ "smithy.api#documentation": "If the essential parameter of a container is marked as true, and that container\n fails or stops for any reason, all other containers that are part of the task are stopped. If the\n essential parameter of a container is marked as false, its failure doesn't affect\n the rest of the containers in a task. If this parameter is omitted, a container is assumed to be\n essential.
All jobs must have at least one essential container. If you have an application that's\n composed of multiple containers, group containers that are used for a common purpose into\n components, and separate the different components into multiple task definitions. For more\n information, see Application\n Architecture in the Amazon Elastic Container Service Developer Guide.
" } }, + "firelensConfiguration": { + "target": "com.amazonaws.batch#FirelensConfiguration", + "traits": { + "smithy.api#documentation": "The FireLens configuration for the container. This is used to specify and configure a\n log router for container logs. For more information, see Custom log routing in the Amazon Elastic Container Service Developer\n Guide.
" + } + }, "image": { "target": "com.amazonaws.batch#String", "traits": { @@ -8890,7 +8980,7 @@ "unmanagedvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "The maximum number of vCPUs expected to be used for an unmanaged compute environment.\n Don't specify this parameter for a managed compute environment. This parameter is only used\n for fair share scheduling to reserve vCPU capacity for new share identifiers. If this\n parameter isn't provided for a fair share job queue, no vCPU capacity is reserved.
" + "smithy.api#documentation": "The maximum number of vCPUs expected to be used for an unmanaged compute environment.\n Don't specify this parameter for a managed compute environment. This parameter is only used\n for fair-share scheduling to reserve vCPU capacity for new share identifiers. If this\n parameter isn't provided for a fair-share job queue, no vCPU capacity is reserved.
" } }, "computeResources": { @@ -9108,7 +9198,7 @@ "schedulingPolicyArn": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "Amazon Resource Name (ARN) of the fair share scheduling policy. Once a job queue is created, the fair share\n scheduling policy can be replaced but not removed. The format is\n aws:Partition:batch:Region:Account:scheduling-policy/Name\n .\n For example,\n aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy.
Amazon Resource Name (ARN) of the fair-share scheduling policy. Once a job queue is created, the fair-share\n scheduling policy can be replaced but not removed. The format is\n aws:Partition:batch:Region:Account:scheduling-policy/Name\n .\n For example,\n aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy.
Specifies the infrastructure update policy for the compute environment. For more information\n about infrastructure updates, see Updating compute environments in the\n Batch User Guide.
" + "smithy.api#documentation": "Specifies the infrastructure update policy for the Amazon EC2 compute environment. For more information\n about infrastructure updates, see Updating compute environments in the\n Batch User Guide.
" } }, "com.amazonaws.batch#UpdateSchedulingPolicy": { @@ -9214,7 +9304,7 @@ "fairsharePolicy": { "target": "com.amazonaws.batch#FairsharePolicy", "traits": { - "smithy.api#documentation": "The fair share policy.
" + "smithy.api#documentation": "The fair-share policy scheduling details.
" } } }, diff --git a/codegen/sdk/aws-models/bcm-pricing-calculator.json b/codegen/sdk/aws-models/bcm-pricing-calculator.json index a568ef483b7..679aa859b02 100644 --- a/codegen/sdk/aws-models/bcm-pricing-calculator.json +++ b/codegen/sdk/aws-models/bcm-pricing-calculator.json @@ -3088,6 +3088,12 @@ "traits": { "smithy.api#enumValue": "FAILED" } + }, + "STALE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STALE" + } } } }, @@ -4181,7 +4187,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to retrieve applicable rate type preferences for the account" }, - "smithy.api#documentation": "\n Retrieves the current preferences for the Amazon Web Services Cost Explorer service.\n
", + "smithy.api#documentation": "\n Retrieves the current preferences for Pricing Calculator.\n
", "smithy.api#readonly": {} } }, @@ -4206,6 +4212,12 @@ "traits": { "smithy.api#documentation": "\n The preferred rate types for member accounts.\n
" } + }, + "standaloneAccountRateTypeSelections": { + "target": "com.amazonaws.bcmpricingcalculator#RateTypes", + "traits": { + "smithy.api#documentation": "\n The preferred rate types for a standalone account.\n
" + } } }, "traits": { @@ -6345,7 +6357,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to update rate type preferences for the account" }, - "smithy.api#documentation": "\n Updates the preferences for the Amazon Web Services Cost Explorer service.\n
", + "smithy.api#documentation": "\n Updates the preferences for Pricing Calculator.\n
", "smithy.api#idempotent": {} } }, @@ -6363,6 +6375,12 @@ "traits": { "smithy.api#documentation": "\n The updated preferred rate types for member accounts.\n
" } + }, + "standaloneAccountRateTypeSelections": { + "target": "com.amazonaws.bcmpricingcalculator#RateTypes", + "traits": { + "smithy.api#documentation": "\n The updated preferred rate types for a standalone account.\n
" + } } }, "traits": { @@ -6383,6 +6401,12 @@ "traits": { "smithy.api#documentation": "\n The updated preferred rate types for member accounts.\n
" } + }, + "standaloneAccountRateTypeSelections": { + "target": "com.amazonaws.bcmpricingcalculator#RateTypes", + "traits": { + "smithy.api#documentation": "\n The updated preferred rate types for a standalone account.\n
" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/bedrock-agent-runtime.json b/codegen/sdk/aws-models/bedrock-agent-runtime.json index 0b315a67512..a8a0c0e126c 100644 --- a/codegen/sdk/aws-models/bedrock-agent-runtime.json +++ b/codegen/sdk/aws-models/bedrock-agent-runtime.json @@ -3196,6 +3196,12 @@ "traits": { "smithy.api#documentation": "Contains information about an output from a condition node.
" } + }, + "nodeActionTrace": { + "target": "com.amazonaws.bedrockagentruntime#FlowTraceNodeActionEvent", + "traits": { + "smithy.api#documentation": "Contains information about an action (operation) called by a node.\n For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.
" + } } }, "traits": { @@ -3276,6 +3282,50 @@ "smithy.api#documentation": "Contains information about a trace, which tracks an input or output for a node in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.
" } }, + "com.amazonaws.bedrockagentruntime#FlowTraceNodeActionEvent": { + "type": "structure", + "members": { + "nodeName": { + "target": "com.amazonaws.bedrockagentruntime#NodeName", + "traits": { + "smithy.api#documentation": "The name of the node that called the operation.
", + "smithy.api#required": {} + } + }, + "timestamp": { + "target": "com.amazonaws.bedrockagentruntime#DateTimestamp", + "traits": { + "smithy.api#documentation": "The date and time that the operation was called.
", + "smithy.api#required": {} + } + }, + "requestId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ID of the request that the node made to the operation.
", + "smithy.api#required": {} + } + }, + "serviceName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the service that the node called.
", + "smithy.api#required": {} + } + }, + "operationName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The name of the operation that the node called.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Contains information about an action (operation) called by a node in an Amazon Bedrock flow. The service generates action events for calls made by prompt nodes,\n agent nodes, and Amazon Web Services Lambda nodes.
", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagentruntime#FlowTraceNodeInputContent": { "type": "union", "members": { @@ -5867,7 +5917,7 @@ } ], "traits": { - "smithy.api#documentation": "Sends a prompt for the agent to process and respond to. Note the following fields for the request:
\nTo continue the same conversation with an agent, use the same sessionId value in the request.
To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.
To stream agent responses, make sure that only orchestration prompt is enabled. Agent streaming is not supported for the following steps:\n
\n\n Pre-processing\n
\n Post-processing\n
Agent with 1 Knowledge base and User Input not enabled
End a conversation by setting endSession to true.
In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.
The response contains both chunk and trace attributes.
\nThe final response is returned in the bytes field of the chunk object. The InvokeAgent returns one chunk for the entire interaction.
The attribution object contains citations for parts of the response.
If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.
If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.
Errors are also surfaced in the response.
\nSends a prompt for the agent to process and respond to. Note the following fields for the request:
\nTo continue the same conversation with an agent, use the same sessionId value in the request.
To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.
End a conversation by setting endSession to true.
In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.
The response contains both chunk and trace attributes.
\nThe final response is returned in the bytes field of the chunk object. The InvokeAgent returns one chunk for the entire interaction.
The attribution object contains citations for parts of the response.
If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.
If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.
Errors are also surfaced in the response.
\n\n The guardrail interval to apply as response is generated.\n
", + "smithy.api#documentation": " The guardrail interval to apply as response is generated. By default, the guardrail\n interval is set to 50 characters. If a larger interval is specified, the response will\n be generated in larger chunks with fewer ApplyGuardrail calls. The\n following examples show the response generated for Hello, I am an\n agent input string.
\n Example response in chunks: Interval set to 3 characters\n
\n\n 'Hel', 'lo, ','I am', ' an', ' Age', 'nt'\n
Each chunk has at least 3 characters except for the last chunk
\n\n Example response in chunks: Interval set to 20 or more characters\n
\n\n Hello, I am an Agent\n
\n Configurations for streaming.
" + "smithy.api#documentation": "Configurations for streaming.
" } }, "com.amazonaws.bedrockagentruntime#SummaryText": { diff --git a/codegen/sdk/aws-models/bedrock-agent.json b/codegen/sdk/aws-models/bedrock-agent.json index 37d24f910b9..2dcc7fcd5e6 100644 --- a/codegen/sdk/aws-models/bedrock-agent.json +++ b/codegen/sdk/aws-models/bedrock-agent.json @@ -2534,7 +2534,7 @@ "modelArn": { "target": "com.amazonaws.bedrockagent#BedrockModelArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the foundation model used for context enrichment.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.
", "smithy.api#required": {} } } @@ -10488,6 +10488,12 @@ "traits": { "smithy.api#enumValue": "NEPTUNE_ANALYTICS" } + }, + "OPENSEARCH_MANAGED_CLUSTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OPENSEARCH_MANAGED_CLUSTER" + } } } }, @@ -12529,6 +12535,12 @@ "traits": { "smithy.api#documentation": "The name of the VPC endpoint service in your account that is connected to your MongoDB Atlas cluster.
" } + }, + "textIndexName": { + "target": "com.amazonaws.bedrockagent#MongoDbAtlasIndexName", + "traits": { + "smithy.api#documentation": "The name of the text search index in the MongoDB collection. This is required for using the hybrid search\n feature.
" + } } }, "traits": { @@ -12718,6 +12730,100 @@ "smithy.api#pattern": "^[0-9]{1,5}$" } }, + "com.amazonaws.bedrockagent#OpenSearchManagedClusterConfiguration": { + "type": "structure", + "members": { + "domainEndpoint": { + "target": "com.amazonaws.bedrockagent#OpenSearchManagedClusterDomainEndpoint", + "traits": { + "smithy.api#documentation": "The endpoint URL the OpenSearch domain.
", + "smithy.api#required": {} + } + }, + "domainArn": { + "target": "com.amazonaws.bedrockagent#OpenSearchManagedClusterDomainArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the OpenSearch domain.
", + "smithy.api#required": {} + } + }, + "vectorIndexName": { + "target": "com.amazonaws.bedrockagent#OpenSearchManagedClusterIndexName", + "traits": { + "smithy.api#documentation": "The name of the vector store.
", + "smithy.api#required": {} + } + }, + "fieldMapping": { + "target": "com.amazonaws.bedrockagent#OpenSearchManagedClusterFieldMapping", + "traits": { + "smithy.api#documentation": "Contains the names of the fields to which to map information about the vector store.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Contains details about the Managed Cluster configuration of the knowledge base in Amazon OpenSearch Service. For more information, \n see Create a vector index in OpenSearch Managed Cluster.
" + } + }, + "com.amazonaws.bedrockagent#OpenSearchManagedClusterDomainArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov|-iso):es:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:domain/[a-z][a-z0-9-]{3,28}$" + } + }, + "com.amazonaws.bedrockagent#OpenSearchManagedClusterDomainEndpoint": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^https://.*$" + } + }, + "com.amazonaws.bedrockagent#OpenSearchManagedClusterFieldMapping": { + "type": "structure", + "members": { + "vectorField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.
", + "smithy.api#required": {} + } + }, + "textField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text\n is split according to the chunking strategy you choose.
", + "smithy.api#required": {} + } + }, + "metadataField": { + "target": "com.amazonaws.bedrockagent#FieldName", + "traits": { + "smithy.api#documentation": "The name of the field in which Amazon Bedrock stores metadata about the vector store.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Contains the names of the fields to which to map information about the vector store.
" + } + }, + "com.amazonaws.bedrockagent#OpenSearchManagedClusterIndexName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^(?![\\-_+.])[a-z0-9][a-z0-9\\-_\\.]*$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagent#OpenSearchServerlessCollectionArn": { "type": "string", "traits": { @@ -14180,6 +14286,12 @@ "smithy.api#documentation": "The name of the field in which Amazon Bedrock stores metadata about the vector store.
", "smithy.api#required": {} } + }, + "customMetadataField": { + "target": "com.amazonaws.bedrockagent#ColumnName", + "traits": { + "smithy.api#documentation": "Provide a name for the universal metadata field where Amazon Bedrock will store any custom metadata from \n your data source.
" + } } }, "traits": { @@ -15464,6 +15576,12 @@ "smithy.api#documentation": "Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.
" } }, + "opensearchManagedClusterConfiguration": { + "target": "com.amazonaws.bedrockagent#OpenSearchManagedClusterConfiguration", + "traits": { + "smithy.api#documentation": "Contains details about the storage configuration of the knowledge base in OpenSearch Managed\n Cluster. For more information, see Create \n a vector index in Amazon OpenSearch Service.
" + } + }, "pineconeConfiguration": { "target": "com.amazonaws.bedrockagent#PineconeConfiguration", "traits": { @@ -16491,7 +16609,7 @@ "parentActionGroupSignatureParams": { "target": "com.amazonaws.bedrockagent#ActionGroupSignatureParams", "traits": { - "smithy.api#documentation": "The configuration settings for a computer use action.
\n\n Computer use is a new Anthropic Claude model capability (in beta) available with Claude 3.7 and Claude 3.5 Sonnet v2 only. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.\n
\nThe configuration settings for a computer use action.
\n\n Computer use is a new Anthropic Claude model capability (in beta) available with Claude 3.7 Sonnet and Claude 3.5 Sonnet v2 only. For more information, see Configure an Amazon Bedrock Agent to complete tasks with computer use tools.\n
\nThe content details used in the request to apply the guardrail.
", "smithy.api#required": {} } + }, + "outputScope": { + "target": "com.amazonaws.bedrockruntime#GuardrailOutputScope", + "traits": { + "smithy.api#documentation": "Specifies the scope of the output that you get in the response. Set to FULL to return the entire output, including any detected and non-detected entries in the response for enhanced debugging.
Note that the full output scope doesn't apply to word filters or regex in sensitive information filters. It does apply to all other filtering policies, including sensitive information with filters that can detect personally identifiable information (PII).
" + } } }, "traits": { @@ -828,6 +834,12 @@ "smithy.api#required": {} } }, + "actionReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The reason for the action taken when harmful content is detected.
" + } + }, "outputs": { "target": "com.amazonaws.bedrockruntime#GuardrailOutputContentList", "traits": { @@ -1049,6 +1061,36 @@ "smithy.api#documentation": "The Model automatically decides if a tool should be called or whether to generate text instead.\n For example, {\"auto\" : {}}.
The audio content for the bidirectional input.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Payload content for the bidirectional input. The input is an audio stream.
", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockruntime#BidirectionalOutputPayloadPart": { + "type": "structure", + "members": { + "bytes": { + "target": "com.amazonaws.bedrockruntime#PartBody", + "traits": { + "smithy.api#documentation": "The speech output of the bidirectional stream.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Output from the bidirectional stream. The output is speech and a text transcription.
", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockruntime#Body": { "type": "blob", "traits": { @@ -1058,6 +1100,32 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockruntime#CachePointBlock": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrockruntime#CachePointType", + "traits": { + "smithy.api#documentation": "Specifies the type of cache point within the CachePointBlock.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Defines a section of content to be cached for reuse in subsequent API calls.
" + } + }, + "com.amazonaws.bedrockruntime#CachePointType": { + "type": "enum", + "members": { + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "default" + } + } + } + }, "com.amazonaws.bedrockruntime#ConflictException": { "type": "structure", "members": { @@ -1116,6 +1184,12 @@ "smithy.api#documentation": "Contains the content to assess with the guardrail. If you don't specify\n guardContent in a call to the Converse API, the guardrail (if passed in the\n Converse API) assesses the entire message.
For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n\n
" } }, + "cachePoint": { + "target": "com.amazonaws.bedrockruntime#CachePointBlock", + "traits": { + "smithy.api#documentation": "CachePoint to include in the message.
" + } + }, "reasoningContent": { "target": "com.amazonaws.bedrockruntime#ReasoningContentBlock", "traits": { @@ -2164,6 +2238,12 @@ "smithy.api#documentation": "The guardrail action.
", "smithy.api#required": {} } + }, + "detected": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether content that breaches the guardrail configuration is detected.
" + } } }, "traits": { @@ -2283,6 +2363,12 @@ "traits": { "smithy.api#enumValue": "BLOCKED" } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } } } }, @@ -2301,6 +2387,9 @@ "smithy.api#documentation": "An assessment of a content policy for a guardrail.
" } }, + "com.amazonaws.bedrockruntime#GuardrailContentPolicyImageUnitsProcessed": { + "type": "integer" + }, "com.amazonaws.bedrockruntime#GuardrailContentPolicyUnitsProcessed": { "type": "integer" }, @@ -2388,6 +2477,12 @@ "smithy.api#documentation": "The action performed by the guardrails contextual grounding filter.
", "smithy.api#required": {} } + }, + "detected": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether content that fails the contextual grounding evaluation (grounding or relevance score less than the corresponding threshold) was detected.
" + } } }, "traits": { @@ -2615,6 +2710,12 @@ "smithy.api#documentation": "The action for the custom word.
", "smithy.api#required": {} } + }, + "detected": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether custom word content that breaches the guardrail configuration is detected.
" + } } }, "traits": { @@ -2763,6 +2864,12 @@ "smithy.api#documentation": "The action for the managed word.
", "smithy.api#required": {} } + }, + "detected": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether managed word content that breaches the guardrail configuration is detected.
" + } } }, "traits": { @@ -2806,6 +2913,23 @@ "target": "com.amazonaws.bedrockruntime#GuardrailOutputContent" } }, + "com.amazonaws.bedrockruntime#GuardrailOutputScope": { + "type": "enum", + "members": { + "INTERVENTIONS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERVENTIONS" + } + }, + "FULL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FULL" + } + } + } + }, "com.amazonaws.bedrockruntime#GuardrailOutputText": { "type": "string" }, @@ -2832,6 +2956,12 @@ "smithy.api#documentation": "The PII entity filter action.
", "smithy.api#required": {} } + }, + "detected": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether personally identifiable information (PII) that breaches the guardrail configuration is detected.
" + } } }, "traits": { @@ -3065,6 +3195,12 @@ "smithy.api#documentation": "The region filter action.
", "smithy.api#required": {} } + }, + "detected": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether custom regex entities that breach the guardrail configuration are detected.
" + } } }, "traits": { @@ -3099,6 +3235,12 @@ "traits": { "smithy.api#enumValue": "BLOCKED" } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } } } }, @@ -3247,6 +3389,12 @@ "smithy.api#documentation": "The action the guardrail should take when it intervenes on a topic.
", "smithy.api#required": {} } + }, + "detected": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether topic content that breaches the guardrail configuration is detected.
" + } } }, "traits": { @@ -3267,6 +3415,12 @@ "traits": { "smithy.api#enumValue": "BLOCKED" } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } } } }, @@ -3313,6 +3467,12 @@ "traits": { "smithy.api#enumValue": "disabled" } + }, + "ENABLED_FULL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "enabled_full" + } } } }, @@ -3336,6 +3496,12 @@ "traits": { "smithy.api#documentation": "the output assessments.
" } + }, + "actionReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "Provides the reason for the action taken when harmful content is detected.
" + } } }, "traits": { @@ -3386,6 +3552,12 @@ "smithy.api#documentation": "The contextual grounding policy units processed by the guardrail.
", "smithy.api#required": {} } + }, + "contentPolicyImageUnits": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentPolicyImageUnitsProcessed", + "traits": { + "smithy.api#documentation": "The content policy image units processed by the guardrail.
" + } } }, "traits": { @@ -3406,6 +3578,12 @@ "traits": { "smithy.api#enumValue": "BLOCKED" } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } } } }, @@ -3566,6 +3744,9 @@ { "target": "com.amazonaws.bedrockruntime#InvokeModel" }, + { + "target": "com.amazonaws.bedrockruntime#InvokeModelWithBidirectionalStream" + }, { "target": "com.amazonaws.bedrockruntime#InvokeModelWithResponseStream" } @@ -3750,6 +3931,164 @@ "smithy.api#output": {} } }, + "com.amazonaws.bedrockruntime#InvokeModelWithBidirectionalStream": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockruntime#InvokeModelWithBidirectionalStreamRequest" + }, + "output": { + "target": "com.amazonaws.bedrockruntime#InvokeModelWithBidirectionalStreamResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockruntime#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockruntime#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelErrorException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelNotReadyException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelStreamErrorException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelTimeoutException" + }, + { + "target": "com.amazonaws.bedrockruntime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockruntime#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockruntime#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.bedrockruntime#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockruntime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Invoke the specified Amazon Bedrock model to run inference using the bidirectional stream. The response is returned in a stream that remains open for 8 minutes. A single session can contain multiple prompts and responses from the model. 
The prompts to the model are provided as audio files and the model's responses are spoken back to the user and transcribed.
\nIt is possible for users to interrupt the model's response with a new prompt, which will halt the response speech. The model will retain contextual awareness of the conversation while pivoting to respond to the new prompt.
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/model/{modelId}/invoke-with-bidirectional-stream" + } + } + }, + "com.amazonaws.bedrockruntime#InvokeModelWithBidirectionalStreamInput": { + "type": "union", + "members": { + "chunk": { + "target": "com.amazonaws.bedrockruntime#BidirectionalInputPayloadPart", + "traits": { + "smithy.api#documentation": "The audio chunk that is used as input for the invocation step.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Payload content, the speech chunk, for the bidirectional input of the invocation step.
", + "smithy.api#streaming": {} + } + }, + "com.amazonaws.bedrockruntime#InvokeModelWithBidirectionalStreamOutput": { + "type": "union", + "members": { + "chunk": { + "target": "com.amazonaws.bedrockruntime#BidirectionalOutputPayloadPart", + "traits": { + "smithy.api#documentation": "The speech chunk that was provided as output from the invocation step.
" + } + }, + "internalServerException": { + "target": "com.amazonaws.bedrockruntime#InternalServerException", + "traits": { + "smithy.api#documentation": "The request encountered an unknown internal error.
" + } + }, + "modelStreamErrorException": { + "target": "com.amazonaws.bedrockruntime#ModelStreamErrorException", + "traits": { + "smithy.api#documentation": "The request encountered an error with the model stream.
" + } + }, + "validationException": { + "target": "com.amazonaws.bedrockruntime#ValidationException", + "traits": { + "smithy.api#documentation": "The input fails to satisfy the constraints specified by an Amazon Web Services service.
" + } + }, + "throttlingException": { + "target": "com.amazonaws.bedrockruntime#ThrottlingException", + "traits": { + "smithy.api#documentation": "The request was denied due to request throttling.
" + } + }, + "modelTimeoutException": { + "target": "com.amazonaws.bedrockruntime#ModelTimeoutException", + "traits": { + "smithy.api#documentation": "The connection was closed because a request was not received within the timeout period.
" + } + }, + "serviceUnavailableException": { + "target": "com.amazonaws.bedrockruntime#ServiceUnavailableException", + "traits": { + "smithy.api#documentation": "The request has failed due to a temporary failure of the server.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Output from the bidirectional stream that was used for model invocation.
", + "smithy.api#streaming": {} + } + }, + "com.amazonaws.bedrockruntime#InvokeModelWithBidirectionalStreamRequest": { + "type": "structure", + "members": { + "modelId": { + "target": "com.amazonaws.bedrockruntime#InvokeModelIdentifier", + "traits": { + "smithy.api#documentation": "The model ID or ARN of the model ID to use. Currently, only amazon.nova-sonic-v1:0 is supported.
The prompt and inference parameters in the format specified in the BidirectionalInputPayloadPart in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.
Streaming response from the model in the format specified by the BidirectionalOutputPayloadPart header.
A content block to assess with the guardrail. Use with the Converse or ConverseStream API operations.
\nFor more information, see Use a guardrail with the Converse\n API in the Amazon Bedrock User Guide.
" } + }, + "cachePoint": { + "target": "com.amazonaws.bedrockruntime#CachePointBlock", + "traits": { + "smithy.api#documentation": "CachePoint to include in the system prompt.
" + } } }, "traits": { @@ -4883,6 +5228,24 @@ }, "smithy.api#required": {} } + }, + "cacheReadInputTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "The number of input tokens read from the cache for the request.
", + "smithy.api#range": { + "min": 0 + } + } + }, + "cacheWriteInputTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "The number of input tokens written to the cache for the request.
", + "smithy.api#range": { + "min": 0 + } + } } }, "traits": { @@ -4897,6 +5260,12 @@ "traits": { "smithy.api#documentation": "The specfication for the tool.
" } + }, + "cachePoint": { + "target": "com.amazonaws.bedrockruntime#CachePointBlock", + "traits": { + "smithy.api#documentation": "CachePoint to include in the tool configuration.
" + } } }, "traits": { @@ -5190,6 +5559,12 @@ "traits": { "smithy.api#enumValue": "DISABLED" } + }, + "ENABLED_FULL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED_FULL" + } } } }, diff --git a/codegen/sdk/aws-models/bedrock.json b/codegen/sdk/aws-models/bedrock.json index 68c8c6fcf8d..fe2afec9f15 100644 --- a/codegen/sdk/aws-models/bedrock.json +++ b/codegen/sdk/aws-models/bedrock.json @@ -2556,6 +2556,32 @@ "target": "com.amazonaws.bedrock#CustomModelSummary" } }, + "com.amazonaws.bedrock#CustomModelUnits": { + "type": "structure", + "members": { + "customModelUnitsPerModelCopy": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "The number of custom model units used to host a model copy.
" + } + }, + "customModelUnitsVersion": { + "target": "com.amazonaws.bedrock#CustomModelUnitsVersion", + "traits": { + "smithy.api#documentation": "The version of the custom model unit. Use to determine the billing rate for the custom model unit.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A CustomModelUnit (CMU) is an abstract view of the hardware utilization that \n Amazon Bedrock needs to host a single copy of your custom model. A model copy represents a \n single instance of your imported model that is ready to serve inference requests. Amazon Bedrock \n determines the number of custom model units that a model copy needs when you import the custom model.\n
You can use CustomModelUnits to estimate the cost of running\n your custom model. For more information, see Calculate the cost of running a custom model in the\n Amazon Bedrock user guide.\n
Specifies if the imported model supports converse.
" } + }, + "customModelUnits": { + "target": "com.amazonaws.bedrock#CustomModelUnits", + "traits": { + "smithy.api#documentation": "Information about the hardware utilization for a single copy of the model.
" + } } }, "traits": { @@ -6248,12 +6280,56 @@ "traits": { "smithy.api#documentation": "The output modalities selected for the guardrail content filter.
" } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailContentFilterAction", + "traits": { + "smithy.api#documentation": "The action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the\n trace response.
The action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Indicates whether guardrail evaluation is enabled on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether guardrail evaluation is enabled on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { "smithy.api#documentation": "Contains filter strengths for harmful content. Guardrails support the following content filters to detect and filter harmful user inputs and FM-generated outputs.
\n\n Hate – Describes language or a statement that discriminates, criticizes, insults, denounces, or dehumanizes a person or group on the basis of an identity (such as race, ethnicity, gender, religion, sexual orientation, ability, and national origin).
\n\n Insults – Describes language or a statement that includes demeaning, humiliating, mocking, insulting, or belittling language. This type of language is also labeled as bullying.
\n\n Sexual – Describes language or a statement that indicates sexual interest, activity, or arousal using direct or indirect references to body parts, physical traits, or sex.
\n\n Violence – Describes language or a statement that includes glorification of or threats to inflict physical pain, hurt, or injury toward a person, group or thing.
\nContent filtering depends on the confidence classification of user inputs and FM\n responses across each of the four harmful categories. All input and output statements are\n classified into one of four confidence levels (NONE, LOW, MEDIUM, HIGH) for each\n harmful category. For example, if a statement is classified as\n Hate with HIGH confidence, the likelihood of the statement\n representing hateful content is high. A single statement can be classified across\n multiple categories with varying confidence levels. For example, a single statement\n can be classified as Hate with HIGH confidence, Insults with LOW confidence, Sexual with NONE confidence, and Violence with MEDIUM confidence.
\nFor more information, see Guardrails content filters.
\nThis data type is used in the following API operations:
\nThe output modalities selected for the guardrail content filter configuration.
" } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailContentFilterAction", + "traits": { + "smithy.api#documentation": "Specifies the action to take when harmful content is detected. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the\n trace response.
Specifies the action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies whether to enable guardrail evaluation on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether to enable guardrail evaluation on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -6389,6 +6489,26 @@ "smithy.api#documentation": "Contains details about how to handle harmful content.
" } }, + "com.amazonaws.bedrock#GuardrailContextualGroundingAction": { + "type": "enum", + "members": { + "BLOCK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLOCK" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + }, + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrock#GuardrailContextualGroundingFilter": { "type": "structure", "members": { @@ -6408,6 +6528,18 @@ }, "smithy.api#required": {} } + }, + "action": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingAction", + "traits": { + "smithy.api#documentation": "The action to take when content fails the contextual grounding evaluation. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Indicates whether contextual grounding is enabled for evaluation. When disabled, you aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -6433,6 +6565,18 @@ }, "smithy.api#required": {} } + }, + "action": { + "target": "com.amazonaws.bedrock#GuardrailContextualGroundingAction", + "traits": { + "smithy.api#documentation": "Specifies the action to take when content fails the contextual grounding evaluation. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies whether to enable contextual grounding evaluation. When disabled, you aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -6617,6 +6761,30 @@ "smithy.api#documentation": "ManagedWords$type\n The managed word type that was configured for the guardrail.\n (For now, we only offer profanity word list)
", "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailWordAction", + "traits": { + "smithy.api#documentation": "The action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
The action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Indicates whether guardrail evaluation is enabled on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether guardrail evaluation is enabled on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -6632,6 +6800,30 @@ "smithy.api#documentation": "The managed word type to configure for the guardrail.
", "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailWordAction", + "traits": { + "smithy.api#documentation": "Specifies the action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies the action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies whether to enable guardrail evaluation on the input. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether to enable guardrail evaluation on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -6738,6 +6930,30 @@ "smithy.api#documentation": "The configured guardrail action when PII entity is detected.
", "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailSensitiveInformationAction", + "traits": { + "smithy.api#documentation": "The action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n ANONYMIZE – Mask the content and replace it with identifier\n tags.
\n NONE – Take no action but return detection information in the\n trace response.
The action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n ANONYMIZE – Mask the content and replace it with identifier\n tags.
\n NONE – Take no action but return detection information in the\n trace response.
Indicates whether guardrail evaluation is enabled on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether guardrail evaluation is enabled on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -6760,6 +6976,30 @@ "smithy.api#documentation": "Configure guardrail action when the PII entity is detected.
", "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailSensitiveInformationAction", + "traits": { + "smithy.api#documentation": "Specifies the action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n ANONYMIZE – Mask the content and replace it with identifier\n tags.
\n NONE – Take no action but return detection information in the\n trace response.
Specifies the action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n ANONYMIZE – Mask the content and replace it with identifier\n tags.
\n NONE – Take no action but return detection information in the\n trace response.
Specifies whether to enable guardrail evaluation on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether to enable guardrail evaluation on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -6998,6 +7238,30 @@ "smithy.api#documentation": "The action taken when a match to the regular expression is detected.
", "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailSensitiveInformationAction", + "traits": { + "smithy.api#documentation": "The action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
The action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Indicates whether guardrail evaluation is enabled on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether guardrail evaluation is enabled on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -7045,6 +7309,30 @@ "smithy.api#documentation": "The guardrail action to configure when matching regular expression is detected.
", "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailSensitiveInformationAction", + "traits": { + "smithy.api#documentation": "Specifies the action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies the action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies whether to enable guardrail evaluation on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether to enable guardrail evaluation on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -7083,6 +7371,12 @@ "traits": { "smithy.api#enumValue": "ANONYMIZE" } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } } } }, @@ -7290,12 +7584,56 @@ "traits": { "smithy.api#documentation": "Specifies to deny the topic.
" } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailTopicAction", + "traits": { + "smithy.api#documentation": "The action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
The action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Indicates whether guardrail evaluation is enabled on the input. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether guardrail evaluation is enabled on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { "smithy.api#documentation": "Details about topics for the guardrail to identify and deny.
\nThis data type is used in the following API operations:
\nSpecifies to deny the topic.
", "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailTopicAction", + "traits": { + "smithy.api#documentation": "Specifies the action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies the action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies whether to enable guardrail evaluation on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether to enable guardrail evaluation on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { @@ -7458,12 +7820,56 @@ }, "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailWordAction", + "traits": { + "smithy.api#documentation": "The action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
The action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Indicates whether guardrail evaluation is enabled on the input. When disabled, you aren't\n charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether guardrail evaluation is enabled on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { "smithy.api#documentation": "A word configured for the guardrail.
" } }, + "com.amazonaws.bedrock#GuardrailWordAction": { + "type": "enum", + "members": { + "BLOCK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLOCK" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + }, + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrock#GuardrailWordConfig": { "type": "structure", "members": { @@ -7477,6 +7883,30 @@ }, "smithy.api#required": {} } + }, + "inputAction": { + "target": "com.amazonaws.bedrock#GuardrailWordAction", + "traits": { + "smithy.api#documentation": "Specifies the action to take when harmful content is detected in the input. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies the action to take when harmful content is detected in the output. Supported values include:
\n\n BLOCK – Block the content and replace it with blocked\n messaging.
\n NONE – Take no action but return detection information in the trace\n response.
Specifies whether to enable guardrail evaluation on the input. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } + }, + "outputEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether to enable guardrail evaluation on the output. When disabled, you\n aren't charged for the evaluation. The evaluation doesn't appear in the response.
" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/chime-sdk-voice.json b/codegen/sdk/aws-models/chime-sdk-voice.json index b91aba9e937..e4213865f69 100644 --- a/codegen/sdk/aws-models/chime-sdk-voice.json +++ b/codegen/sdk/aws-models/chime-sdk-voice.json @@ -7322,6 +7322,12 @@ "traits": { "smithy.api#documentation": "The updated phone number order time stamp, in ISO 8601 format.
" } + }, + "FocDate": { + "target": "com.amazonaws.chimesdkvoice#Iso8601Timestamp", + "traits": { + "smithy.api#documentation": "The Firm Order Commitment (FOC) date for phone number porting orders. This field is null\n if a phone number order is not a porting order.
" + } } }, "traits": { @@ -10939,6 +10945,9 @@ "target": "com.amazonaws.chimesdkvoice#ValidateE911AddressResponse" }, "errors": [ + { + "target": "com.amazonaws.chimesdkvoice#AccessDeniedException" + }, { "target": "com.amazonaws.chimesdkvoice#BadRequestException" }, diff --git a/codegen/sdk/aws-models/cleanrooms.json b/codegen/sdk/aws-models/cleanrooms.json index c4ae68b2200..1b275aa905c 100644 --- a/codegen/sdk/aws-models/cleanrooms.json +++ b/codegen/sdk/aws-models/cleanrooms.json @@ -15436,6 +15436,12 @@ "traits": { "smithy.api#documentation": "A description of the collaboration.
" } + }, + "analyticsEngine": { + "target": "com.amazonaws.cleanrooms#AnalyticsEngine", + "traits": { + "smithy.api#documentation": "The analytics engine.
" + } } } }, diff --git a/codegen/sdk/aws-models/cloudformation.json b/codegen/sdk/aws-models/cloudformation.json index 4553626fb16..50e42e33a14 100644 --- a/codegen/sdk/aws-models/cloudformation.json +++ b/codegen/sdk/aws-models/cloudformation.json @@ -1378,7 +1378,7 @@ "name": "cloudformation" }, "aws.protocols#awsQuery": {}, - "smithy.api#documentation": "CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and\n repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service,\n Elastic Load Balancing, and Amazon EC2 Auto Scaling to build highly reliable, highly\n scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services\n infrastructure.
\nWith CloudFormation, you declare all your resources and dependencies in a template file. The template defines a\n collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack\n together and manages all dependencies between the resources for you.
\nFor more information about CloudFormation, see the CloudFormation\n product page.
\nCloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a\n specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com.
", + "smithy.api#documentation": "CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and\n repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store,\n Amazon Simple Notification Service, Elastic Load Balancing, and Amazon EC2 Auto Scaling to build highly reliable, highly scalable, cost-effective\n applications without creating or configuring the underlying Amazon Web Services infrastructure.
\nWith CloudFormation, you declare all your resources and dependencies in a template file. The\n template defines a collection of resources as a single unit called a stack. CloudFormation creates\n and deletes all member resources of the stack together and manages all dependencies between the\n resources for you.
\nFor more information about CloudFormation, see the CloudFormation product page.
\nCloudFormation makes use of other Amazon Web Services products. If you need additional technical information\n about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com.
", "smithy.api#title": "AWS CloudFormation", "smithy.api#xmlNamespace": { "uri": "http://cloudformation.amazonaws.com/doc/2010-05-15/" @@ -2470,7 +2470,7 @@ } ], "traits": { - "smithy.api#documentation": "For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues\n rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of\n the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can\n return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and\n then try to update the stack again.
A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll\n back all changes after a failed stack update. For example, you might have a stack that's\n rolling back to an old database instance that was deleted outside of CloudFormation. Because\n CloudFormation doesn't know the database was deleted, it assumes that the database instance still\n exists and attempts to roll back to it, causing the update rollback to fail.
For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues\n rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of\n the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return\n your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try\n to update the stack again.
A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll\n back all changes after a failed stack update. For example, you might have a stack that's\n rolling back to an old database instance that was deleted outside of CloudFormation. Because\n CloudFormation doesn't know the database was deleted, it assumes that the database instance still\n exists and attempts to roll back to it, causing the update rollback to fail.
The URL of the file that contains the revised template. The URL must point to a\n template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager\n document. CloudFormation generates the change set by comparing this template with the stack that\n you specified. The location for an Amazon S3 bucket must start with https://.
Conditional: You must specify only TemplateBody or\n TemplateURL.
The URL of the file that contains the revised template. The URL must point to a template\n (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. CloudFormation\n generates the change set by comparing this template with the stack that you specified. The\n location for an Amazon S3 bucket must start with https://.
Conditional: You must specify only TemplateBody or\n TemplateURL.
In some cases, you must explicitly acknowledge that your stack template contains certain\n capabilities in order for CloudFormation to create the stack.
\n\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n
Some stack templates might include resources that can affect permissions in your\n Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must\n explicitly acknowledge this by specifying one of these capabilities.
\nThe following IAM resources require you to specify either the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
\nIf you have IAM resources with custom names, you must\n specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all\n permissions associated with them and edit their permissions if necessary.
\n\n \n AWS::IAM::Group\n
\n\n \n AWS::IAM::Policy\n
\n\n \n AWS::IAM::Role\n
\n\n \n AWS::IAM::User\n
\nFor more information, see Acknowledging IAM resources in CloudFormation templates.
\n\n CAPABILITY_AUTO_EXPAND\n
Some template contain macros. Macros perform custom processing on templates; this can\n include simple actions like find-and-replace operations, all the way to extensive\n transformations of entire templates. Because of this, users typically create a change set\n from the processed template, so that they can review the changes resulting from the macros\n before actually creating the stack. If your stack template contains one or more macros,\n and you choose to create a stack directly from the processed template, without first\n reviewing the resulting changes in a change set, you must acknowledge this capability.\n This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
\nThis capacity doesn't apply to creating change sets, and specifying it when creating\n change sets has no effect.
\nIf you want to create a stack from a stack template that contains macros\n and nested stacks, you must create or update the stack directly\n from the template using the CreateStack or UpdateStack action, and specifying this capability.
\nFor more information about macros, see Perform custom processing\n on CloudFormation templates with template macros.
\nOnly one of the Capabilities and ResourceType parameters can\n be specified.
In some cases, you must explicitly acknowledge that your stack template contains certain\n capabilities in order for CloudFormation to create the stack.
\n\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n
Some stack templates might include resources that can affect permissions in your\n Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must\n explicitly acknowledge this by specifying one of these capabilities.
\nThe following IAM resources require you to specify either the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
\nIf you have IAM resources with custom names, you must\n specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all\n permissions associated with them and edit their permissions if necessary.
\n\n \n AWS::IAM::Group\n
\n\n \n AWS::IAM::Policy\n
\n\n \n AWS::IAM::Role\n
\n\n \n AWS::IAM::User\n
\nFor more information, see Acknowledging IAM resources in CloudFormation templates.
\n\n CAPABILITY_AUTO_EXPAND\n
Some template contain macros. Macros perform custom processing on templates; this can\n include simple actions like find-and-replace operations, all the way to extensive\n transformations of entire templates. Because of this, users typically create a change set\n from the processed template, so that they can review the changes resulting from the macros\n before actually creating the stack. If your stack template contains one or more macros,\n and you choose to create a stack directly from the processed template, without first\n reviewing the resulting changes in a change set, you must acknowledge this capability.\n This includes the AWS::Include\n and AWS::Serverless transforms, which are macros hosted by CloudFormation.
\nThis capacity doesn't apply to creating change sets, and specifying it when creating\n change sets has no effect.
\nIf you want to create a stack from a stack template that contains macros\n and nested stacks, you must create or update the stack directly\n from the template using the CreateStack or UpdateStack action, and specifying this capability.
\nFor more information about macros, see Perform custom processing\n on CloudFormation templates with template macros.
\nOnly one of the Capabilities and ResourceType parameters can\n be specified.
The URL of a file containing the template body. The URL must point to a template (max size:\n 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location\n for an Amazon S3 bucket must start with https://.
Conditional: You must specify either the TemplateBody or the\n TemplateURL parameter, but not both.
The URL of a file containing the template body. The URL must point to a template (max\n size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for\n an Amazon S3 bucket must start with https://.
Conditional: You must specify either the TemplateBody or the\n TemplateURL parameter, but not both.
In some cases, you must explicitly acknowledge that your stack template contains certain\n capabilities in order for CloudFormation to create the stack.
\n\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n
Some stack templates might include resources that can affect permissions in your\n Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must\n explicitly acknowledge this by specifying one of these capabilities.
\nThe following IAM resources require you to specify either the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
\nIf you have IAM resources with custom names, you must\n specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all\n permissions associated with them and edit their permissions if necessary.
\n\n AWS::IAM::AccessKey\n
\n\n AWS::IAM::Group\n
\n\n AWS::IAM::Policy\n
\n\n AWS::IAM::Role\n
\n\n AWS::IAM::User\n
\nFor more information, see Acknowledging IAM resources in CloudFormation templates.
\n\n CAPABILITY_AUTO_EXPAND\n
Some template contain macros. Macros perform custom processing on templates; this can\n include simple actions like find-and-replace operations, all the way to extensive\n transformations of entire templates. Because of this, users typically create a change set\n from the processed template, so that they can review the changes resulting from the macros\n before actually creating the stack. If your stack template contains one or more macros,\n and you choose to create a stack directly from the processed template, without first\n reviewing the resulting changes in a change set, you must acknowledge this capability.\n This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
\nIf you want to create a stack from a stack template that contains macros\n and nested stacks, you must create the stack directly from the\n template using this capability.
\nYou should only create stacks directly from a stack template that contains macros if\n you know what processing the macro performs.
\nEach macro relies on an underlying Lambda service function for processing stack\n templates. Be aware that the Lambda function owner can update the function operation\n without CloudFormation being notified.
\nFor more information, see Perform custom processing\n on CloudFormation templates with template macros.
\nOnly one of the Capabilities and ResourceType parameters can\n be specified.
In some cases, you must explicitly acknowledge that your stack template contains certain\n capabilities in order for CloudFormation to create the stack.
\n\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n
Some stack templates might include resources that can affect permissions in your\n Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must\n explicitly acknowledge this by specifying one of these capabilities.
\nThe following IAM resources require you to specify either the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
\nIf you have IAM resources with custom names, you must\n specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all\n permissions associated with them and edit their permissions if necessary.
\n\n AWS::IAM::AccessKey\n
\n\n AWS::IAM::Group\n
\n\n AWS::IAM::Policy\n
\n\n AWS::IAM::Role\n
\n\n AWS::IAM::User\n
\nFor more information, see Acknowledging IAM resources in CloudFormation templates.
\n\n CAPABILITY_AUTO_EXPAND\n
Some template contain macros. Macros perform custom processing on templates; this can\n include simple actions like find-and-replace operations, all the way to extensive\n transformations of entire templates. Because of this, users typically create a change set\n from the processed template, so that they can review the changes resulting from the macros\n before actually creating the stack. If your stack template contains one or more macros,\n and you choose to create a stack directly from the processed template, without first\n reviewing the resulting changes in a change set, you must acknowledge this capability.\n This includes the AWS::Include\n and AWS::Serverless transforms, which are macros hosted by CloudFormation.
\nIf you want to create a stack from a stack template that contains macros\n and nested stacks, you must create the stack directly from the\n template using this capability.
\nYou should only create stacks directly from a stack template that contains macros if\n you know what processing the macro performs.
\nEach macro relies on an underlying Lambda service function for processing stack\n templates. Be aware that the Lambda function owner can update the function operation\n without CloudFormation being notified.
\nFor more information, see Perform custom processing\n on CloudFormation templates with template macros.
\nOnly one of the Capabilities and ResourceType parameters can\n be specified.
Creates a refactor across multiple stacks, with the list of stacks and resources that are affected.
" + "smithy.api#documentation": "Creates a refactor across multiple stacks, with the list of stacks and resources that are\n affected.
" } }, "com.amazonaws.cloudformation#CreateStackRefactorInput": { @@ -3097,7 +3097,7 @@ "ResourceMappings": { "target": "com.amazonaws.cloudformation#ResourceMappings", "traits": { - "smithy.api#documentation": "The mappings for the stack resource Source and stack resource Destination.
The mappings for the stack resource Source and stack resource\n Destination.
The URL of a file that contains the template body. The URL must point to a template\n (maximum size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document.\n The location for an Amazon S3 bucket must start with https://.
Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but\n not both.
" + "smithy.api#documentation": "The URL of a file that contains the template body. The URL must point to a template\n (maximum size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The\n location for an Amazon S3 bucket must start with https://.
Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but\n not both.
" } }, "StackId": { @@ -3208,13 +3208,13 @@ "AdministrationRoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.
\nSpecify an IAM role only if you are using customized administrator roles to control\n which users or groups can manage specific stack sets within the same administrator account.\n For more information, see Prerequisites for using\n StackSets in the CloudFormation User Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.
\nSpecify an IAM role only if you are using customized administrator roles to control\n which users or groups can manage specific stack sets within the same administrator account.\n For more information, see Grant\n self-managed permissions in the CloudFormation User Guide.
\nValid only if the permissions model is SELF_MANAGED.
The name of the IAM execution role to use to create the stack set. If you do not specify\n an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole\n role for the stack set operation.
Specify an IAM role only if you are using customized execution roles to control which\n stack resources users and groups can include in their stack sets.
" + "smithy.api#documentation": "The name of the IAM execution role to use to create the stack set. If you do not specify\n an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole\n role for the stack set operation.
Specify an IAM role only if you are using customized execution roles to control which\n stack resources users and groups can include in their stack sets.
\nValid only if the permissions model is SELF_MANAGED.
Describes whether StackSets automatically deploys to Organizations accounts that\n are added to the target organization or organizational unit (OU). Specify only if\n PermissionModel is SERVICE_MANAGED.
Describes whether StackSets automatically deploys to Organizations accounts that\n are added to the target organization or organizational unit (OU). For more information, see\n Manage\n automatic deployments for CloudFormation StackSets that use service-managed permissions\n in the CloudFormation User Guide.
\nRequired if the permissions model is SERVICE_MANAGED. (Not used with\n self-managed permissions.)
[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.
\nBy default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.
To create a stack set with service-managed permissions while signed in to the management account, specify SELF.
To create a stack set with service-managed permissions while signed in to a delegated\n administrator account, specify DELEGATED_ADMIN.
Your Amazon Web Services account must be registered as a delegated admin in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.
\nStack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated\n administrators.
" + "smithy.api#documentation": "Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.
\nBy default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.
To create a stack set with service-managed permissions while signed in to the management account, specify SELF.
To create a stack set with service-managed permissions while signed in to a delegated\n administrator account, specify DELEGATED_ADMIN.
Your Amazon Web Services account must be registered as a delegated admin in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.
\nStack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated\n administrators.
\nValid only if the permissions model is SERVICE_MANAGED.
If specified, lists only the Hooks related to the specified\n LogicalResourceId.
If specified, lists only the Hooks related to the specified\n LogicalResourceId.
Status of the resource scan.
\nThe resource scan is still in progress.
\nThe resource scan is complete.
\nThe resource scan has expired.
\nThe resource scan has failed.
\nStatus of the resource scan.
\nThe resource scan is still in progress.
\nThe resource scan is complete.
\nThe resource scan has expired.
\nThe resource scan has failed.
\nThe number of resources that were read. This is only available for scans with a\n Status set to COMPLETE, EXPIRED, or FAILED\n .
This field may be 0 if the resource scan failed with a\n ResourceScanLimitExceededException.
The number of resources that were read. This is only available for scans with a\n Status set to COMPLETE, EXPIRED, or\n FAILED.
This field may be 0 if the resource scan failed with a\n ResourceScanLimitExceededException.
The scan filters that were used.
" } } }, @@ -4611,7 +4617,7 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackName", "traits": { - "smithy.api#documentation": "The name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nDefault: There is no default value.
" + "smithy.api#documentation": "The name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nThe name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nDefault: There is no default value.
", + "smithy.api#documentation": "The name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nThe logical name of the resource as specified in the template.
\nDefault: There is no default value.
", + "smithy.api#documentation": "The logical name of the resource as specified in the template.
", "smithy.api#required": {} } } @@ -5024,19 +5030,19 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackName", "traits": { - "smithy.api#documentation": "The name or the unique stack ID that is associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nDefault: There is no default value.
\nRequired: Conditional. If you don't specify StackName, you must specify\n PhysicalResourceId.
The name or the unique stack ID that is associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nRequired: Conditional. If you don't specify StackName, you must specify\n PhysicalResourceId.
The logical name of the resource as specified in the template.
\nDefault: There is no default value.
" + "smithy.api#documentation": "The logical name of the resource as specified in the template.
" } }, "PhysicalResourceId": { "target": "com.amazonaws.cloudformation#PhysicalResourceId", "traits": { - "smithy.api#documentation": "The name or unique identifier that corresponds to a physical instance ID of a resource\n supported by CloudFormation.
\nFor example, for an Amazon Elastic Compute Cloud (EC2) instance,\n PhysicalResourceId corresponds to the InstanceId. You can pass the\n EC2 InstanceId to DescribeStackResources to find which stack the\n instance belongs to and what other resources are part of the stack.
Required: Conditional. If you don't specify PhysicalResourceId, you must\n specify StackName.
Default: There is no default value.
" + "smithy.api#documentation": "The name or unique identifier that corresponds to a physical instance ID of a resource\n supported by CloudFormation.
\nFor example, for an Amazon Elastic Compute Cloud (EC2) instance,\n PhysicalResourceId corresponds to the InstanceId. You can pass the\n EC2 InstanceId to DescribeStackResources to find which stack the\n instance belongs to and what other resources are part of the stack.
Required: Conditional. If you don't specify PhysicalResourceId, you must\n specify StackName.
If you don't pass a parameter to StackName, the API returns a response\n that describes all resources in the account, which can impact performance. This requires\n ListStacks and DescribeStacks permissions.
Consider using the ListStacks API if you're not passing a parameter to\n StackName.
The IAM policy below can be added to IAM policies when you want to limit\n resource-level permissions and avoid returning a response when no parameter is sent in the\n request:
\n{ \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\":\n \"cloudformation:DescribeStacks\", \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }]\n }
\nThe name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nDefault: There is no default value.
" + "smithy.api#documentation": "If you don't pass a parameter to StackName, the API returns a response\n that describes all resources in the account, which can impact performance. This requires\n ListStacks and DescribeStacks permissions.
Consider using the ListStacks API if you're not passing a parameter to\n StackName.
The IAM policy below can be added to IAM policies when you want to limit\n resource-level permissions and avoid returning a response when no parameter is sent in the\n request:
\n{ \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\":\n \"cloudformation:DescribeStacks\", \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }]\n }
\nThe name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nThe name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nDefault: There is no default value.
" + "smithy.api#documentation": "The name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nThe URL of a file containing the template body. The URL must point to a template (max size:\n 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location\n for an Amazon S3 bucket must start with https://.
Conditional: You must specify only one of the following parameters:\n StackName, StackSetName, TemplateBody, or\n TemplateURL.
The URL of a file containing the template body. The URL must point to a template (max\n size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for\n an Amazon S3 bucket must start with https://.
Conditional: You must specify only one of the following parameters:\n StackName, StackSetName, TemplateBody, or\n TemplateURL.
Lists all exported output values in the account and Region in which you call this action.\n Use this action to see the exported output values that you can import into other stacks. To\n import values, use the \n Fn::ImportValue function.
\nFor more information, see Get exported outputs from a deployed CloudFormation stack.
", + "smithy.api#documentation": "Lists all exported output values in the account and Region in which you call this action.\n Use this action to see the exported output values that you can import into other stacks. To\n import values, use the \n Fn::ImportValue function.
\nFor more information, see Get exported outputs\n from a deployed CloudFormation stack.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -7846,7 +7852,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns summaries of invoked Hooks when a change set or Cloud Control API operation target is provided.
" + "smithy.api#documentation": "Returns summaries of invoked Hooks when a change set or Cloud Control API operation target is\n provided.
" } }, "com.amazonaws.cloudformation#ListHookResultsInput": { @@ -7864,7 +7870,7 @@ "target": "com.amazonaws.cloudformation#HookResultId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The logical ID of the target the operation is acting on by the Hook. If the target is a change set, \n it's the ARN of the change set.
\nIf the target is a Cloud Control API operation, this will be the HookRequestToken returned by the Cloud Control API \n operation request. For more information on the HookRequestToken, see ProgressEvent.
The logical ID of the target the operation is acting on by the Hook. If the\n target is a change set, it's the ARN of the change set.
\nIf the target is a Cloud Control API operation, this will be the HookRequestToken\n returned by the Cloud Control API operation request. For more information on the\n HookRequestToken, see ProgressEvent.
The logical ID of the target the operation is acting on by the Hook. If the target is a change set, \n it's the ARN of the change set.
\nIf the target is a Cloud Control API operation, this will be the HooksRequestToken returned by the Cloud Control API \n operation request. For more information on the HooksRequestToken, see ProgressEvent.
The logical ID of the target the operation is acting on by the Hook. If the\n target is a change set, it's the ARN of the change set.
\nIf the target is a Cloud Control API operation, this will be the HooksRequestToken\n returned by the Cloud Control API operation request. For more information on the\n HooksRequestToken, see ProgressEvent.
A list of HookResultSummary structures that provides the status and Hook status reason for each Hook \n invocation for the specified target.
A list of HookResultSummary structures that provides the status and\n Hook status reason for each Hook invocation for the specified\n target.
If the number of available results exceeds this maximum, the response includes a\n NextToken value that you can use for the NextToken parameter to\n get the next set of results. The default value is 10. The maximum value is 100.
The scan type that you want to get summary information about. The default is\n FULL.
If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call this action again and assign that token to the request object's NextToken\n parameter. If the request returns all results, NextToken is set to null.
If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call this action again and assign that token to\n the request object's NextToken parameter. If the request returns all results,\n NextToken is set to null.
The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.
If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call this action again and assign that token to the request object's NextToken\n parameter. If the request returns all results, NextToken is set to null.
If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call this action again and assign that token to\n the request object's NextToken parameter. If the request returns all results,\n NextToken is set to null.
Execution status to use as a filter. Specify one or more execution status codes to list only stack refactors with the specified\n execution status codes.
" + "smithy.api#documentation": "Execution status to use as a filter. Specify one or more execution status codes to list\n only stack refactors with the specified execution status codes.
" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call this action again and assign that token to the request object's NextToken\n parameter. If the request returns all results, NextToken is set to null.
If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call this action again and assign that token to\n the request object's NextToken parameter. If the request returns all results,\n NextToken is set to null.
The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.
The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.
If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call this action again and assign that token to the request object's NextToken\n parameter. If the request returns all results, NextToken is set to null.
If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call this action again and assign that token to\n the request object's NextToken parameter. If the request returns all results,\n NextToken is set to null.
The name or the unique stack ID that is associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nDefault: There is no default value.
", + "smithy.api#documentation": "The name or the unique stack ID that is associated with the stack, which aren't always\n interchangeable:
\nRunning stacks: You can specify either the stack's name or its unique stack ID.
\nDeleted stacks: You must specify the unique stack ID.
\nReturns the summary information for stacks whose status matches the specified\n StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days\n after the stack is deleted. If no StackStatusFilter is specified, summary information for all\n stacks is returned (including existing stacks and stacks that have been deleted).
", + "smithy.api#documentation": "Returns the summary information for stacks whose status matches the specified\n StackStatusFilter. Summary information for stacks that have been deleted is\n kept for 90 days after the stack is deleted. If no StackStatusFilter is\n specified, summary information for all stacks is returned (including existing stacks and\n stacks that have been deleted).
The source stack StackName and LogicalResourceId for the resource being refactored.
The source stack StackName and LogicalResourceId for the resource\n being refactored.
The destination stack StackName and LogicalResourceId for the resource being refactored.
The destination stack StackName and LogicalResourceId for the\n resource being refactored.
Specifies the current source of the resource and the destination of where it will be moved to.
" + "smithy.api#documentation": "Specifies the current source of the resource and the destination of where it will be moved\n to.
" } }, "com.amazonaws.cloudformation#ResourceMappings": { @@ -11232,7 +11244,7 @@ "Status": { "target": "com.amazonaws.cloudformation#ResourceScanStatus", "traits": { - "smithy.api#documentation": "Status of the resource scan.
\nThe resource scan is still in progress.
\nThe resource scan is complete.
\nThe resource scan has expired.
\nThe resource scan has failed.
\nStatus of the resource scan.
\nThe resource scan is still in progress.
\nThe resource scan is complete.
\nThe resource scan has expired.
\nThe resource scan has failed.
\nThe percentage of the resource scan that has been completed.
" } + }, + "ScanType": { + "target": "com.amazonaws.cloudformation#ScanType", + "traits": { + "smithy.api#documentation": "The scan type that has been completed.
" + } } }, "traits": { @@ -11566,6 +11584,27 @@ } } }, + "com.amazonaws.cloudformation#ResourceTypeFilter": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.cloudformation#ResourceTypeFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudformation#ResourceTypeFilter" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, "com.amazonaws.cloudformation#ResourceTypePrefix": { "type": "string" }, @@ -11808,6 +11847,49 @@ } } }, + "com.amazonaws.cloudformation#ScanFilter": { + "type": "structure", + "members": { + "Types": { + "target": "com.amazonaws.cloudformation#ResourceTypeFilters", + "traits": { + "smithy.api#documentation": "An array of strings where each string represents an Amazon Web Services resource type you want to scan.\n Each string defines the resource type using the format\n AWS::ServiceName::ResourceType, for example, AWS::DynamoDB::Table. For\n the full list of supported resource types, see the Resource type\n support table in the CloudFormation User Guide.
To scan all resource types within a service, you can use a wildcard, represented by an\n asterisk (*). You can place an asterisk only at the end of the string, for example,\n AWS::S3::*.
A filter that is used to specify which resource types to scan.
" + } + }, + "com.amazonaws.cloudformation#ScanFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudformation#ScanFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.cloudformation#ScanType": { + "type": "enum", + "members": { + "FULL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FULL" + } + }, + "PARTIAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PARTIAL" + } + } + } + }, "com.amazonaws.cloudformation#ScannedResource": { "type": "structure", "members": { @@ -13013,7 +13095,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 1350 + "max": 5120 } } }, @@ -13022,7 +13104,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 1350 + "max": 5120 } } }, @@ -13038,7 +13120,7 @@ "Entity": { "target": "com.amazonaws.cloudformation#StackRefactorActionEntity", "traits": { - "smithy.api#documentation": "The type that will be evaluated in the StackRefactorAction. The following are potential Entity types:
\n Stack\n
\n Resource\n
The type that will be evaluated in the StackRefactorAction. The following are\n potential Entity types:
\n Stack\n
\n Resource\n
A key-value pair that identifies the target resource. The key is an identifier property (for example,\n BucketName for AWS::S3::Bucket resources) and the value is the actual property value (for\n example, MyS3Bucket).
A key-value pair that identifies the target resource. The key is an identifier property (for\n example, BucketName for AWS::S3::Bucket resources) and the value is the\n actual property value (for example, MyS3Bucket).
The detection type is one of the following:
\nAuto: CloudFormation figured out the mapping on its own.
\nManual: The customer provided the mapping in the ResourceMapping parameter.
The detection type is one of the following:
\nAuto: CloudFormation figured out the mapping on its own.
\nManual: The customer provided the mapping in the ResourceMapping\n parameter.
The mapping for the stack resource Source and stack resource Destination.
The mapping for the stack resource Source and stack resource\n Destination.
Describes the stack and the action that CloudFormation will perform on it if you execute the stack refactor.
" + "smithy.api#documentation": "Describes the stack and the action that CloudFormation will perform on it if you execute the\n stack refactor.
" } }, "com.amazonaws.cloudformation#StackRefactorActionEntity": { @@ -14198,7 +14280,7 @@ "RegionOrder": { "target": "com.amazonaws.cloudformation#RegionList", "traits": { - "smithy.api#documentation": "The order of the Regions where you want to perform the stack operation.
\n\n RegionOrder isn't followed if AutoDeployment is enabled.
The order of the Regions where you want to perform the stack operation.
" } }, "FailureToleranceCount": { @@ -14233,7 +14315,7 @@ } }, "traits": { - "smithy.api#documentation": "The user-specified preferences for how CloudFormation performs a stack set operation.
\nFor more information about maximum concurrent accounts and failure tolerance, see Stack\n set operation options.
" + "smithy.api#documentation": "The user-specified preferences for how CloudFormation performs a stack set operation.
\nFor more information about maximum concurrent accounts and failure tolerance, see Stack\n set operation options.
\n\n StackSetOperationPreferences don't apply to AutoDeployment, even if it's enabled.
A unique identifier for this StartResourceScan request. Specify this token if\n you plan to retry requests so that CloudFormation knows that you're not attempting to start a new\n resource scan.
The scan filters to use.
" + } } }, "traits": { @@ -15207,7 +15295,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 1024 + "max": 5120 } } }, @@ -16032,13 +16120,13 @@ "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "In some cases, you must explicitly acknowledge that your stack template contains certain\n capabilities in order for CloudFormation to update the stack.
\n\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n
Some stack templates might include resources that can affect permissions in your\n Amazon Web Services account, for example, by creating new IAM users. For those stacks, you must\n explicitly acknowledge this by specifying one of these capabilities.
\nThe following IAM resources require you to specify either the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
\nIf you have IAM resources with custom names, you must\n specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all\n permissions associated with them and edit their permissions if necessary.
\n\n \n AWS::IAM::Group\n
\n\n AWS::IAM::Policy\n
\n\n \n AWS::IAM::Role\n
\n\n \n AWS::IAM::User\n
\nFor more information, see Acknowledging IAM resources in CloudFormation templates.
\n\n CAPABILITY_AUTO_EXPAND\n
Some template contain macros. Macros perform custom processing on templates; this can\n include simple actions like find-and-replace operations, all the way to extensive\n transformations of entire templates. Because of this, users typically create a change set\n from the processed template, so that they can review the changes resulting from the macros\n before actually updating the stack. If your stack template contains one or more macros,\n and you choose to update a stack directly from the processed template, without first\n reviewing the resulting changes in a change set, you must acknowledge this capability.\n This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
\nIf you want to update a stack from a stack template that contains macros\n and nested stacks, you must update the stack directly from the\n template using this capability.
\nYou should only update stacks directly from a stack template that contains macros if\n you know what processing the macro performs.
\nEach macro relies on an underlying Lambda service function for processing stack\n templates. Be aware that the Lambda function owner can update the function operation\n without CloudFormation being notified.
\nFor more information, see Perform custom processing\n on CloudFormation templates with template macros.
\nOnly one of the Capabilities and ResourceType parameters can\n be specified.
In some cases, you must explicitly acknowledge that your stack template contains certain\n capabilities in order for CloudFormation to update the stack.
\n\n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n
Some stack templates might include resources that can affect permissions in your\n Amazon Web Services account, for example, by creating new IAM users. For those stacks, you must\n explicitly acknowledge this by specifying one of these capabilities.
\nThe following IAM resources require you to specify either the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
\nIf you have IAM resources with custom names, you must\n specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all\n permissions associated with them and edit their permissions if necessary.
\n\n \n AWS::IAM::Group\n
\n\n AWS::IAM::Policy\n
\n\n \n AWS::IAM::Role\n
\n\n \n AWS::IAM::User\n
\nFor more information, see Acknowledging IAM resources in CloudFormation templates.
\n\n CAPABILITY_AUTO_EXPAND\n
Some templates contain macros. Macros perform custom processing on templates; this can\n include simple actions like find-and-replace operations, all the way to extensive\n transformations of entire templates. Because of this, users typically create a change set\n from the processed template, so that they can review the changes resulting from the macros\n before actually updating the stack. If your stack template contains one or more macros,\n and you choose to update a stack directly from the processed template, without first\n reviewing the resulting changes in a change set, you must acknowledge this capability.\n This includes the AWS::Include\n and AWS::Serverless transforms, which are macros hosted by CloudFormation.
\nIf you want to update a stack from a stack template that contains macros\n and nested stacks, you must update the stack directly from the\n template using this capability.
\nYou should only update stacks directly from a stack template that contains macros if\n you know what processing the macro performs.
\nEach macro relies on an underlying Lambda service function for processing stack\n templates. Be aware that the Lambda function owner can update the function operation\n without CloudFormation being notified.
\nFor more information, see Perform custom processing\n on CloudFormation templates with template macros.
\nOnly one of the Capabilities and ResourceType parameters can\n be specified.
The template resource types that you have permissions to work with for this update stack\n action, such as AWS::EC2::Instance, AWS::EC2::*, or\n Custom::MyCustomInstance.
If the list of resource types doesn't include a resource that you're updating, the stack\n update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this\n parameter for CloudFormation-specific condition keys in IAM policies. For more information, see\n Control access with Identity and Access Management.
\nOnly one of the Capabilities and ResourceType parameters can\n be specified.
The template resource types that you have permissions to work with for this update stack\n action, such as AWS::EC2::Instance, AWS::EC2::*, or\n Custom::MyCustomInstance.
If the list of resource types doesn't include a resource that you're updating, the stack\n update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this\n parameter for CloudFormation-specific condition keys in IAM policies. For more information, see\n Control access with\n Identity and Access Management.
\nOnly one of the Capabilities and ResourceType parameters can\n be specified.
Updates the stack set, and associated stack instances in the specified accounts and\n Amazon Web Services Regions.
\nEven if the stack set operation created by updating the stack set fails (completely or\n partially, below or above a specified failure tolerance), the stack set is updated with your\n changes. Subsequent CreateStackInstances calls on the specified stack set\n use the updated stack set.
" + "smithy.api#documentation": "Updates the stack set and associated stack instances in the specified accounts and\n Amazon Web Services Regions.
\nEven if the stack set operation created by updating the stack set fails (completely or\n partially, below or above a specified failure tolerance), the stack set is updated with your\n changes. Subsequent CreateStackInstances calls on the specified stack set\n use the updated stack set.
" } }, "com.amazonaws.cloudformation#UpdateStackSetInput": { @@ -16281,7 +16369,7 @@ "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": { - "smithy.api#documentation": "The URL of a file that contains the template body. The URL must point to a template\n (maximum size: 1 MB) that is located in an Amazon S3 bucket or a Systems Manager document.\n The location for an Amazon S3 bucket must start with https://.
Conditional: You must specify only one of the following parameters:\n TemplateBody or TemplateURL—or set\n UsePreviousTemplate to true.
The URL of a file that contains the template body. The URL must point to a template\n (maximum size: 1 MB) that is located in an Amazon S3 bucket or a Systems Manager document. The\n location for an Amazon S3 bucket must start with https://.
Conditional: You must specify only one of the following parameters:\n TemplateBody or TemplateURL—or set\n UsePreviousTemplate to true.
The Amazon Resource Name (ARN) of the IAM role to use to update this stack set.
\nSpecify an IAM role only if you are using customized administrator roles to control\n which users or groups can manage specific stack sets within the same administrator account.\n For more information, see Prerequisites for using\n CloudFormation StackSets in the CloudFormation User Guide.
\nIf you specified a customized administrator role when you created the stack set, you must\n specify a customized administrator role, even if it is the same customized administrator role\n used with this stack set previously.
" + "smithy.api#documentation": "[Self-managed permissions] The Amazon Resource Name (ARN) of the IAM role to use to\n update this stack set.
\nSpecify an IAM role only if you are using customized administrator roles to control\n which users or groups can manage specific stack sets within the same administrator account.\n For more information, see Grant\n self-managed permissions in the CloudFormation User Guide.
\nIf you specified a customized administrator role when you created the stack set, you must\n specify a customized administrator role, even if it is the same customized administrator role\n used with this stack set previously.
" } }, "ExecutionRoleName": { "target": "com.amazonaws.cloudformation#ExecutionRoleName", "traits": { - "smithy.api#documentation": "The name of the IAM execution role to use to update the stack set. If you do not specify\n an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole\n role for the stack set operation.
Specify an IAM role only if you are using customized execution roles to control which\n stack resources users and groups can include in their stack sets.
\nIf you specify a customized execution role, CloudFormation uses that role to update the stack.\n If you do not specify a customized execution role, CloudFormation performs the update using the\n role previously associated with the stack set, so long as you have permissions to perform\n operations on the stack set.
" + "smithy.api#documentation": "[Self-managed permissions] The name of the IAM execution role to use to update the stack\n set. If you do not specify an execution role, CloudFormation uses the\n AWSCloudFormationStackSetExecutionRole role for the stack set operation.
Specify an IAM role only if you are using customized execution roles to control which\n stack resources users and groups can include in their stack sets.
\nIf you specify a customized execution role, CloudFormation uses that role to update the stack.\n If you do not specify a customized execution role, CloudFormation performs the update using the\n role previously associated with the stack set, so long as you have permissions to perform\n operations on the stack set.
" } }, "DeploymentTargets": { @@ -16341,7 +16429,7 @@ "AutoDeployment": { "target": "com.amazonaws.cloudformation#AutoDeployment", "traits": { - "smithy.api#documentation": "[Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organization or organizational unit\n (OU).
\nIf you specify AutoDeployment, don't specify DeploymentTargets\n or Regions.
[Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organization or organizational unit (OU).\n For more information, see Manage\n automatic deployments for CloudFormation StackSets that use service-managed permissions\n in the CloudFormation User Guide.
\nIf you specify AutoDeployment, don't specify DeploymentTargets\n or Regions.
The URL of a file containing the template body. The URL must point to a template (max size:\n 1 MB) that is located in an Amazon S3 bucket or a Systems Manager document. The location\n for an Amazon S3 bucket must start with https://.
Conditional: You must pass TemplateURL or TemplateBody. If both\n are passed, only TemplateBody is used.
The URL of a file containing the template body. The URL must point to a template (max\n size: 1 MB) that is located in an Amazon S3 bucket or a Systems Manager document. The location for\n an Amazon S3 bucket must start with https://.
Conditional: You must pass TemplateURL or TemplateBody. If both\n are passed, only TemplateBody is used.
The CodeBuild access has been suspended for the calling Amazon Web Services account.
", + "smithy.api#error": "client" + } + }, "com.amazonaws.codebuild#ArtifactNamespace": { "type": "enum", "members": { @@ -321,6 +333,65 @@ "smithy.api#output": {} } }, + "com.amazonaws.codebuild#BatchGetCommandExecutions": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#BatchGetCommandExecutionsInput" + }, + "output": { + "target": "com.amazonaws.codebuild#BatchGetCommandExecutionsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codebuild#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "Gets information about the command executions.
" + } + }, + "com.amazonaws.codebuild#BatchGetCommandExecutionsInput": { + "type": "structure", + "members": { + "sandboxId": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "A sandboxId or sandboxArn.
A comma separated list of commandExecutionIds.
Information about the requested command executions.
" + } + }, + "commandExecutionsNotFound": { + "target": "com.amazonaws.codebuild#CommandExecutionIds", + "traits": { + "smithy.api#documentation": "The IDs of command executions for which information could not be found.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.codebuild#BatchGetFleets": { "type": "operation", "input": { @@ -529,6 +600,58 @@ "smithy.api#output": {} } }, + "com.amazonaws.codebuild#BatchGetSandboxes": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#BatchGetSandboxesInput" + }, + "output": { + "target": "com.amazonaws.codebuild#BatchGetSandboxesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codebuild#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "Gets information about the sandbox status.
" + } + }, + "com.amazonaws.codebuild#BatchGetSandboxesInput": { + "type": "structure", + "members": { + "ids": { + "target": "com.amazonaws.codebuild#SandboxIds", + "traits": { + "smithy.api#documentation": "A comma separated list of sandboxIds or sandboxArns.
Information about the requested sandboxes.
" + } + }, + "sandboxesNotFound": { + "target": "com.amazonaws.codebuild#SandboxIds", + "traits": { + "smithy.api#documentation": "The IDs of sandboxes for which information could not be found.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.codebuild#BatchReportModeType": { "type": "enum", "members": { @@ -1557,6 +1680,9 @@ { "target": "com.amazonaws.codebuild#BatchGetBuilds" }, + { + "target": "com.amazonaws.codebuild#BatchGetCommandExecutions" + }, { "target": "com.amazonaws.codebuild#BatchGetFleets" }, @@ -1569,6 +1695,9 @@ { "target": "com.amazonaws.codebuild#BatchGetReports" }, + { + "target": "com.amazonaws.codebuild#BatchGetSandboxes" + }, { "target": "com.amazonaws.codebuild#CreateFleet" }, @@ -1635,6 +1764,9 @@ { "target": "com.amazonaws.codebuild#ListBuildsForProject" }, + { + "target": "com.amazonaws.codebuild#ListCommandExecutionsForSandbox" + }, { "target": "com.amazonaws.codebuild#ListCuratedEnvironmentImages" }, @@ -1653,6 +1785,12 @@ { "target": "com.amazonaws.codebuild#ListReportsForReportGroup" }, + { + "target": "com.amazonaws.codebuild#ListSandboxes" + }, + { + "target": "com.amazonaws.codebuild#ListSandboxesForProject" + }, { "target": "com.amazonaws.codebuild#ListSharedProjects" }, @@ -1677,12 +1815,24 @@ { "target": "com.amazonaws.codebuild#StartBuildBatch" }, + { + "target": "com.amazonaws.codebuild#StartCommandExecution" + }, + { + "target": "com.amazonaws.codebuild#StartSandbox" + }, + { + "target": "com.amazonaws.codebuild#StartSandboxConnection" + }, { "target": "com.amazonaws.codebuild#StopBuild" }, { "target": "com.amazonaws.codebuild#StopBuildBatch" }, + { + "target": "com.amazonaws.codebuild#StopSandbox" + }, { "target": "com.amazonaws.codebuild#UpdateFleet" }, @@ -2812,6 +2962,118 @@ "target": "com.amazonaws.codebuild#CodeCoverage" } }, + "com.amazonaws.codebuild#CommandExecution": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The ID of the command execution.
" + } + }, + "sandboxId": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "A sandboxId.
When the command execution process was initially submitted, expressed in Unix time format.
" + } + }, + "startTime": { + "target": "com.amazonaws.codebuild#Timestamp", + "traits": { + "smithy.api#documentation": "When the command execution process started, expressed in Unix time format.
" + } + }, + "endTime": { + "target": "com.amazonaws.codebuild#Timestamp", + "traits": { + "smithy.api#documentation": "When the command execution process ended, expressed in Unix time format.
" + } + }, + "status": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The status of the command execution.
" + } + }, + "command": { + "target": "com.amazonaws.codebuild#SensitiveNonEmptyString", + "traits": { + "smithy.api#documentation": "The command that needs to be executed.
" + } + }, + "type": { + "target": "com.amazonaws.codebuild#CommandType", + "traits": { + "smithy.api#documentation": "The command type.
" + } + }, + "exitCode": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The exit code to return upon completion.
" + } + }, + "standardOutputContent": { + "target": "com.amazonaws.codebuild#SensitiveNonEmptyString", + "traits": { + "smithy.api#documentation": "The text written by the command to stdout.
" + } + }, + "standardErrContent": { + "target": "com.amazonaws.codebuild#SensitiveNonEmptyString", + "traits": { + "smithy.api#documentation": "The text written by the command to stderr.
" + } + }, + "logs": { + "target": "com.amazonaws.codebuild#LogsLocation" + }, + "sandboxArn": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "A sandboxArn.
Contains command execution information.
" + } + }, + "com.amazonaws.codebuild#CommandExecutionIds": { + "type": "list", + "member": { + "target": "com.amazonaws.codebuild#NonEmptyString" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.codebuild#CommandExecutions": { + "type": "list", + "member": { + "target": "com.amazonaws.codebuild#CommandExecution" + } + }, + "com.amazonaws.codebuild#CommandType": { + "type": "enum", + "members": { + "SHELL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHELL" + } + } + } + }, "com.amazonaws.codebuild#ComputeConfiguration": { "type": "structure", "members": { @@ -4053,6 +4315,12 @@ "smithy.api#enumValue": "WINDOWS_SERVER_2019_CONTAINER" } }, + "WINDOWS_SERVER_2022_CONTAINER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WINDOWS_SERVER_2022_CONTAINER" + } + }, "LINUX_LAMBDA_CONTAINER": { "target": "smithy.api#Unit", "traits": { @@ -5326,86 +5594,58 @@ "smithy.api#output": {} } }, - "com.amazonaws.codebuild#ListCuratedEnvironmentImages": { - "type": "operation", - "input": { - "target": "com.amazonaws.codebuild#ListCuratedEnvironmentImagesInput" - }, - "output": { - "target": "com.amazonaws.codebuild#ListCuratedEnvironmentImagesOutput" - }, - "traits": { - "smithy.api#documentation": "Gets information about Docker images that are managed by CodeBuild.
" - } - }, - "com.amazonaws.codebuild#ListCuratedEnvironmentImagesInput": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.codebuild#ListCuratedEnvironmentImagesOutput": { - "type": "structure", - "members": { - "platforms": { - "target": "com.amazonaws.codebuild#EnvironmentPlatforms", - "traits": { - "smithy.api#documentation": "Information about supported platforms for Docker images that are managed by\n CodeBuild.
" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.codebuild#ListFleets": { + "com.amazonaws.codebuild#ListCommandExecutionsForSandbox": { "type": "operation", "input": { - "target": "com.amazonaws.codebuild#ListFleetsInput" + "target": "com.amazonaws.codebuild#ListCommandExecutionsForSandboxInput" }, "output": { - "target": "com.amazonaws.codebuild#ListFleetsOutput" + "target": "com.amazonaws.codebuild#ListCommandExecutionsForSandboxOutput" }, "errors": [ { "target": "com.amazonaws.codebuild#InvalidInputException" + }, + { + "target": "com.amazonaws.codebuild#ResourceNotFoundException" } ], "traits": { - "smithy.api#documentation": "Gets a list of compute fleet names with each compute fleet name representing a single compute fleet.
", + "smithy.api#documentation": "Gets a list of command executions for a sandbox.
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", + "items": "commandExecutions", "pageSize": "maxResults" } } }, - "com.amazonaws.codebuild#ListFleetsInput": { + "com.amazonaws.codebuild#ListCommandExecutionsForSandboxInput": { "type": "structure", "members": { - "nextToken": { - "target": "com.amazonaws.codebuild#SensitiveString", + "sandboxId": { + "target": "com.amazonaws.codebuild#NonEmptyString", "traits": { - "smithy.api#documentation": "During a previous call, if there are more than 100 items in the list, only the first\n 100 items are returned, along with a unique string called a\n nextToken. To get the next batch of items in the list, call\n this operation again, adding the next token to the call. To get all of the items in the\n list, keep calling this operation with each subsequent next token that is returned,\n until no more next tokens are returned.
" + "smithy.api#documentation": "A sandboxId or sandboxArn.
The maximum number of paginated compute fleets returned per response. Use\n nextToken to iterate pages in the list of returned compute fleets.
The maximum number of sandbox records to be retrieved.
" } }, "sortOrder": { "target": "com.amazonaws.codebuild#SortOrderType", "traits": { - "smithy.api#documentation": "The order in which to list compute fleets. Valid values include:
\n\n ASCENDING: List in ascending order.
\n DESCENDING: List in descending order.
Use sortBy to specify the criterion to be used to list compute fleet\n names.
The order in which sandbox records should be retrieved.
" } }, - "sortBy": { - "target": "com.amazonaws.codebuild#FleetSortByType", + "nextToken": { + "target": "com.amazonaws.codebuild#SensitiveString", "traits": { - "smithy.api#documentation": "The criterion to be used to list compute fleet names. Valid values include:
\n\n CREATED_TIME: List based on when each compute fleet was\n created.
\n LAST_MODIFIED_TIME: List based on when information about each\n compute fleet was last changed.
\n NAME: List based on each compute fleet's name.
Use sortOrder to specify in what order to list the compute fleet names\n based on the preceding criteria.
The next token, if any, to get paginated results. You will get this value from previous execution of list sandboxes.
" } } }, @@ -5413,17 +5653,124 @@ "smithy.api#input": {} } }, - "com.amazonaws.codebuild#ListFleetsOutput": { + "com.amazonaws.codebuild#ListCommandExecutionsForSandboxOutput": { "type": "structure", "members": { - "nextToken": { - "target": "com.amazonaws.codebuild#String", + "commandExecutions": { + "target": "com.amazonaws.codebuild#CommandExecutions", "traits": { - "smithy.api#documentation": "If there are more than 100 items in the list, only the first 100 items are returned,\n along with a unique string called a nextToken. To get the next\n batch of items in the list, call this operation again, adding the next token to the\n call.
" + "smithy.api#documentation": "Information about the requested command executions.
" } }, - "fleets": { - "target": "com.amazonaws.codebuild#FleetArns", + "nextToken": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "Information about the next token to get paginated results.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.codebuild#ListCuratedEnvironmentImages": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#ListCuratedEnvironmentImagesInput" + }, + "output": { + "target": "com.amazonaws.codebuild#ListCuratedEnvironmentImagesOutput" + }, + "traits": { + "smithy.api#documentation": "Gets information about Docker images that are managed by CodeBuild.
" + } + }, + "com.amazonaws.codebuild#ListCuratedEnvironmentImagesInput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.codebuild#ListCuratedEnvironmentImagesOutput": { + "type": "structure", + "members": { + "platforms": { + "target": "com.amazonaws.codebuild#EnvironmentPlatforms", + "traits": { + "smithy.api#documentation": "Information about supported platforms for Docker images that are managed by\n CodeBuild.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.codebuild#ListFleets": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#ListFleetsInput" + }, + "output": { + "target": "com.amazonaws.codebuild#ListFleetsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codebuild#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "Gets a list of compute fleet names with each compute fleet name representing a single compute fleet.
", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.codebuild#ListFleetsInput": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.codebuild#SensitiveString", + "traits": { + "smithy.api#documentation": "During a previous call, if there are more than 100 items in the list, only the first\n 100 items are returned, along with a unique string called a\n nextToken. To get the next batch of items in the list, call\n this operation again, adding the next token to the call. To get all of the items in the\n list, keep calling this operation with each subsequent next token that is returned,\n until no more next tokens are returned.
" + } + }, + "maxResults": { + "target": "com.amazonaws.codebuild#PageSize", + "traits": { + "smithy.api#documentation": "The maximum number of paginated compute fleets returned per response. Use\n nextToken to iterate pages in the list of returned compute fleets.
The order in which to list compute fleets. Valid values include:
\n\n ASCENDING: List in ascending order.
\n DESCENDING: List in descending order.
Use sortBy to specify the criterion to be used to list compute fleet\n names.
The criterion to be used to list compute fleet names. Valid values include:
\n\n CREATED_TIME: List based on when each compute fleet was\n created.
\n LAST_MODIFIED_TIME: List based on when information about each\n compute fleet was last changed.
\n NAME: List based on each compute fleet's name.
Use sortOrder to specify in what order to list the compute fleet names\n based on the preceding criteria.
If there are more than 100 items in the list, only the first 100 items are returned,\n along with a unique string called a nextToken. To get the next\n batch of items in the list, call this operation again, adding the next token to the\n call.
" + } + }, + "fleets": { + "target": "com.amazonaws.codebuild#FleetArns", "traits": { "smithy.api#documentation": "The list of compute fleet names.
" } @@ -5736,6 +6083,154 @@ "smithy.api#output": {} } }, + "com.amazonaws.codebuild#ListSandboxes": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#ListSandboxesInput" + }, + "output": { + "target": "com.amazonaws.codebuild#ListSandboxesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codebuild#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "Gets a list of sandboxes.
", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "ids", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.codebuild#ListSandboxesForProject": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#ListSandboxesForProjectInput" + }, + "output": { + "target": "com.amazonaws.codebuild#ListSandboxesForProjectOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codebuild#InvalidInputException" + }, + { + "target": "com.amazonaws.codebuild#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Gets a list of sandboxes for a given project.
", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "ids", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.codebuild#ListSandboxesForProjectInput": { + "type": "structure", + "members": { + "projectName": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The CodeBuild project name.
", + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.codebuild#PageSize", + "traits": { + "smithy.api#documentation": "The maximum number of sandbox records to be retrieved.
" + } + }, + "sortOrder": { + "target": "com.amazonaws.codebuild#SortOrderType", + "traits": { + "smithy.api#documentation": "The order in which sandbox records should be retrieved.
" + } + }, + "nextToken": { + "target": "com.amazonaws.codebuild#SensitiveString", + "traits": { + "smithy.api#documentation": "The next token, if any, to get paginated results. You will get this value from previous execution of list sandboxes.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.codebuild#ListSandboxesForProjectOutput": { + "type": "structure", + "members": { + "ids": { + "target": "com.amazonaws.codebuild#SandboxIds", + "traits": { + "smithy.api#documentation": "Information about the requested sandbox IDs.
" + } + }, + "nextToken": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "Information about the next token to get paginated results.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.codebuild#ListSandboxesInput": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.codebuild#PageSize", + "traits": { + "smithy.api#documentation": "The maximum number of sandbox records to be retrieved.
" + } + }, + "sortOrder": { + "target": "com.amazonaws.codebuild#SortOrderType", + "traits": { + "smithy.api#documentation": "The order in which sandbox records should be retrieved.
" + } + }, + "nextToken": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "The next token, if any, to get paginated results. You will get this value from previous execution of list sandboxes.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.codebuild#ListSandboxesOutput": { + "type": "structure", + "members": { + "ids": { + "target": "com.amazonaws.codebuild#SandboxIds", + "traits": { + "smithy.api#documentation": "Information about the requested sandbox IDs.
" + } + }, + "nextToken": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "Information about the next token to get paginated results.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.codebuild#ListSharedProjects": { "type": "operation", "input": { @@ -6504,6 +6999,12 @@ "traits": { "smithy.api#documentation": "An array of strings that specify the local cache modes. You can use one or more local\n cache modes at the same time. This is only used for LOCAL cache\n types.
Possible values are:
\nCaches Git metadata for primary and secondary sources. After the cache is\n created, subsequent builds pull only the change between commits. This mode\n is a good choice for projects with a clean working directory and a source\n that is a large Git repository. If you choose this option and your project\n does not use a Git repository (GitHub, GitHub Enterprise, or Bitbucket), the\n option is ignored.
\nCaches existing Docker layers. This mode is a good choice for projects\n that build or pull large Docker images. It can prevent the performance\n issues caused by pulling large Docker images down from the network.
\nYou can use a Docker layer cache in the Linux environment\n only.
\nThe privileged flag must be set so that your\n project has the required Docker permissions.
You should consider the security implications before you use a\n Docker layer cache.
\nCaches directories you specify in the buildspec file. This mode is a good\n choice if your build scenario is not suited to one of the other three local\n cache modes. If you use a custom cache:
\nOnly directories can be specified for caching. You cannot specify\n individual files.
\nSymlinks are used to reference cached directories.
\nCached directories are linked to your build before it downloads\n its project sources. Cached items are overridden if a source item\n has the same name. Directories are specified using cache paths in\n the buildspec file.
\nDefines the scope of the cache. You can use this namespace to share a cache across \n multiple projects. For more information, see Cache sharing \n between projects in the CodeBuild User Guide.
" + } } }, "traits": { @@ -7686,44 +8187,304 @@ "smithy.api#documentation": "Information about the S3 bucket where the raw data of a report are exported.
" } }, - "com.amazonaws.codebuild#ScalingConfigurationInput": { + "com.amazonaws.codebuild#SSMSession": { "type": "structure", "members": { - "scalingType": { - "target": "com.amazonaws.codebuild#FleetScalingType", + "sessionId": { + "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "The scaling type for a compute fleet.
" + "smithy.api#documentation": "The ID of the session.
" } }, - "targetTrackingScalingConfigs": { - "target": "com.amazonaws.codebuild#TargetTrackingScalingConfigurations", + "tokenValue": { + "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "A list of TargetTrackingScalingConfiguration objects.
An encrypted token value containing session and caller information.
" } }, - "maxCapacity": { - "target": "com.amazonaws.codebuild#FleetCapacity", + "streamUrl": { + "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "The maximum number of instances in the fleet when auto-scaling.
" + "smithy.api#documentation": "A URL back to SSM Agent on the managed node that the Session Manager client uses to send commands and receive output from the node.
" } } }, "traits": { - "smithy.api#documentation": "The scaling configuration input of a compute fleet.
" + "smithy.api#documentation": "Contains information about the Session Manager session.
" } }, - "com.amazonaws.codebuild#ScalingConfigurationOutput": { + "com.amazonaws.codebuild#Sandbox": { "type": "structure", "members": { - "scalingType": { - "target": "com.amazonaws.codebuild#FleetScalingType", + "id": { + "target": "com.amazonaws.codebuild#NonEmptyString", "traits": { - "smithy.api#documentation": "The scaling type for a compute fleet.
" + "smithy.api#documentation": "The ID of the sandbox.
" } }, - "targetTrackingScalingConfigs": { - "target": "com.amazonaws.codebuild#TargetTrackingScalingConfigurations", - "traits": { + "arn": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The ARN of the sandbox.
" + } + }, + "projectName": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The CodeBuild project name.
" + } + }, + "requestTime": { + "target": "com.amazonaws.codebuild#Timestamp", + "traits": { + "smithy.api#documentation": "When the sandbox process was initially requested, expressed in Unix time format.
" + } + }, + "startTime": { + "target": "com.amazonaws.codebuild#Timestamp", + "traits": { + "smithy.api#documentation": "When the sandbox process started, expressed in Unix time format.
" + } + }, + "endTime": { + "target": "com.amazonaws.codebuild#Timestamp", + "traits": { + "smithy.api#documentation": "When the sandbox process ended, expressed in Unix time format.
" + } + }, + "status": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "The status of the sandbox.
" + } + }, + "source": { + "target": "com.amazonaws.codebuild#ProjectSource" + }, + "sourceVersion": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "Any version identifier for the version of the sandbox to be built.
" + } + }, + "secondarySources": { + "target": "com.amazonaws.codebuild#ProjectSources", + "traits": { + "smithy.api#documentation": " An array of ProjectSource objects.
An array of ProjectSourceVersion objects.
\n An array of ProjectFileSystemLocation objects for a CodeBuild build project. A ProjectFileSystemLocation object \n specifies the identifier, location, mountOptions, \n mountPoint, and type of a file system created using Amazon Elastic File System.\n
How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out this sandbox if it does not\n get marked as completed.
" + } + }, + "queuedTimeoutInMinutes": { + "target": "com.amazonaws.codebuild#WrapperInt", + "traits": { + "smithy.api#documentation": "The number of minutes a sandbox is allowed to be queued before it times out.
" + } + }, + "vpcConfig": { + "target": "com.amazonaws.codebuild#VpcConfig" + }, + "logConfig": { + "target": "com.amazonaws.codebuild#LogsConfig" + }, + "encryptionKey": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The Key Management Service customer master key (CMK) to be used for encrypting the sandbox output\n artifacts.
" + } + }, + "serviceRole": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The name of a service role used for this sandbox.
" + } + }, + "currentSession": { + "target": "com.amazonaws.codebuild#SandboxSession", + "traits": { + "smithy.api#documentation": "The current session for the sandbox.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains sandbox information.
" + } + }, + "com.amazonaws.codebuild#SandboxIds": { + "type": "list", + "member": { + "target": "com.amazonaws.codebuild#NonEmptyString" + } + }, + "com.amazonaws.codebuild#SandboxSession": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The ID of the sandbox session.
" + } + }, + "status": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "The status of the sandbox session.
" + } + }, + "startTime": { + "target": "com.amazonaws.codebuild#Timestamp", + "traits": { + "smithy.api#documentation": "When the sandbox session started, expressed in Unix time format.
" + } + }, + "endTime": { + "target": "com.amazonaws.codebuild#Timestamp", + "traits": { + "smithy.api#documentation": "When the sandbox session ended, expressed in Unix time format.
" + } + }, + "currentPhase": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "The current phase for the sandbox.
" + } + }, + "phases": { + "target": "com.amazonaws.codebuild#SandboxSessionPhases", + "traits": { + "smithy.api#documentation": " An array of SandboxSessionPhase objects.
An identifier for the version of this sandbox's source code.
" + } + }, + "logs": { + "target": "com.amazonaws.codebuild#LogsLocation" + }, + "networkInterface": { + "target": "com.amazonaws.codebuild#NetworkInterface" + } + }, + "traits": { + "smithy.api#documentation": "Contains information about the sandbox session.
" + } + }, + "com.amazonaws.codebuild#SandboxSessionPhase": { + "type": "structure", + "members": { + "phaseType": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "The name of the sandbox phase.
" + } + }, + "phaseStatus": { + "target": "com.amazonaws.codebuild#StatusType", + "traits": { + "smithy.api#documentation": "The current status of the sandbox phase. Valid values include:
\nThe sandbox phase failed.
\nThe sandbox phase faulted.
\nThe sandbox phase is still in progress.
\nThe sandbox phase stopped.
\nThe sandbox phase succeeded.
\nThe sandbox phase timed out.
\nWhen the sandbox phase started, expressed in Unix time format.
" + } + }, + "endTime": { + "target": "com.amazonaws.codebuild#Timestamp", + "traits": { + "smithy.api#documentation": "When the sandbox phase ended, expressed in Unix time format.
" + } + }, + "durationInSeconds": { + "target": "com.amazonaws.codebuild#WrapperLong", + "traits": { + "smithy.api#documentation": "How long, in seconds, between the starting and ending times of the sandbox's\n phase.
" + } + }, + "contexts": { + "target": "com.amazonaws.codebuild#PhaseContexts", + "traits": { + "smithy.api#documentation": " An array of PhaseContext objects.
Contains information about the sandbox phase.
" + } + }, + "com.amazonaws.codebuild#SandboxSessionPhases": { + "type": "list", + "member": { + "target": "com.amazonaws.codebuild#SandboxSessionPhase" + } + }, + "com.amazonaws.codebuild#Sandboxes": { + "type": "list", + "member": { + "target": "com.amazonaws.codebuild#Sandbox" + } + }, + "com.amazonaws.codebuild#ScalingConfigurationInput": { + "type": "structure", + "members": { + "scalingType": { + "target": "com.amazonaws.codebuild#FleetScalingType", + "traits": { + "smithy.api#documentation": "The scaling type for a compute fleet.
" + } + }, + "targetTrackingScalingConfigs": { + "target": "com.amazonaws.codebuild#TargetTrackingScalingConfigurations", + "traits": { + "smithy.api#documentation": "A list of TargetTrackingScalingConfiguration objects.
The maximum number of instances in the fleet when auto-scaling.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The scaling configuration input of a compute fleet.
" + } + }, + "com.amazonaws.codebuild#ScalingConfigurationOutput": { + "type": "structure", + "members": { + "scalingType": { + "target": "com.amazonaws.codebuild#FleetScalingType", + "traits": { + "smithy.api#documentation": "The scaling type for a compute fleet.
" + } + }, + "targetTrackingScalingConfigs": { + "target": "com.amazonaws.codebuild#TargetTrackingScalingConfigurations", + "traits": { "smithy.api#documentation": "A list of TargetTrackingScalingConfiguration objects.
Starts a command execution.
" + } + }, + "com.amazonaws.codebuild#StartCommandExecutionInput": { + "type": "structure", + "members": { + "sandboxId": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "A sandboxId or sandboxArn.
The command that needs to be executed.
", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.codebuild#CommandType", + "traits": { + "smithy.api#documentation": "The command type.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.codebuild#StartCommandExecutionOutput": { + "type": "structure", + "members": { + "commandExecution": { + "target": "com.amazonaws.codebuild#CommandExecution", + "traits": { + "smithy.api#documentation": "Information about the requested command executions.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.codebuild#StartSandbox": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#StartSandboxInput" + }, + "output": { + "target": "com.amazonaws.codebuild#StartSandboxOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codebuild#AccountSuspendedException" + }, + { + "target": "com.amazonaws.codebuild#InvalidInputException" + }, + { + "target": "com.amazonaws.codebuild#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Starts a sandbox.
" + } + }, + "com.amazonaws.codebuild#StartSandboxConnection": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#StartSandboxConnectionInput" + }, + "output": { + "target": "com.amazonaws.codebuild#StartSandboxConnectionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codebuild#InvalidInputException" + }, + { + "target": "com.amazonaws.codebuild#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Starts a sandbox connection.
" + } + }, + "com.amazonaws.codebuild#StartSandboxConnectionInput": { + "type": "structure", + "members": { + "sandboxId": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "A sandboxId or sandboxArn.
Information about the Session Manager session.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.codebuild#StartSandboxInput": { + "type": "structure", + "members": { + "projectName": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "The CodeBuild project name.
" + } + }, + "idempotencyToken": { + "target": "com.amazonaws.codebuild#SensitiveString", + "traits": { + "smithy.api#documentation": "A unique client token.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.codebuild#StartSandboxOutput": { + "type": "structure", + "members": { + "sandbox": { + "target": "com.amazonaws.codebuild#Sandbox", + "traits": { + "smithy.api#documentation": "Information about the requested sandbox.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.codebuild#StatusType": { "type": "enum", "members": { @@ -8618,6 +9547,55 @@ "smithy.api#output": {} } }, + "com.amazonaws.codebuild#StopSandbox": { + "type": "operation", + "input": { + "target": "com.amazonaws.codebuild#StopSandboxInput" + }, + "output": { + "target": "com.amazonaws.codebuild#StopSandboxOutput" + }, + "errors": [ + { + "target": "com.amazonaws.codebuild#InvalidInputException" + }, + { + "target": "com.amazonaws.codebuild#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Stops a sandbox.
" + } + }, + "com.amazonaws.codebuild#StopSandboxInput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "Information about the requested sandbox ID.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.codebuild#StopSandboxOutput": { + "type": "structure", + "members": { + "sandbox": { + "target": "com.amazonaws.codebuild#Sandbox", + "traits": { + "smithy.api#documentation": "Information about the requested sandbox.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.codebuild#String": { "type": "string" }, diff --git a/codegen/sdk/aws-models/connect-contact-lens.json b/codegen/sdk/aws-models/connect-contact-lens.json index d696f32dc84..d453e5aac1c 100644 --- a/codegen/sdk/aws-models/connect-contact-lens.json +++ b/codegen/sdk/aws-models/connect-contact-lens.json @@ -1389,9 +1389,7 @@ "Sentiment": { "target": "com.amazonaws.connectcontactlens#SentimentValue", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The sentiment detected for this piece of transcript.
", - "smithy.api#required": {} + "smithy.api#documentation": "The sentiment detected for this piece of transcript.
" } }, "IssuesDetected": { diff --git a/codegen/sdk/aws-models/controlcatalog.json b/codegen/sdk/aws-models/controlcatalog.json index ca837e37199..e29b1fb745f 100644 --- a/codegen/sdk/aws-models/controlcatalog.json +++ b/codegen/sdk/aws-models/controlcatalog.json @@ -979,6 +979,35 @@ } } }, + "com.amazonaws.controlcatalog#ControlSeverity": { + "type": "enum", + "members": { + "LOW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOW" + } + }, + "MEDIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEDIUM" + } + }, + "HIGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HIGH" + } + }, + "CRITICAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CRITICAL" + } + } + } + }, "com.amazonaws.controlcatalog#ControlSummary": { "type": "structure", "members": { @@ -1002,6 +1031,30 @@ "smithy.api#documentation": "A description of the control, as it may appear in the console. Describes the functionality of the control.
", "smithy.api#required": {} } + }, + "Behavior": { + "target": "com.amazonaws.controlcatalog#ControlBehavior", + "traits": { + "smithy.api#documentation": "An enumerated type, with the following possible values:
" + } + }, + "Severity": { + "target": "com.amazonaws.controlcatalog#ControlSeverity", + "traits": { + "smithy.api#documentation": "An enumerated type, with the following possible values:
" + } + }, + "Implementation": { + "target": "com.amazonaws.controlcatalog#ImplementationSummary", + "traits": { + "smithy.api#documentation": "An object of type ImplementationSummary that describes how the control is implemented.
A timestamp that notes the time when the control was released (start of its life) as a governance capability in Amazon Web Services.
" + } } }, "traits": { @@ -1202,6 +1255,12 @@ "smithy.api#required": {} } }, + "Severity": { + "target": "com.amazonaws.controlcatalog#ControlSeverity", + "traits": { + "smithy.api#documentation": "An enumerated type, with the following possible values:
" + } + }, "RegionConfiguration": { "target": "com.amazonaws.controlcatalog#RegionConfiguration", "traits": { @@ -1219,6 +1278,12 @@ "traits": { "smithy.api#documentation": "Returns an array of ControlParameter objects that specify the parameters a control supports. An empty list is returned for controls that don’t support parameters.\n
A timestamp that notes the time when the control was released (start of its life) as a governance capability in Amazon Web Services.
" + } } }, "traits": { @@ -1234,12 +1299,49 @@ "smithy.api#documentation": "A string that describes a control's implementation type.
", "smithy.api#required": {} } + }, + "Identifier": { + "target": "com.amazonaws.controlcatalog#ImplementationIdentifier", + "traits": { + "smithy.api#documentation": "A service-specific identifier for the control, assigned by the service that implemented the control. For example, this identifier could be an Amazon Web Services Config Rule ID or a Security Hub Control ID.
" + } } }, "traits": { "smithy.api#documentation": "An object that describes the implementation type for a control.
\nOur ImplementationDetails\n Type format has three required segments:
\n SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME\n
For example, AWS::Config::ConfigRule\n or\n AWS::SecurityHub::SecurityControl resources have the format with three required segments.
Our ImplementationDetails\n Type format has an optional fourth segment, which is present for applicable \n implementation types. The format is as follows:
\n SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME::RESOURCE-TYPE-DESCRIPTION\n
For example, AWS::Organizations::Policy::SERVICE_CONTROL_POLICY\n or\n AWS::CloudFormation::Type::HOOK have the format with four segments.
Although the format is similar, the values for the Type field do not match any Amazon Web Services CloudFormation values.
A string that represents the Amazon Web Services service that implements this control. For example, a value of AWS::Config::ConfigRule indicates that the control is implemented by Amazon Web Services Config, and AWS::SecurityHub::SecurityControl indicates implementation by Amazon Web Services Security Hub.
The identifier originally assigned by the Amazon Web Services service that implements the control. For example, CODEPIPELINE_DEPLOYMENT_COUNT_CHECK.
A summary of how the control is implemented, including the Amazon Web Services service that enforces the control and its service-specific identifier. For example, the value of this field could indicate that the control is implemented as an Amazon Web Services Config Rule or an Amazon Web Services Security Hub control.
" + } + }, "com.amazonaws.controlcatalog#ImplementationType": { "type": "string", "traits": { diff --git a/codegen/sdk/aws-models/cost-explorer.json b/codegen/sdk/aws-models/cost-explorer.json index 343825eb987..9f616a5f1b8 100644 --- a/codegen/sdk/aws-models/cost-explorer.json +++ b/codegen/sdk/aws-models/cost-explorer.json @@ -3441,7 +3441,7 @@ "Key": { "target": "com.amazonaws.costexplorer#Dimension", "traits": { - "smithy.api#documentation": "The names of the metadata types that you can use to filter and group your results. For\n example, AZ returns a list of Availability Zones.
Not all dimensions are supported in each API. Refer to the documentation for each\n specific API to see what is supported.
\n\n LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in\n CostCategoryRule.
\n ANOMALY_TOTAL_IMPACT_ABSOLUTE and\n ANOMALY_TOTAL_IMPACT_PERCENTAGE can only be used in AnomalySubscriptions.
The names of the metadata types that you can use to filter and group your results. For\n example, AZ returns a list of Availability Zones.
Not all dimensions are supported in each API. Refer to the documentation for each\n specific API to see what is supported.
\n\n LINKED_ACCOUNT_NAME and SERVICE_CODE can only be used in\n CostCategoryRule.
\n ANOMALY_TOTAL_IMPACT_ABSOLUTE and\n ANOMALY_TOTAL_IMPACT_PERCENTAGE can only be used in AnomalySubscriptions.
Retrieves all of the cost anomalies detected on your account during the time period that's\n specified by the DateInterval object. Anomalies are available for up to 90\n days.
Retrieves all of the cost anomalies detected on your account during the time period that's\n specified by the DateInterval object. Anomalies are available for up to 90\n days.
Retrieves the cost anomaly monitor definitions for your account. You can filter using a\n list of cost anomaly monitor Amazon Resource Names (ARNs).
" + "smithy.api#documentation": "Retrieves the cost anomaly monitor definitions for your account. You can filter using a\n list of cost anomaly monitor Amazon Resource Names (ARNs).
", + "smithy.api#paginated": { + "inputToken": "NextPageToken", + "outputToken": "NextPageToken", + "items": "AnomalyMonitors", + "pageSize": "MaxResults" + } } }, "com.amazonaws.costexplorer#GetAnomalyMonitorsRequest": { @@ -4355,7 +4367,13 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves the cost anomaly subscription objects for your account. You can filter using a\n list of cost anomaly monitor Amazon Resource Names (ARNs).
" + "smithy.api#documentation": "Retrieves the cost anomaly subscription objects for your account. You can filter using a\n list of cost anomaly monitor Amazon Resource Names (ARNs).
", + "smithy.api#paginated": { + "inputToken": "NextPageToken", + "outputToken": "NextPageToken", + "items": "AnomalySubscriptions", + "pageSize": "MaxResults" + } } }, "com.amazonaws.costexplorer#GetAnomalySubscriptionsRequest": { @@ -4754,7 +4772,7 @@ "Filter": { "target": "com.amazonaws.costexplorer#Expression", "traits": { - "smithy.api#documentation": "Filters Amazon Web Services costs by different dimensions. For example, you can specify\n SERVICE and LINKED_ACCOUNT and get the costs that are associated\n with that account's usage of that service. You can nest Expression objects to\n define any combination of dimension filters. For more information, see Expression.
The GetCostAndUsageWithResources operation requires that you either group\n by or filter by a ResourceId. It requires the Expression\n \"SERVICE = Amazon Elastic Compute Cloud - Compute\" in the filter.
Valid values for MatchOptions for Dimensions are\n EQUALS and CASE_SENSITIVE.
Valid values for MatchOptions for CostCategories and\n Tags are EQUALS, ABSENT, and\n CASE_SENSITIVE. Default values are EQUALS and\n CASE_SENSITIVE.
Filters Amazon Web Services costs by different dimensions. For example, you can specify\n SERVICE and LINKED_ACCOUNT and get the costs that are associated\n with that account's usage of that service. You can nest Expression objects to\n define any combination of dimension filters. For more information, see Expression.
Valid values for MatchOptions for Dimensions are\n EQUALS and CASE_SENSITIVE.
Valid values for MatchOptions for CostCategories and\n Tags are EQUALS, ABSENT, and\n CASE_SENSITIVE. Default values are EQUALS and\n CASE_SENSITIVE.
The filters that you want to use to filter your forecast. The\n GetCostForecast API supports filtering by the following dimensions:
\n AZ\n
\n INSTANCE_TYPE\n
\n LINKED_ACCOUNT\n
\n LINKED_ACCOUNT_NAME\n
\n OPERATION\n
\n PURCHASE_TYPE\n
\n REGION\n
\n SERVICE\n
\n USAGE_TYPE\n
\n USAGE_TYPE_GROUP\n
\n RECORD_TYPE\n
\n OPERATING_SYSTEM\n
\n TENANCY\n
\n SCOPE\n
\n PLATFORM\n
\n SUBSCRIPTION_ID\n
\n LEGAL_ENTITY_NAME\n
\n DEPLOYMENT_OPTION\n
\n DATABASE_ENGINE\n
\n INSTANCE_TYPE_FAMILY\n
\n BILLING_ENTITY\n
\n RESERVATION_ID\n
\n SAVINGS_PLAN_ARN\n
The filters that you want to use to filter your forecast. The\n GetCostForecast API supports filtering by the following dimensions:
\n AZ\n
\n INSTANCE_TYPE\n
\n LINKED_ACCOUNT\n
\n OPERATION\n
\n PURCHASE_TYPE\n
\n REGION\n
\n SERVICE\n
\n USAGE_TYPE\n
\n USAGE_TYPE_GROUP\n
\n RECORD_TYPE\n
\n OPERATING_SYSTEM\n
\n TENANCY\n
\n SCOPE\n
\n PLATFORM\n
\n SUBSCRIPTION_ID\n
\n LEGAL_ENTITY_NAME\n
\n DEPLOYMENT_OPTION\n
\n DATABASE_ENGINE\n
\n INSTANCE_TYPE_FAMILY\n
\n BILLING_ENTITY\n
\n RESERVATION_ID\n
\n SAVINGS_PLAN_ARN\n
\n Request a cost allocation tag backfill. This will backfill the activation status (either active or inactive) for all tag keys from para:BackfillFrom up to the when this request is made.
You can request a backfill once every 24 hours.\n
" + "smithy.api#documentation": "\n Request a cost allocation tag backfill. This will backfill the activation status (either active or inactive) for all tag keys from para:BackfillFrom up to the time this request is made.
You can request a backfill once every 24 hours.\n
" } }, "com.amazonaws.costexplorer#StartCostAllocationTagBackfillRequest": { diff --git a/codegen/sdk/aws-models/cost-optimization-hub.json b/codegen/sdk/aws-models/cost-optimization-hub.json index d078e5cd6b2..0a97f2bf0c7 100644 --- a/codegen/sdk/aws-models/cost-optimization-hub.json +++ b/codegen/sdk/aws-models/cost-optimization-hub.json @@ -233,7 +233,7 @@ "accountScope": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account scope that you want your recommendations for. Amazon Web Services calculates\n recommendations including the management account and member accounts if the value is set to\n PAYER. If the value is LINKED, recommendations are calculated for\n individual member accounts only.
The account scope for which you want recommendations. Amazon Web Services calculates\n recommendations including the management account and member accounts if the value is set to\n PAYER. If the value is LINKED, recommendations are calculated for\n individual member accounts only.
The DB instance configuration used for recommendations.
" } }, + "com.amazonaws.costoptimizationhub#DynamoDbReservedCapacity": { + "type": "structure", + "members": { + "configuration": { + "target": "com.amazonaws.costoptimizationhub#DynamoDbReservedCapacityConfiguration", + "traits": { + "smithy.api#documentation": "The DynamoDB reserved capacity configuration used for recommendations.
" + } + }, + "costCalculation": { + "target": "com.amazonaws.costoptimizationhub#ReservedInstancesCostCalculation" + } + }, + "traits": { + "smithy.api#documentation": "The DynamoDB reserved capacity recommendation details.
", + "smithy.api#tags": [ + "db_recs" + ] + } + }, + "com.amazonaws.costoptimizationhub#DynamoDbReservedCapacityConfiguration": { + "type": "structure", + "members": { + "accountScope": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The account scope for which you want recommendations.
" + } + }, + "service": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The service for which you want recommendations.
" + } + }, + "term": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The reserved capacity recommendation term in years.
" + } + }, + "paymentOption": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The payment option for the commitment.
" + } + }, + "reservedInstancesRegion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" + } + }, + "upfrontCost": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "How much purchasing this reserved capacity costs you upfront.
" + } + }, + "monthlyRecurringCost": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "How much purchasing this reserved capacity costs you on a monthly basis.
" + } + }, + "numberOfCapacityUnitsToPurchase": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The number of reserved capacity units that Amazon Web Services recommends that you\n purchase.
" + } + }, + "capacityUnits": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The capacity unit of the recommended reservation.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The DynamoDB reserved capacity configuration used for recommendations.
", + "smithy.api#tags": [ + "db_recs" + ] + } + }, "com.amazonaws.costoptimizationhub#EbsVolume": { "type": "structure", "members": { @@ -1154,7 +1239,7 @@ "accountScope": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account scope that you want your recommendations for.
" + "smithy.api#documentation": "The account scope for which you want recommendations.
" } }, "term": { @@ -1218,19 +1303,13 @@ "accountScope": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account scope that you want your recommendations for.
" + "smithy.api#documentation": "The account scope for which you want recommendations.
" } }, "service": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The service that you want your recommendations for.
" - } - }, - "normalizedUnitsToPurchase": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" + "smithy.api#documentation": "The service for which you want recommendations.
" } }, "term": { @@ -1245,6 +1324,30 @@ "smithy.api#documentation": "The payment option for the commitment.
" } }, + "reservedInstancesRegion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" + } + }, + "upfrontCost": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" + } + }, + "monthlyRecurringCost": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "How much purchasing these reserved instances costs you on a monthly basis.
" + } + }, + "normalizedUnitsToPurchase": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" + } + }, "numberOfInstancesToPurchase": { "target": "smithy.api#String", "traits": { @@ -1269,12 +1372,6 @@ "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" } }, - "reservedInstancesRegion": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" - } - }, "currentGeneration": { "target": "smithy.api#String", "traits": { @@ -1298,18 +1395,6 @@ "traits": { "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" } - }, - "upfrontCost": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" - } - }, - "monthlyRecurringCost": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "How much purchasing reserved instances costs you on a monthly basis.
" - } } }, "traits": { @@ -1376,19 +1461,13 @@ "accountScope": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account scope that you want your recommendations for.
" + "smithy.api#documentation": "The account scope for which you want recommendations.
" } }, "service": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The service that you want your recommendations for.
" - } - }, - "normalizedUnitsToPurchase": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" + "smithy.api#documentation": "The service for which you want recommendations.
" } }, "term": { @@ -1403,52 +1482,58 @@ "smithy.api#documentation": "The payment option for the commitment.
" } }, - "numberOfInstancesToPurchase": { + "reservedInstancesRegion": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" + "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" } }, - "instanceFamily": { + "upfrontCost": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The instance family of the recommended reservation.
" + "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" } }, - "instanceType": { + "monthlyRecurringCost": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" + "smithy.api#documentation": "How much purchasing these reserved instances costs you on a monthly basis.
" } }, - "reservedInstancesRegion": { + "normalizedUnitsToPurchase": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" + "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" } }, - "currentGeneration": { + "numberOfInstancesToPurchase": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" + "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" } }, - "sizeFlexEligible": { - "target": "smithy.api#Boolean", + "instanceFamily": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" + "smithy.api#documentation": "The instance family of the recommended reservation.
" } }, - "upfrontCost": { + "instanceType": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" + "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" } }, - "monthlyRecurringCost": { + "currentGeneration": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "How much purchasing reserved instances costs you on a monthly basis.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" + } + }, + "sizeFlexEligible": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" } } }, @@ -1523,7 +1608,7 @@ "accountIds": { "target": "com.amazonaws.costoptimizationhub#AccountIdList", "traits": { - "smithy.api#documentation": "The account that the recommendation is for.
" + "smithy.api#documentation": "The account to which the recommendation applies.
" } }, "regions": { @@ -1696,7 +1781,7 @@ "accountId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account that the recommendation is for.
" + "smithy.api#documentation": "The account to which the recommendation applies.
" } }, "currencyCode": { @@ -2266,6 +2351,115 @@ } } }, + "com.amazonaws.costoptimizationhub#MemoryDbReservedInstances": { + "type": "structure", + "members": { + "configuration": { + "target": "com.amazonaws.costoptimizationhub#MemoryDbReservedInstancesConfiguration", + "traits": { + "smithy.api#documentation": "The MemoryDB reserved instances configuration used for recommendations.
" + } + }, + "costCalculation": { + "target": "com.amazonaws.costoptimizationhub#ReservedInstancesCostCalculation" + } + }, + "traits": { + "smithy.api#documentation": "The MemoryDB reserved instances recommendation details.
\nMemoryDB reserved instances are referred to as \"MemoryDB reserved nodes\" in\n customer-facing documentation.
\nThe account scope for which you want recommendations.
" + } + }, + "service": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The service for which you want recommendations.
" + } + }, + "term": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The reserved instances recommendation term in years.
" + } + }, + "paymentOption": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The payment option for the commitment.
" + } + }, + "reservedInstancesRegion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" + } + }, + "upfrontCost": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "How much purchasing these reserved instances costs you upfront.
" + } + }, + "monthlyRecurringCost": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "How much purchasing these reserved instances costs you on a monthly basis.
" + } + }, + "normalizedUnitsToPurchase": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" + } + }, + "numberOfInstancesToPurchase": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" + } + }, + "instanceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" + } + }, + "instanceFamily": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The instance family of the recommended reservation.
" + } + }, + "sizeFlexEligible": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" + } + }, + "currentGeneration": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The MemoryDB reserved instances configuration used for recommendations.
\nMemoryDB reserved instances are referred to as \"MemoryDB reserved nodes\" in\n customer-facing documentation.
\nThe account scope that you want your recommendations for.
" + "smithy.api#documentation": "The account scope for which you want recommendations.
" } }, "service": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The service that you want your recommendations for.
" - } - }, - "normalizedUnitsToPurchase": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" + "smithy.api#documentation": "The service for which you want recommendations.
" } }, "term": { @@ -2339,46 +2527,52 @@ "smithy.api#documentation": "The payment option for the commitment.
" } }, - "numberOfInstancesToPurchase": { + "reservedInstancesRegion": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" + "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" } }, - "instanceType": { + "upfrontCost": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" + "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" } }, - "reservedInstancesRegion": { + "monthlyRecurringCost": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" + "smithy.api#documentation": "How much purchasing these reserved instances costs you on a monthly basis.
" } }, - "currentGeneration": { + "normalizedUnitsToPurchase": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" + "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" } }, - "sizeFlexEligible": { - "target": "smithy.api#Boolean", + "numberOfInstancesToPurchase": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" + "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" } }, - "upfrontCost": { + "instanceType": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" + "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" } }, - "monthlyRecurringCost": { + "currentGeneration": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "How much purchasing reserved instances costs you on a monthly basis.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" + } + }, + "sizeFlexEligible": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" } } }, @@ -2529,19 +2723,13 @@ "accountScope": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account scope that you want your recommendations for.
" + "smithy.api#documentation": "The account scope for which you want recommendations.
" } }, "service": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The service that you want your recommendations for.
" - } - }, - "normalizedUnitsToPurchase": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" + "smithy.api#documentation": "The service for which you want recommendations.
" } }, "term": { @@ -2556,52 +2744,58 @@ "smithy.api#documentation": "The payment option for the commitment.
" } }, - "numberOfInstancesToPurchase": { + "reservedInstancesRegion": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" + "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" } }, - "instanceFamily": { + "upfrontCost": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The instance family of the recommended reservation.
" + "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" } }, - "instanceType": { + "monthlyRecurringCost": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" + "smithy.api#documentation": "How much purchasing this instance costs you on a monthly basis.
" } }, - "reservedInstancesRegion": { + "normalizedUnitsToPurchase": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" + "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" } }, - "sizeFlexEligible": { - "target": "smithy.api#Boolean", + "numberOfInstancesToPurchase": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" + "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" } }, - "currentGeneration": { + "instanceFamily": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" + "smithy.api#documentation": "The instance family of the recommended reservation.
" } }, - "upfrontCost": { + "instanceType": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" + "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" } }, - "monthlyRecurringCost": { + "sizeFlexEligible": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" + } + }, + "currentGeneration": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "How much purchasing this instance costs you on a monthly basis.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" } }, "licenseModel": { @@ -2645,7 +2839,7 @@ "accountId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account that the recommendation is for.
" + "smithy.api#documentation": "The account to which the recommendation applies.
" } }, "region": { @@ -2843,19 +3037,13 @@ "accountScope": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account scope that you want your recommendations for.
" + "smithy.api#documentation": "The account scope for which you want recommendations.
" } }, "service": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The service that you want your recommendations for.
" - } - }, - "normalizedUnitsToPurchase": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" + "smithy.api#documentation": "The service for which you want recommendations.
" } }, "term": { @@ -2870,52 +3058,58 @@ "smithy.api#documentation": "The payment option for the commitment.
" } }, - "numberOfInstancesToPurchase": { + "reservedInstancesRegion": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" + "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" } }, - "instanceFamily": { + "upfrontCost": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The instance family of the recommended reservation.
" + "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" } }, - "instanceType": { + "monthlyRecurringCost": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" + "smithy.api#documentation": "How much purchasing these reserved instances costs you on a monthly basis.
" } }, - "reservedInstancesRegion": { + "normalizedUnitsToPurchase": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The Amazon Web Services Region of the commitment.
" + "smithy.api#documentation": "The number of normalized units that Amazon Web Services recommends that you\n purchase.
" } }, - "sizeFlexEligible": { - "target": "smithy.api#Boolean", + "numberOfInstancesToPurchase": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" + "smithy.api#documentation": "The number of instances that Amazon Web Services recommends that you purchase.
" } }, - "currentGeneration": { + "instanceFamily": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" + "smithy.api#documentation": "The instance family of the recommended reservation.
" } }, - "upfrontCost": { + "instanceType": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "How much purchasing this instance costs you upfront.
" + "smithy.api#documentation": "The type of instance that Amazon Web Services recommends.
" } }, - "monthlyRecurringCost": { + "sizeFlexEligible": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Determines whether the recommendation is size flexible.
" + } + }, + "currentGeneration": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "How much purchasing reserved instances costs you on a monthly basis.
" + "smithy.api#documentation": "Determines whether the recommendation is for a current generation instance.
" } } }, @@ -3105,6 +3299,24 @@ "traits": { "smithy.api#documentation": "The DB instance storage recommendation details.
" } + }, + "dynamoDbReservedCapacity": { + "target": "com.amazonaws.costoptimizationhub#DynamoDbReservedCapacity", + "traits": { + "smithy.api#documentation": "The DynamoDB reserved capacity recommendation\n details.
", + "smithy.api#tags": [ + "db_recs" + ] + } + }, + "memoryDbReservedInstances": { + "target": "com.amazonaws.costoptimizationhub#MemoryDbReservedInstances", + "traits": { + "smithy.api#documentation": "The MemoryDB reserved instances recommendation\n details.
", + "smithy.api#tags": [ + "db_recs" + ] + } } }, "traits": { @@ -3270,6 +3482,24 @@ "traits": { "smithy.api#enumValue": "RdsDbInstance" } + }, + "DYNAMO_DB_RESERVED_CAPACITY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DynamoDbReservedCapacity", + "smithy.api#tags": [ + "db_recs" + ] + } + }, + "MEMORY_DB_RESERVED_INSTANCES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MemoryDbReservedInstances", + "smithy.api#tags": [ + "db_recs" + ] + } } } }, @@ -3311,7 +3541,7 @@ "accountScope": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "The account scope that you want your recommendations for.
" + "smithy.api#documentation": "The account scope for which you want recommendations.
" } }, "term": { diff --git a/codegen/sdk/aws-models/datazone.json b/codegen/sdk/aws-models/datazone.json index fe3e8c35d83..8f6be557661 100644 --- a/codegen/sdk/aws-models/datazone.json +++ b/codegen/sdk/aws-models/datazone.json @@ -1830,7 +1830,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 10 + "max": 20 } } }, @@ -28463,6 +28463,12 @@ "com.amazonaws.datazone#RuleAction": { "type": "enum", "members": { + "CREATE_LISTING_CHANGE_SET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE_LISTING_CHANGE_SET" + } + }, "CREATE_SUBSCRIPTION_REQUEST": { "target": "smithy.api#Unit", "traits": { @@ -29768,6 +29774,13 @@ "traits": { "smithy.api#documentation": "The single sign-on user assignment in Amazon DataZone.
" } + }, + "idcInstanceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The ARN of the IDC instance.
", + "smithy.api#pattern": "arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/deadline.json b/codegen/sdk/aws-models/deadline.json index 6ad647129c4..6b0996fafec 100644 --- a/codegen/sdk/aws-models/deadline.json +++ b/codegen/sdk/aws-models/deadline.json @@ -414,8 +414,7 @@ "taskId": { "target": "com.amazonaws.deadline#TaskId", "traits": { - "smithy.api#documentation": "The task ID.
", - "smithy.api#required": {} + "smithy.api#documentation": "The task ID.
" } }, "stepId": { @@ -1741,8 +1740,10 @@ ], "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { + "disableConditionKeyInheritance": true, "name": "budget" - } + }, + "smithy.api#documentation": "Represents a budget that limits the expenses allowed on a queue or farm" } }, "com.amazonaws.deadline#BudgetSchedule": { @@ -2658,7 +2659,7 @@ "priority": { "target": "com.amazonaws.deadline#JobPriority", "traits": { - "smithy.api#documentation": "The priority of the job on a scale of 0 to 100. The highest priority (first scheduled)\n is 100. When two jobs have the same priority, the oldest job is scheduled first.
", + "smithy.api#documentation": "The priority of the job. The highest priority (first scheduled) is 100. When two jobs\n have the same priority, the oldest job is scheduled first.
", "smithy.api#required": {} } }, @@ -3237,7 +3238,7 @@ "priority": { "target": "com.amazonaws.deadline#Priority", "traits": { - "smithy.api#documentation": "Sets the priority of the environments in the queue from 0 to 10,000, where 0 is the\n highest priority. If two environments share the same priority value, the environment\n created first takes higher priority.
", + "smithy.api#documentation": "Sets the priority of the environments in the queue from 0 to 10,000, where 0 is the\n highest priority (activated first and deactivated last). If two environments share the same\n priority value, the environment created first takes higher priority.
", "smithy.api#required": {} } }, @@ -3687,9 +3688,17 @@ } ], "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:TagKeys" + ], "aws.iam#iamAction": { "name": "CreateWorker", - "documentation": "Grants permission to create a worker" + "documentation": "Grants permission to create a worker", + "requiredActions": [ + "deadline:TagResource", + "deadline:ListTagsForResource" + ] }, "smithy.api#documentation": "Creates a worker. A worker tells your instance how much processing power (vCPU), and\n memory (GiB) you’ll need to assemble the digital assets held within a particular instance.\n You can specify certain instance types to use, or let the worker know which instances types\n to exclude.
", "smithy.api#endpoint": { @@ -6702,7 +6711,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "farm" - } + }, + "smithy.api#documentation": "Represents a farm that contains Deadline Cloud queues and fleets" } }, "com.amazonaws.deadline#FarmSummaries": { @@ -6824,8 +6834,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "^[0-9A-Za-z ]*$", - "smithy.api#sensitive": {} + "smithy.api#pattern": "^[0-9A-Za-z ]*$" } }, "com.amazonaws.deadline#FileSystemLocationType": { @@ -7122,7 +7131,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "fleet" - } + }, + "smithy.api#documentation": "Represents a fleet of workers that process Deadline Cloud jobs" } }, "com.amazonaws.deadline#FleetStatus": { @@ -9449,7 +9459,7 @@ "processExitCode": { "target": "com.amazonaws.deadline#ProcessExitCode", "traits": { - "smithy.api#documentation": "The exit code to exit the session.
" + "smithy.api#documentation": "The process exit code. The default Deadline Cloud worker agent converts unsigned\n 32-bit exit codes to signed 32-bit exit codes.
" } }, "progressMessage": { @@ -11205,7 +11215,13 @@ "com.amazonaws.deadline#JobParameters": { "type": "map", "key": { - "target": "com.amazonaws.deadline#String" + "target": "com.amazonaws.deadline#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } }, "value": { "target": "com.amazonaws.deadline#JobParameter" @@ -11315,8 +11331,10 @@ ], "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { + "disableConditionKeyInheritance": true, "name": "job" - } + }, + "smithy.api#documentation": "Represents the steps and tasks for a Deadline Cloud processing job" } }, "com.amazonaws.deadline#JobRunAsUser": { @@ -11711,7 +11729,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "license-endpoint" - } + }, + "smithy.api#documentation": "Represents a license endpoint that is for licensed software or a product used within Deadline Cloud" } }, "com.amazonaws.deadline#LicenseEndpointStatus": { @@ -14916,6 +14935,14 @@ "ids": { "licenseEndpointId": "resourceArn" } + }, + { + "resource": "com.amazonaws.deadline#WorkerResource", + "ids": { + "farmId": "resourceArn", + "fleetId": "resourceArn", + "workerId": "resourceArn" + } } ] } @@ -15466,8 +15493,10 @@ }, "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { + "disableConditionKeyInheritance": true, "name": "metered-product" - } + }, + "smithy.api#documentation": "Represents a product with a license endpoint that is metered for use in Deadline Cloud" } }, "com.amazonaws.deadline#MeteredProductSummary": { @@ -15576,7 +15605,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "monitor" - } + }, + "smithy.api#documentation": "Represents a monitor that is used to manage Deadline Cloud resources" } }, "com.amazonaws.deadline#MonitorSummaries": { @@ -16404,7 +16434,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "queue" - } + }, + "smithy.api#documentation": "Represents a queue of 
Deadline Cloud jobs" } }, "com.amazonaws.deadline#QueueStatus": { @@ -17227,12 +17258,36 @@ "smithy.api#documentation": "The term to search for.
", "smithy.api#required": {} } + }, + "matchType": { + "target": "com.amazonaws.deadline#SearchTermMatchingType", + "traits": { + "smithy.api#default": "FUZZY_MATCH", + "smithy.api#documentation": "Specifies how Deadline Cloud matches your search term in the results. If you don't\n specify a matchType the default is FUZZY_MATCH.
\n FUZZY_MATCH - Matches if a portion of the search term is found in the result.
\n CONTAINS - Matches if the exact search term is contained in the result.
Searches for a particular search term.
" } }, + "com.amazonaws.deadline#SearchTermMatchingType": { + "type": "enum", + "members": { + "FUZZY_MATCH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FUZZY_MATCH" + } + }, + "CONTAINS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTAINS" + } + } + } + }, "com.amazonaws.deadline#SearchWorkers": { "type": "operation", "input": { @@ -19187,6 +19242,14 @@ "ids": { "licenseEndpointId": "resourceArn" } + }, + { + "resource": "com.amazonaws.deadline#WorkerResource", + "ids": { + "farmId": "resourceArn", + "fleetId": "resourceArn", + "workerId": "resourceArn" + } } ] } @@ -19273,8 +19336,7 @@ "taskId": { "target": "com.amazonaws.deadline#TaskId", "traits": { - "smithy.api#documentation": "The task ID.
", - "smithy.api#required": {} + "smithy.api#documentation": "The task ID.
" } }, "stepId": { @@ -19302,8 +19364,7 @@ "taskId": { "target": "com.amazonaws.deadline#TaskId", "traits": { - "smithy.api#documentation": "The task ID.
", - "smithy.api#required": {} + "smithy.api#documentation": "The task ID.
" } }, "stepId": { @@ -19784,6 +19845,14 @@ "ids": { "licenseEndpointId": "resourceArn" } + }, + { + "resource": "com.amazonaws.deadline#WorkerResource", + "ids": { + "farmId": "resourceArn", + "fleetId": "resourceArn", + "workerId": "resourceArn" + } } ] } @@ -21655,7 +21724,7 @@ "processExitCode": { "target": "com.amazonaws.deadline#ProcessExitCode", "traits": { - "smithy.api#documentation": "The process exit code.
" + "smithy.api#documentation": "The process exit code. The default Deadline Cloud worker agent converts unsigned\n 32-bit exit codes to signed 32-bit exit codes.
" } }, "progressMessage": { @@ -22189,7 +22258,8 @@ "aws.iam#disableConditionKeyInference": {}, "aws.iam#iamResource": { "name": "worker" - } + }, + "smithy.api#documentation": "Represents a worker that is part of a fleet on a farm" } }, "com.amazonaws.deadline#WorkerSearchSummaries": { diff --git a/codegen/sdk/aws-models/detective.json b/codegen/sdk/aws-models/detective.json index 2d7bcd3c209..f503ae133de 100644 --- a/codegen/sdk/aws-models/detective.json +++ b/codegen/sdk/aws-models/detective.json @@ -348,12 +348,6 @@ "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -373,6 +367,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -404,152 +404,158 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": 
"Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://detective.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws" ] - } - ], - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://api.detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree" + "endpoint": { + "url": "https://detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": 
"UseFIPS" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ { "fn": "getAttr", @@ -557,316 +563,376 @@ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] }, - true + "aws-us-gov" ] - } - ], - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://api.detective-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree" + "endpoint": { + "url": "https://detective.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + 
] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://api.detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://api.detective.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://api.detective.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://api.detective.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - 
"endpoint": { - "url": "https://api.detective.eu-north-1.amazonaws.com" + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://api.detective-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://api.detective.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": "https://api.detective.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": false, 
- "UseDualStack": false - } - }, + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-south-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "eu-south-1", - "UseFIPS": false, - "UseDualStack": false + "Endpoint": "https://example.com", + "UseFIPS": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with fips enabled", "expect": { - "endpoint": { - "url": "https://api.detective.eu-west-1.amazonaws.com" - } + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false + "Endpoint": "https://example.com", + "UseFIPS": true } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with fips disabled and dualstack enabled", "expect": { - "endpoint": { - "url": "https://api.detective.eu-west-2.amazonaws.com" - } + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "eu-west-2", + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective.eu-west-3.amazonaws.com" + "url": "https://detective-fips.us-east-1.api.aws" } }, "params": { - "Region": "eu-west-3", - "UseFIPS": false, - "UseDualStack": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region 
me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.me-south-1.amazonaws.com" + "url": "https://api.detective-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "me-south-1", - "UseFIPS": false, + "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective.sa-east-1.amazonaws.com" + "url": "https://detective.us-east-1.api.aws" } }, "params": { - "Region": "sa-east-1", + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { @@ -883,399 +949,301 @@ } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-east-1.amazonaws.com" + "url": "https://api.detective-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-east-1", + "Region": "cn-northwest-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.us-east-2.amazonaws.com" - } - }, - "params": { - "Region": "us-east-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-east-2.amazonaws.com" + "url": "https://api.detective-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { 
- "Region": "us-east-2", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective.us-west-1.amazonaws.com" + "url": "https://api.detective.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-west-1", + "Region": "cn-northwest-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-west-1.amazonaws.com" + "url": "https://api.detective.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "us-west-1", - "UseFIPS": true, + "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective.us-west-2.amazonaws.com" + "url": "https://detective-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-west-2.amazonaws.com" + "url": "https://api.detective-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-west-2", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region 
us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-east-1.api.aws" + "url": "https://detective.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-east-1", - "UseFIPS": true, + "Region": "us-gov-west-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-east-1.api.aws" + "url": "https://api.detective.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.detective-fips.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "cn-north-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.cn-north-1.amazonaws.com.cn" + "url": "https://api.detective-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "cn-north-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": 
{ - "url": "https://api.detective.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "cn-north-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-gov-east-1.amazonaws.com" + "url": "https://api.detective.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.detective-fips.us-gov-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-gov-east-1", + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://api.detective.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://api.detective-fips.us-gov-west-1.amazonaws.com" + "url": "https://api.detective-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-gov-west-1", + "Region": "us-isob-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.detective-fips.us-gov-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, + "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-gov-east-1.api.aws" + "url": "https://api.detective.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", "expect": { "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://api.detective-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": 
"us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", "expect": { "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-iso-east-1.c2s.ic.gov" + "url": "https://api.detective.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://api.detective-fips.us-isof-south-1.csp.hci.ic.gov" } }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", "expect": { "error": 
"DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.detective.us-isob-east-1.sc2s.sgov.gov" + "url": "https://api.detective.us-isof-south-1.csp.hci.ic.gov" } }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": false, "UseDualStack": false } }, - { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, { "documentation": "Missing region", "expect": { diff --git a/codegen/sdk/aws-models/direct-connect.json 
b/codegen/sdk/aws-models/direct-connect.json index 20932898b8e..d2493a3cc9e 100644 --- a/codegen/sdk/aws-models/direct-connect.json +++ b/codegen/sdk/aws-models/direct-connect.json @@ -664,7 +664,7 @@ "id": { "target": "com.amazonaws.directconnect#CoreNetworkIdentifier", "traits": { - "smithy.api#documentation": "The ID of the Cloud WAN core network.
" + "smithy.api#documentation": "The ID of the Cloud WAN core network that the Direct Connect gateway is associated to.
" } }, "ownerAccount": { @@ -676,12 +676,12 @@ "attachmentId": { "target": "com.amazonaws.directconnect#CoreNetworkAttachmentId", "traits": { - "smithy.api#documentation": "the ID of the Direct Connect attachment
" + "smithy.api#documentation": "the ID of the Direct Connect gateway attachment.
" } } }, "traits": { - "smithy.api#documentation": "The Amazon Web Services Cloud WAN core network that the Direct Connect attachment is associated with.
" + "smithy.api#documentation": "The Amazon Web Services Cloud WAN core network that the Direct Connect gateway is associated to. This is only returned when a Direct Connect gateway is associated to a Cloud WAN core network.
" } }, "com.amazonaws.directconnect#AssociatedGateway": { @@ -1056,7 +1056,7 @@ "virtualInterfaceState": { "target": "com.amazonaws.directconnect#VirtualInterfaceState", "traits": { - "smithy.api#documentation": "The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n testing: A virtual interface is in this state immediately after calling StartBgpFailoverTest and remains in this state during the duration of the test.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n testing: A virtual interface is in this state immediately after calling StartBgpFailoverTest and remains in this state during the duration of the test.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n testing: A virtual interface is in this state immediately after calling StartBgpFailoverTest and remains in this state during the duration of the test.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
The key-value pair tags associated with the request.
" + } + }, "amazonSideAsn": { "target": "com.amazonaws.directconnect#LongAsn", "traits": { @@ -2480,7 +2486,7 @@ "virtualInterfaceState": { "target": "com.amazonaws.directconnect#VirtualInterfaceState", "traits": { - "smithy.api#documentation": "The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n testing: A virtual interface is in this state immediately after calling StartBgpFailoverTest and remains in this state during the duration of the test.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
Lists the associations between your Direct Connect gateways and virtual private gateways and transit gateways. You must specify one of the following:
\nA Direct Connect gateway
\nThe response contains all virtual private gateways and transit gateways associated with the Direct Connect gateway.
\nA virtual private gateway
\nThe response contains the Direct Connect gateway.
\nA transit gateway
\nThe response contains the Direct Connect gateway.
\nA Direct Connect gateway and a virtual private gateway
\nThe response contains the association between the Direct Connect gateway and virtual private gateway.
\nA Direct Connect gateway and a transit gateway
\nThe response contains the association between the Direct Connect gateway and transit gateway.
\nLists the associations between your Direct Connect gateways and virtual private gateways and transit gateways. You must specify one of the following:
\nA Direct Connect gateway
\nThe response contains all virtual private gateways and transit gateways associated with the Direct Connect gateway.
\nA virtual private gateway
\nThe response contains the Direct Connect gateway.
\nA transit gateway
\nThe response contains the Direct Connect gateway.
\nA Direct Connect gateway and a virtual private gateway
\nThe response contains the association between the Direct Connect gateway and virtual private gateway.
\nA Direct Connect gateway and a transit gateway
\nThe response contains the association between the Direct Connect gateway and transit gateway.
\nA Direct Connect gateway and a virtual private gateway
\nThe response contains the association between the Direct Connect gateway and virtual private gateway.
\nA Direct Connect gateway association to a Cloud WAN core network
\nThe response contains the Cloud WAN core network ID that the Direct Connect gateway is associated to.
\nThe error message if the state of an object failed to advance.
" } + }, + "tags": { + "target": "com.amazonaws.directconnect#TagList", + "traits": { + "smithy.api#documentation": "Information about a tag.
" + } } }, "traits": { @@ -3505,7 +3517,7 @@ "associatedCoreNetwork": { "target": "com.amazonaws.directconnect#AssociatedCoreNetwork", "traits": { - "smithy.api#documentation": "The ID of the Cloud WAN core network associated with the Direct Connect attachment.
" + "smithy.api#documentation": "The ID of the Cloud WAN core network associated with the Direct Connect gateway attachment.
" } }, "virtualGatewayId": { @@ -7159,7 +7171,7 @@ "virtualInterfaceState": { "target": "com.amazonaws.directconnect#VirtualInterfaceState", "traits": { - "smithy.api#documentation": "The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
The state of the virtual interface. The following are the possible values:
\n\n confirming: The creation of the virtual interface is pending confirmation from the virtual interface owner. If the owner of the virtual interface is different from the owner of the connection on which it is provisioned, then the virtual interface will remain in this state until it is confirmed by the virtual interface owner.
\n verifying: This state only applies to public virtual interfaces. Each public virtual interface needs validation before the virtual interface can be created.
\n pending: A virtual interface is in this state from the time that it is created until the virtual interface is ready to forward traffic.
\n available: A virtual interface that is able to forward traffic.
\n down: A virtual interface that is BGP down.
\n testing: A virtual interface is in this state immediately after calling StartBgpFailoverTest and remains in this state during the duration of the test.
\n deleting: A virtual interface is in this state immediately after calling DeleteVirtualInterface until it can no longer forward traffic.
\n deleted: A virtual interface that cannot forward traffic.
\n rejected: The virtual interface owner has declined creation of the virtual interface. If a virtual interface in the Confirming state is deleted by the virtual interface owner, the virtual interface enters the Rejected state.
\n unknown: The state of the virtual interface is not available.
Adds an existing user, group, or computer as a group member.
", + "smithy.api#examples": [ + { + "title": "To add a member to the Marketing group", + "documentation": "The following command adds an existing user to the Marketing group in the europe.example.com domain.", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "GroupName": "Marketing", + "MemberName": "Pat Candella", + "MemberRealm": "europe.example.com" + }, + "output": {} + } + ], "smithy.api#http": { "uri": "/GroupMemberships/AddGroupMember", "method": "POST" @@ -238,6 +252,32 @@ ], "traits": { "smithy.api#documentation": "Creates a new group.
", + "smithy.api#examples": [ + { + "title": "To create a group", + "documentation": "The following command creates a distribution list group named AcctngMail.", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "GroupScope": "DomainLocal", + "GroupType": "Distribution", + "OtherAttributes": { + "displayName": { + "S": "Acctng-mailing-list" + }, + "description": { + "S": "Accounting dept mailing list" + } + }, + "SAMAccountName": "AcctngMail" + }, + "output": { + "DirectoryId": "d-12233abcde", + "SAMAccountName": "AcctngMail", + "SID": "S-1-5-33-123" + } + } + ], "smithy.api#http": { "uri": "/Groups/CreateGroup", "method": "POST" @@ -348,6 +388,33 @@ ], "traits": { "smithy.api#documentation": "Creates a new user.
", + "smithy.api#examples": [ + { + "title": "To create a new user in the directory", + "documentation": "The following command", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "EmailAddress": "pcandella@exampledomain.com", + "GivenName": "Pat Candella", + "OtherAttributes": { + "department": { + "S": "HR" + }, + "homePhone": { + "S": "212-555-0100" + } + }, + "SAMAccountName": "pcandella", + "Surname": "Candella" + }, + "output": { + "DirectoryId": "d-12233abcde", + "SAMAccountName": "pcandella", + "SID": "S-1-5-99-789" + } + } + ], "smithy.api#http": { "uri": "/Users/CreateUser", "method": "POST" @@ -467,6 +534,18 @@ ], "traits": { "smithy.api#documentation": "Deletes a group.
", + "smithy.api#examples": [ + { + "title": "To delete a group", + "documentation": "The following command deletes the marketing group from the specified directory.", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "SAMAccountName": "marketing" + }, + "output": {} + } + ], "smithy.api#http": { "uri": "/Groups/DeleteGroup", "method": "POST" @@ -543,6 +622,18 @@ ], "traits": { "smithy.api#documentation": "Deletes a user.
", + "smithy.api#examples": [ + { + "title": "To delete a user", + "documentation": "The following command deletes a group from the directory.", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "SAMAccountName": "pcandella" + }, + "output": {} + } + ], "smithy.api#http": { "uri": "/Users/DeleteUser", "method": "POST" @@ -616,6 +707,41 @@ ], "traits": { "smithy.api#documentation": "Returns information about a specific group.
", + "smithy.api#examples": [ + { + "title": "To return the attributes of a group", + "documentation": "The following command returns the mapped attributes for a group along with the display name, description, and GUID for the group.", + "input": { + "DirectoryId": "d-12233abcde", + "OtherAttributes": [ + "displayName", + "description", + "objectGUID" + ], + "Realm": "example.domain.com", + "SAMAccountName": "DevOpsMail" + }, + "output": { + "DirectoryId": "d-12233abcde", + "DistinguishedName": "DevOpsmail", + "GroupScope": "Global", + "GroupType": "Distribution", + "OtherAttributes": { + "displayName": { + "S": "DevOps mailing list" + }, + "description": { + "S": "A group for DevOps email." + }, + "objectGUID": { + "S": "123456789" + } + }, + "SAMAccountName": "DevOpsMail", + "SID": "S-1-5-55-678" + } + } + ], "smithy.api#http": { "uri": "/Groups/DescribeGroup", "method": "POST" @@ -668,7 +794,7 @@ "OtherAttributes": { "target": "com.amazonaws.directoryservicedata#LdapDisplayNameList", "traits": { - "smithy.api#documentation": "One or more attributes to be returned for the group. For a list of supported attributes,\n see Directory Service Data Attributes.\n
" + "smithy.api#documentation": "One or more attributes to be returned for the group. For a list of supported attributes,\n see Directory Service Data Attributes.\n
" } } }, @@ -762,6 +888,48 @@ ], "traits": { "smithy.api#documentation": "Returns information about a specific user.
", + "smithy.api#examples": [ + { + "title": "To return the attributes of a user", + "documentation": "The following command returns the mapped attributes for a user along with the department, manager, IP phone, and date the user last set a password.", + "input": { + "DirectoryId": "d-12233abcde", + "OtherAttributes": [ + "department", + "manager", + "ipPhone", + "pwdLastSet" + ], + "Realm": "examplecorp.com", + "SAMAccountName": "twhitlock" + }, + "output": { + "DirectoryId": "d-12233abcde", + "DistinguishedName": "Terry Whitlock", + "EmailAddress": "terry.whitlock@examplecorp.com", + "Enabled": true, + "GivenName": "Terry Whitlock", + "OtherAttributes": { + "department": { + "S": "communications" + }, + "manager": { + "S": "OU=Users,DC=mmajors" + }, + "ipPhone": { + "S": "111.111.111.111" + }, + "pwdLastSet": { + "N": 0 + } + }, + "SAMAccountName": "twhitlock", + "SID": "123-456-7890", + "Surname": "Whitlock", + "UserPrincipalName": "terry.whitlock" + } + } + ], "smithy.api#http": { "uri": "/Users/DescribeUser", "method": "POST" @@ -858,7 +1026,7 @@ "UserPrincipalName": { "target": "com.amazonaws.directoryservicedata#UserPrincipalName", "traits": { - "smithy.api#documentation": "The UPN that is an Internet-style login name for a user and is based on the Internet\n standard RFC 822. The UPN is shorter\n than the distinguished name and easier to remember.
" + "smithy.api#documentation": "The UPN that is an Internet-style login name for a user and is based on the Internet\n standard RFC 822. The UPN is shorter\n than the distinguished name and easier to remember.
" } }, "EmailAddress": { @@ -1729,6 +1897,18 @@ ], "traits": { "smithy.api#documentation": "Deactivates an active user account. For information about how to enable an inactive user\n account, see ResetUserPassword\n in the Directory Service API Reference.
", + "smithy.api#examples": [ + { + "title": "To disable a user account", + "documentation": "The following command disables the account for twhitlock.", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "SAMAccountName": "twhitlock" + }, + "output": {} + } + ], "smithy.api#http": { "uri": "/Users/DisableUser", "method": "POST" @@ -2020,6 +2200,38 @@ ], "traits": { "smithy.api#documentation": "Returns member information for the specified group.
\n This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the\n ListGroupMembers.NextToken member contains a token that you pass in the next\n call to ListGroupMembers. This retrieves the next set of items.
You can also specify a maximum number of return results with the MaxResults\n parameter.
Returns group information for the specified directory.
\n This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the ListGroups.NextToken\n member contains a token that you pass in the next call to ListGroups. This\n retrieves the next set of items.
You can also specify a maximum number of return results with the MaxResults\n parameter.
Returns group information for the specified member.
\n This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the\n ListGroupsForMember.NextToken member contains a token that you pass in the next\n call to ListGroupsForMember. This retrieves the next set of items.
You can also specify a maximum number of return results with the MaxResults\n parameter.
Returns user information for the specified directory.
\n This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the ListUsers.NextToken\n member contains a token that you pass in the next call to ListUsers. This\n retrieves the next set of items.
You can also specify a maximum number of return results with the MaxResults\n parameter.
Removes a member from a group.
", + "smithy.api#examples": [ + { + "title": "To remove a member from a group", + "documentation": "The following command removes the specified member from the example.local domain.", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "GroupName": "DevOps", + "MemberName": "Pat Candella", + "MemberRealm": "example.local" + }, + "output": {} + } + ], "smithy.api#http": { "uri": "/GroupMemberships/RemoveGroupMember", "method": "POST" @@ -2699,6 +3014,40 @@ ], "traits": { "smithy.api#documentation": " Searches the specified directory for a group. You can find groups that match the\n SearchString parameter with the value of their attributes included in the\n SearchString parameter.
This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the SearchGroups.NextToken\n member contains a token that you pass in the next call to SearchGroups. This\n retrieves the next set of items.
You can also specify a maximum number of return results with the MaxResults\n parameter.
Searches the specified directory for a user. You can find users that match the\n SearchString parameter with the value of their attributes included in the\n SearchString parameter.
This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the SearchUsers.NextToken\n member contains a token that you pass in the next call to SearchUsers. This\n retrieves the next set of items.
You can also specify a maximum number of return results with the MaxResults\n parameter.
Updates group information.
", + "smithy.api#examples": [ + { + "title": "To update a group", + "documentation": "The following command updates the preferred language and country attributes for the GuestsLocal group.", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "GroupScope": "Global", + "GroupType": "Security", + "OtherAttributes": { + "preferredLanguage": { + "S": "English" + }, + "co": { + "S": "US" + } + }, + "SAMAccountName": "GuestsLocal", + "UpdateType": "REPLACE" + }, + "output": {} + } + ], "smithy.api#http": { "uri": "/Groups/UpdateGroup", "method": "POST" @@ -3137,6 +3544,45 @@ ], "traits": { "smithy.api#documentation": "Updates user information.
", + "smithy.api#examples": [ + { + "title": "To update user attributes", + "documentation": "The following command", + "input": { + "ClientToken": "550e8400-e29b-41d4-a716-446655440000", + "DirectoryId": "d-12233abcde", + "EmailAddress": "twhitlock@examplecorp.com", + "GivenName": "Terry", + "OtherAttributes": { + "telephoneNumber": { + "S": "212-555-1111" + }, + "homePhone": { + "S": "333-333-3333" + }, + "physicalDeliveryOfficeName": { + "S": "Example Company" + }, + "streetAddress": { + "S": "123 Any Street" + }, + "postalCode": { + "S": "54321" + }, + "st": { + "S": "WA" + }, + "co": { + "S": "US" + } + }, + "SAMAccountName": "twhitlock", + "Surname": "Whitlock", + "UpdateType": "ADD" + }, + "output": {} + } + ], "smithy.api#http": { "uri": "/Users/UpdateUser", "method": "POST" @@ -3182,7 +3628,7 @@ "OtherAttributes": { "target": "com.amazonaws.directoryservicedata#Attributes", "traits": { - "smithy.api#documentation": "An expression that defines one or more attribute names with the data type and value of\n each attribute. A key is an attribute name, and the value is a list of maps. For a list of\n supported attributes, see Directory Service Data Attributes.
\nAttribute names are case insensitive.
\nAn expression that defines one or more attribute names with the data type and value of\n each attribute. A key is an attribute name, and the value is a list of maps. For a list of\n supported attributes, see Directory Service Data Attributes.
\nAttribute names are case insensitive.
\nThe UPN that is an internet-style login name for a user and based on the internet\n standard RFC 822. The UPN is shorter\n than the distinguished name and easier to remember.
" + "smithy.api#documentation": "The UPN that is an internet-style login name for a user and based on the internet\n standard RFC 822. The UPN is shorter\n than the distinguished name and easier to remember.
" } }, "EmailAddress": { diff --git a/codegen/sdk/aws-models/dynamodb.json b/codegen/sdk/aws-models/dynamodb.json index eebce04d13c..7114bc3d72f 100644 --- a/codegen/sdk/aws-models/dynamodb.json +++ b/codegen/sdk/aws-models/dynamodb.json @@ -788,7 +788,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation allows you to perform batch reads or writes on data stored in DynamoDB,\n using PartiQL. Each read statement in a BatchExecuteStatement must specify\n an equality condition on all key attributes. This enforces that each SELECT\n statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB\n .
The entire batch must consist of either read statements or write statements, you\n cannot mix both in one batch.
\nA HTTP 200 response does not mean that all statements in the BatchExecuteStatement\n succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each\n statement.
This operation allows you to perform batch reads or writes on data stored in DynamoDB,\n using PartiQL. Each read statement in a BatchExecuteStatement must specify\n an equality condition on all key attributes. This enforces that each SELECT\n statement in a batch returns at most a single item. For more information, see Running batch operations with PartiQL for DynamoDB .
The entire batch must consist of either read statements or write statements, you\n cannot mix both in one batch.
\nA HTTP 200 response does not mean that all statements in the BatchExecuteStatement\n succeeded. Error details for individual statements can be found under the Error field of the BatchStatementResponse for each\n statement.
The BatchGetItem operation returns the attributes of one or more items\n from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100\n items. BatchGetItem returns a partial result if the response size limit is\n exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is\n requested, or an internal processing failure occurs. If a partial result is returned,\n the operation returns a value for UnprocessedKeys. You can use this value\n to retry the operation starting with the next item to get.
If you request more than 100 items, BatchGetItem returns a\n ValidationException with the message \"Too many items requested for\n the BatchGetItem call.\"
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in\n size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns\n an appropriate UnprocessedKeys value so you can get the next page of\n results. If desired, your application can include its own logic to assemble the pages of\n results into one dataset.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchGetItem returns a\n ProvisionedThroughputExceededException. If at least\n one of the items is successfully processed, then\n BatchGetItem completes successfully, while returning the keys of the\n unread items in UnprocessedKeys.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nBy default, BatchGetItem performs eventually consistent reads on every\n table in the request. If you want strongly consistent reads instead, you can set\n ConsistentRead to true for any or all tables.
In order to minimize response latency, BatchGetItem may retrieve items in\n parallel.
When designing your application, keep in mind that DynamoDB does not return items in\n any particular order. To help parse the response by item, include the primary key values\n for the items in your request in the ProjectionExpression parameter.
If a requested item does not exist, it is not returned in the result. Requests for\n nonexistent items consume the minimum read capacity units according to the type of read.\n For more information, see Working with Tables in the Amazon DynamoDB Developer\n Guide.
", + "smithy.api#documentation": "The BatchGetItem operation returns the attributes of one or more items\n from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100\n items. BatchGetItem returns a partial result if the response size limit is\n exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is\n requested, or an internal processing failure occurs. If a partial result is returned,\n the operation returns a value for UnprocessedKeys. You can use this value\n to retry the operation starting with the next item to get.
If you request more than 100 items, BatchGetItem returns a\n ValidationException with the message \"Too many items requested for\n the BatchGetItem call.\"
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in\n size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns\n an appropriate UnprocessedKeys value so you can get the next page of\n results. If desired, your application can include its own logic to assemble the pages of\n results into one dataset.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchGetItem returns a\n ProvisionedThroughputExceededException. If at least\n one of the items is successfully processed, then\n BatchGetItem completes successfully, while returning the keys of the\n unread items in UnprocessedKeys.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nBy default, BatchGetItem performs eventually consistent reads on every\n table in the request. If you want strongly consistent reads instead, you can set\n ConsistentRead to true for any or all tables.
In order to minimize response latency, BatchGetItem may retrieve items in\n parallel.
When designing your application, keep in mind that DynamoDB does not return items in\n any particular order. To help parse the response by item, include the primary key values\n for the items in your request in the ProjectionExpression parameter.
If a requested item does not exist, it is not returned in the result. Requests for\n nonexistent items consume the minimum read capacity units according to the type of read.\n For more information, see Working with Tables in the Amazon DynamoDB Developer\n Guide.
\n\n BatchGetItem will result in a ValidationException if the\n same key is specified multiple times.
A condition specified in the operation could not be evaluated.
", + "smithy.api#documentation": "A condition specified in the operation failed to be evaluated.
", "smithy.api#error": "client" } }, @@ -1984,7 +1984,7 @@ "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "The maximum number of read and write units for the global secondary index being\n created. If you use this parameter, you must specify MaxReadRequestUnits,\n MaxWriteRequestUnits, or both.
The maximum number of read and write units for the global secondary index being\n created. If you use this parameter, you must specify MaxReadRequestUnits,\n MaxWriteRequestUnits, or both. You must use either\n OnDemand Throughput or ProvisionedThroughput based on your table's\n capacity mode.
One or more local secondary indexes (the maximum is 5) to be created on the table.\n Each index is scoped to a given partition key value. There is a 10 GB size limit per\n partition key value; otherwise, the size of a local secondary index is\n unconstrained.
\nEach local secondary index in the array includes the following:
\n\n IndexName - The name of the local secondary index. Must be unique\n only for this table.
\n KeySchema - Specifies the key schema for the local secondary index.\n The key schema must begin with the same partition key as the table.
\n Projection - Specifies attributes that are copied (projected) from\n the table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of:
\n ProjectionType - One of the following:
\n KEYS_ONLY - Only the index and primary keys are\n projected into the index.
\n INCLUDE - Only the specified table attributes are\n projected into the index. The list of projected attributes is in\n NonKeyAttributes.
\n ALL - All of the table attributes are projected\n into the index.
\n NonKeyAttributes - A list of one or more non-key attribute\n names that are projected into the secondary index. The total count of\n attributes provided in NonKeyAttributes, summed across all\n of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct\n attributes when determining the total.
One or more local secondary indexes (the maximum is 5) to be created on the table.\n Each index is scoped to a given partition key value. There is a 10 GB size limit per\n partition key value; otherwise, the size of a local secondary index is\n unconstrained.
\nEach local secondary index in the array includes the following:
\n\n IndexName - The name of the local secondary index. Must be unique\n only for this table.
\n KeySchema - Specifies the key schema for the local secondary index.\n The key schema must begin with the same partition key as the table.
\n Projection - Specifies attributes that are copied (projected) from\n the table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of:
\n ProjectionType - One of the following:
\n KEYS_ONLY - Only the index and primary keys are\n projected into the index.
\n INCLUDE - Only the specified table attributes are\n projected into the index. The list of projected attributes is in\n NonKeyAttributes.
\n ALL - All of the table attributes are projected\n into the index.
\n NonKeyAttributes - A list of one or more non-key attribute\n names that are projected into the secondary index. The total count of\n attributes provided in NonKeyAttributes, summed across all\n of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct\n attributes when determining the total. This limit only applies when you\n specify the ProjectionType of INCLUDE. You still can specify the\n ProjectionType of ALL to project all attributes from the\n source table, even if the table has more than 100 attributes.
One or more global secondary indexes (the maximum is 20) to be created on the table.\n Each global secondary index in the array includes the following:
\n\n IndexName - The name of the global secondary index. Must be unique\n only for this table.
\n KeySchema - Specifies the key schema for the global secondary\n index.
\n Projection - Specifies attributes that are copied (projected) from\n the table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of:
\n ProjectionType - One of the following:
\n KEYS_ONLY - Only the index and primary keys are\n projected into the index.
\n INCLUDE - Only the specified table attributes are\n projected into the index. The list of projected attributes is in\n NonKeyAttributes.
\n ALL - All of the table attributes are projected\n into the index.
\n NonKeyAttributes - A list of one or more non-key attribute\n names that are projected into the secondary index. The total count of\n attributes provided in NonKeyAttributes, summed across all\n of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct\n attributes when determining the total.
\n ProvisionedThroughput - The provisioned throughput settings for the\n global secondary index, consisting of read and write capacity units.
One or more global secondary indexes (the maximum is 20) to be created on the table.\n Each global secondary index in the array includes the following:
\n\n IndexName - The name of the global secondary index. Must be unique\n only for this table.
\n KeySchema - Specifies the key schema for the global secondary\n index.
\n Projection - Specifies attributes that are copied (projected) from\n the table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of:
\n ProjectionType - One of the following:
\n KEYS_ONLY - Only the index and primary keys are\n projected into the index.
\n INCLUDE - Only the specified table attributes are\n projected into the index. The list of projected attributes is in\n NonKeyAttributes.
\n ALL - All of the table attributes are projected\n into the index.
\n NonKeyAttributes - A list of one or more non-key attribute\n names that are projected into the secondary index. The total count of\n attributes provided in NonKeyAttributes, summed across all\n of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct\n attributes when determining the total. This limit only applies when you\n specify the ProjectionType of INCLUDE. You still can\n specify the ProjectionType of ALL to project all attributes\n from the source table, even if the table has more than 100 attributes.
\n ProvisionedThroughput - The provisioned throughput settings for the\n global secondary index, consisting of read and write capacity units.
Controls how you are charged for read and write throughput and how you manage\n capacity. This setting can be changed later.
\n\n PROVISIONED - We recommend using PROVISIONED for\n predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode.
\n PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST\n for unpredictable workloads. PAY_PER_REQUEST sets the billing mode\n to On-demand capacity mode.
Controls how you are charged for read and write throughput and how you manage\n capacity. This setting can be changed later.
\n\n PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST\n for most DynamoDB workloads. PAY_PER_REQUEST sets the billing mode\n to On-demand capacity mode.
\n PROVISIONED - We recommend using PROVISIONED for\n steady workloads with predictable growth where capacity requirements can be\n reliably forecasted. PROVISIONED sets the billing mode to Provisioned capacity mode.
Represents the warm throughput (in read units per second and write units per second) for creating a table.
" + "smithy.api#documentation": "Represents the warm throughput (in read units per second and write units per second)\n for creating a table.
" } }, "ResourcePolicy": { @@ -2919,7 +2919,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Checks the status of continuous backups and point in time recovery on the specified\n table. Continuous backups are ENABLED on all tables at table creation. If\n point in time recovery is enabled, PointInTimeRecoveryStatus will be set to\n ENABLED.
After continuous backups and point in time recovery are enabled, you can restore to\n any point in time within EarliestRestorableDateTime and\n LatestRestorableDateTime.
\n LatestRestorableDateTime is typically 5 minutes before the current time.\n You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days.
You can call DescribeContinuousBackups at a maximum rate of 10 times per\n second.
Checks the status of continuous backups and point in time recovery on the specified\n table. Continuous backups are ENABLED on all tables at table creation. If\n point in time recovery is enabled, PointInTimeRecoveryStatus will be set to\n ENABLED.
After continuous backups and point in time recovery are enabled, you can restore to\n any point in time within EarliestRestorableDateTime and\n LatestRestorableDateTime.
\n LatestRestorableDateTime is typically 5 minutes before the current time.\n You can restore your table to any point in time in the last 35 days. You can set the\n recovery period to any value between 1 and 35 days.
You can call DescribeContinuousBackups at a maximum rate of 10 times per\n second.
The number of preceding days for which continuous backups are taken and maintained.\n Your table data is only recoverable to any point-in-time from within the configured\n recovery period. This parameter is optional. If no value is provided, the value will\n default to 35.
" + "smithy.api#documentation": "The number of preceding days for which continuous backups are taken and maintained.\n Your table data is only recoverable to any point-in-time from within the configured\n recovery period. This parameter is optional.
" } }, "EarliestRestorableDateTime": { @@ -17036,7 +17036,7 @@ "NonKeyAttributes": { "target": "com.amazonaws.dynamodb#NonKeyAttributeNameList", "traits": { - "smithy.api#documentation": "Represents the non-key attribute names which will be projected into the index.
\nFor local secondary indexes, the total count of NonKeyAttributes summed\n across all of the local secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct attributes when\n determining the total.
Represents the non-key attribute names which will be projected into the index.
\nFor global and local secondary indexes, the total count of NonKeyAttributes summed\n across all of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct attributes when\n determining the total. This limit only applies when you specify the ProjectionType of\n INCLUDE. You still can specify the ProjectionType of ALL to\n project all attributes from the source table, even if the table has more than 100\n attributes.
Represents the provisioned throughput settings for a specified table or index. The\n settings can be modified using the UpdateTable operation.
For current minimum and maximum provisioned throughput values, see Service,\n Account, and Table Quotas in the Amazon DynamoDB Developer\n Guide.
" + "smithy.api#documentation": "Represents the provisioned throughput settings for the specified global secondary\n index. You must use ProvisionedThroughput or\n OnDemandThroughput based on your table’s capacity mode.
For current minimum and maximum provisioned throughput values, see Service,\n Account, and Table Quotas in the Amazon DynamoDB Developer\n Guide.
" } }, "com.amazonaws.dynamodb#ProvisionedThroughputDescription": { @@ -17693,7 +17693,7 @@ "target": "com.amazonaws.dynamodb#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The number of items evaluated, before any QueryFilter is applied. A high\n ScannedCount value with few, or no, Count results\n indicates an inefficient Query operation. For more information, see Count and\n ScannedCount in the Amazon DynamoDB Developer\n Guide.
If you did not use a filter in the request, then ScannedCount is the same\n as Count.
The number of items evaluated, before any QueryFilter is applied. A high\n ScannedCount value with few, or no, Count results\n indicates an inefficient Query operation. For more information, see Count and ScannedCount in the Amazon DynamoDB Developer\n Guide.
If you did not use a filter in the request, then ScannedCount is the same\n as Count.
Throughput exceeds the current throughput quota for your account. Please contact\n Amazon Web Services Support to request a\n quota increase.
", + "smithy.api#documentation": "Throughput exceeds the current throughput quota for your account. Please contact\n Amazon Web ServicesSupport to request a\n quota increase.
", "smithy.api#error": "client" } }, @@ -18625,7 +18625,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Restores the specified table to the specified point in time within\n EarliestRestorableDateTime and LatestRestorableDateTime.\n You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days. Any number of\n users can execute up to 50 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to\n the state based on the selected date and time (day:hour:minute:second) to a new table.
\nAlong with data, the following are also included on the new restored table using point\n in time recovery:
\nGlobal secondary indexes (GSIs)
\nLocal secondary indexes (LSIs)
\nProvisioned read and write capacity
\nEncryption settings
\nAll these settings come from the current settings of the source table at\n the time of restore.
\nYou must manually set up the following on the restored table:
\nAuto scaling policies
\nIAM policies
\nAmazon CloudWatch metrics and alarms
\nTags
\nStream settings
\nTime to Live (TTL) settings
\nPoint in time recovery settings
\nRestores the specified table to the specified point in time within\n EarliestRestorableDateTime and LatestRestorableDateTime.\n You can restore your table to any point in time in the last 35 days. You can set the\n recovery period to any value between 1 and 35 days. Any number of users can execute up\n to 50 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to\n the state based on the selected date and time (day:hour:minute:second) to a new table.
\nAlong with data, the following are also included on the new restored table using point\n in time recovery:
\nGlobal secondary indexes (GSIs)
\nLocal secondary indexes (LSIs)
\nProvisioned read and write capacity
\nEncryption settings
\nAll these settings come from the current settings of the source table at\n the time of restore.
\nYou must manually set up the following on the restored table:
\nAuto scaling policies
\nIAM policies
\nAmazon CloudWatch metrics and alarms
\nTags
\nStream settings
\nTime to Live (TTL) settings
\nPoint in time recovery settings
\nRepresents one or more local secondary indexes on the table. Each index is scoped to a\n given partition key value. Tables with one or more local secondary indexes are subject\n to an item collection size limit, where the amount of data within a given item\n collection cannot exceed 10 GB. Each element is composed of:
\n\n IndexName - The name of the local secondary index.
\n KeySchema - Specifies the complete index key schema. The attribute\n names in the key schema must be between 1 and 255 characters (inclusive). The\n key schema must begin with the same partition key as the table.
\n Projection - Specifies attributes that are copied (projected) from\n the table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of:
\n ProjectionType - One of the following:
\n KEYS_ONLY - Only the index and primary keys are\n projected into the index.
\n INCLUDE - Only the specified table attributes are\n projected into the index. The list of projected attributes is in\n NonKeyAttributes.
\n ALL - All of the table attributes are projected\n into the index.
\n NonKeyAttributes - A list of one or more non-key attribute\n names that are projected into the secondary index. The total count of\n attributes provided in NonKeyAttributes, summed across all\n of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct\n attributes when determining the total.
\n IndexSizeBytes - Represents the total size of the index, in bytes.\n DynamoDB updates this value approximately every six hours. Recent changes might\n not be reflected in this value.
\n ItemCount - Represents the number of items in the index. DynamoDB\n updates this value approximately every six hours. Recent changes might not be\n reflected in this value.
If the table is in the DELETING state, no information about indexes will\n be returned.
Represents one or more local secondary indexes on the table. Each index is scoped to a\n given partition key value. Tables with one or more local secondary indexes are subject\n to an item collection size limit, where the amount of data within a given item\n collection cannot exceed 10 GB. Each element is composed of:
\n\n IndexName - The name of the local secondary index.
\n KeySchema - Specifies the complete index key schema. The attribute\n names in the key schema must be between 1 and 255 characters (inclusive). The\n key schema must begin with the same partition key as the table.
\n Projection - Specifies attributes that are copied (projected) from\n the table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of:
\n ProjectionType - One of the following:
\n KEYS_ONLY - Only the index and primary keys are\n projected into the index.
\n INCLUDE - Only the specified table attributes are\n projected into the index. The list of projected attributes is in\n NonKeyAttributes.
\n ALL - All of the table attributes are projected\n into the index.
\n NonKeyAttributes - A list of one or more non-key attribute\n names that are projected into the secondary index. The total count of\n attributes provided in NonKeyAttributes, summed across all\n of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct\n attributes when determining the total. This limit only applies when you\n specify the ProjectionType of INCLUDE. You still can\n specify the ProjectionType of ALL to project all attributes\n from the source table, even if the table has more than 100 attributes.
\n IndexSizeBytes - Represents the total size of the index, in bytes.\n DynamoDB updates this value approximately every six hours. Recent changes might\n not be reflected in this value.
\n ItemCount - Represents the number of items in the index. DynamoDB\n updates this value approximately every six hours. Recent changes might not be\n reflected in this value.
If the table is in the DELETING state, no information about indexes will\n be returned.
The global secondary indexes, if any, on the table. Each index is scoped to a given\n partition key value. Each element is composed of:
\n\n Backfilling - If true, then the index is currently in the\n backfilling phase. Backfilling occurs only when a new global secondary index is\n added to the table. It is the process by which DynamoDB populates the new index\n with data from the table. (This attribute does not appear for indexes that were\n created during a CreateTable operation.)
You can delete an index that is being created during the\n Backfilling phase when IndexStatus is set to\n CREATING and Backfilling is true. You can't delete the index that\n is being created when IndexStatus is set to CREATING and\n Backfilling is false. (This attribute does not appear for\n indexes that were created during a CreateTable operation.)
\n IndexName - The name of the global secondary index.
\n IndexSizeBytes - The total size of the global secondary index, in\n bytes. DynamoDB updates this value approximately every six hours. Recent changes\n might not be reflected in this value.
\n IndexStatus - The current status of the global secondary\n index:
\n CREATING - The index is being created.
\n UPDATING - The index is being updated.
\n DELETING - The index is being deleted.
\n ACTIVE - The index is ready for use.
\n ItemCount - The number of items in the global secondary index.\n DynamoDB updates this value approximately every six hours. Recent changes might\n not be reflected in this value.
\n KeySchema - Specifies the complete index key schema. The attribute\n names in the key schema must be between 1 and 255 characters (inclusive). The\n key schema must begin with the same partition key as the table.
\n Projection - Specifies attributes that are copied (projected) from\n the table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of:
\n ProjectionType - One of the following:
\n KEYS_ONLY - Only the index and primary keys are\n projected into the index.
\n INCLUDE - In addition to the attributes described\n in KEYS_ONLY, the secondary index will include\n other non-key attributes that you specify.
\n ALL - All of the table attributes are projected\n into the index.
\n NonKeyAttributes - A list of one or more non-key attribute\n names that are projected into the secondary index. The total count of\n attributes provided in NonKeyAttributes, summed across all\n of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct\n attributes when determining the total.
\n ProvisionedThroughput - The provisioned throughput settings for the\n global secondary index, consisting of read and write capacity units, along with\n data about increases and decreases.
If the table is in the DELETING state, no information about indexes will\n be returned.
The global secondary indexes, if any, on the table. Each index is scoped to a given\n partition key value. Each element is composed of:
\n\n Backfilling - If true, then the index is currently in the\n backfilling phase. Backfilling occurs only when a new global secondary index is\n added to the table. It is the process by which DynamoDB populates the new index\n with data from the table. (This attribute does not appear for indexes that were\n created during a CreateTable operation.)
You can delete an index that is being created during the\n Backfilling phase when IndexStatus is set to\n CREATING and Backfilling is true. You can't delete the index that\n is being created when IndexStatus is set to CREATING and\n Backfilling is false. (This attribute does not appear for\n indexes that were created during a CreateTable operation.)
\n IndexName - The name of the global secondary index.
\n IndexSizeBytes - The total size of the global secondary index, in\n bytes. DynamoDB updates this value approximately every six hours. Recent changes\n might not be reflected in this value.
\n IndexStatus - The current status of the global secondary\n index:
\n CREATING - The index is being created.
\n UPDATING - The index is being updated.
\n DELETING - The index is being deleted.
\n ACTIVE - The index is ready for use.
\n ItemCount - The number of items in the global secondary index.\n DynamoDB updates this value approximately every six hours. Recent changes might\n not be reflected in this value.
\n KeySchema - Specifies the complete index key schema. The attribute\n names in the key schema must be between 1 and 255 characters (inclusive). The\n key schema must begin with the same partition key as the table.
\n Projection - Specifies attributes that are copied (projected) from\n the table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of:
\n ProjectionType - One of the following:
\n KEYS_ONLY - Only the index and primary keys are\n projected into the index.
\n INCLUDE - In addition to the attributes described\n in KEYS_ONLY, the secondary index will include\n other non-key attributes that you specify.
\n ALL - All of the table attributes are projected\n into the index.
\n NonKeyAttributes - A list of one or more non-key attribute\n names that are projected into the secondary index. The total count of\n attributes provided in NonKeyAttributes, summed across all\n of the secondary indexes, must not exceed 100. If you project the same\n attribute into two different indexes, this counts as two distinct\n attributes when determining the total. This limit only applies when you\n specify the ProjectionType of INCLUDE. You still can\n specify the ProjectionType of ALL to project all attributes\n from the source table, even if the table has more than 100 attributes.
\n ProvisionedThroughput - The provisioned throughput settings for the\n global secondary index, consisting of read and write capacity units, along with\n data about increases and decreases.
If the table is in the DELETING state, no information about indexes will\n be returned.
Represents warm throughput value of the base table..
" + "smithy.api#documentation": "Represents warm throughput value of the base table.
" } } }, "traits": { - "smithy.api#documentation": "Represents the warm throughput value (in read units per second and write units per\n second) of the base table.
" + "smithy.api#documentation": "Represents the warm throughput value (in read units per second and write units per second)\n of the table. Warm throughput is applicable for DynamoDB Standard-IA tables and specifies\n the minimum provisioned capacity maintained for immediate data access.
" } }, "com.amazonaws.dynamodb#Tag": { @@ -19987,7 +19987,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Associate a set of tags with an Amazon DynamoDB resource. You can then activate these\n user-defined tags so that they appear on the Billing and Cost Management console for\n cost allocation tracking. You can call TagResource up to five times per second, per\n account.
\n\n TagResource is an asynchronous operation. If you issue a ListTagsOfResource request immediately after a TagResource request, DynamoDB might return your previous tag set, if there was one, or an empty tag set. This is because ListTagsOfResource uses an eventually consistent query, and the metadata for your tags or table might not be available at that moment. Wait for a few seconds, and then try the ListTagsOfResource request again.
The application or removal of tags using TagResource and UntagResource APIs is eventually consistent. ListTagsOfResource API will only reflect the changes after a few seconds.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB\n in the Amazon DynamoDB Developer Guide.
" + "smithy.api#documentation": "Associate a set of tags with an Amazon DynamoDB resource. You can then activate these\n user-defined tags so that they appear on the Billing and Cost Management console for\n cost allocation tracking. You can call TagResource up to five times per second, per\n account.
\n\n TagResource is an asynchronous operation. If you issue a ListTagsOfResource request immediately after a\n TagResource request, DynamoDB might return your\n previous tag set, if there was one, or an empty tag set. This is because\n ListTagsOfResource uses an eventually consistent query, and the\n metadata for your tags or table might not be available at that moment. Wait for\n a few seconds, and then try the ListTagsOfResource request\n again.
The application or removal of tags using TagResource and\n UntagResource APIs is eventually consistent.\n ListTagsOfResource API will only reflect the changes after a\n few seconds.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB\n in the Amazon DynamoDB Developer Guide.
" } }, "com.amazonaws.dynamodb#TagResourceInput": { @@ -20428,7 +20428,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "Removes the association of tags from an Amazon DynamoDB resource. You can call\n UntagResource up to five times per second, per account.
\n UntagResource is an asynchronous operation. If you issue a ListTagsOfResource request immediately after an UntagResource request, DynamoDB might return your previous tag set, if there was one, or an empty tag set. This is because ListTagsOfResource uses an eventually consistent query, and the metadata for your tags or table might not be available at that moment. Wait for a few seconds, and then try the ListTagsOfResource request again.
The application or removal of tags using TagResource and UntagResource APIs is eventually consistent. ListTagsOfResource API will only reflect the changes after a few seconds.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB\n in the Amazon DynamoDB Developer Guide.
" + "smithy.api#documentation": "Removes the association of tags from an Amazon DynamoDB resource. You can call\n UntagResource up to five times per second, per account.
\n UntagResource is an asynchronous operation. If you issue a ListTagsOfResource request immediately after an\n UntagResource request, DynamoDB might return your\n previous tag set, if there was one, or an empty tag set. This is because\n ListTagsOfResource uses an eventually consistent query, and the\n metadata for your tags or table might not be available at that moment. Wait for\n a few seconds, and then try the ListTagsOfResource request\n again.
The application or removal of tags using TagResource and\n UntagResource APIs is eventually consistent.\n ListTagsOfResource API will only reflect the changes after a\n few seconds.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB\n in the Amazon DynamoDB Developer Guide.
" } }, "com.amazonaws.dynamodb#UntagResourceInput": { @@ -20535,7 +20535,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "\n UpdateContinuousBackups enables or disables point in time recovery for\n the specified table. A successful UpdateContinuousBackups call returns the\n current ContinuousBackupsDescription. Continuous backups are\n ENABLED on all tables at table creation. If point in time recovery is\n enabled, PointInTimeRecoveryStatus will be set to ENABLED.
Once continuous backups and point in time recovery are enabled, you can restore to\n any point in time within EarliestRestorableDateTime and\n LatestRestorableDateTime.
\n LatestRestorableDateTime is typically 5 minutes before the current time.\n You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days.
\n UpdateContinuousBackups enables or disables point in time recovery for\n the specified table. A successful UpdateContinuousBackups call returns the\n current ContinuousBackupsDescription. Continuous backups are\n ENABLED on all tables at table creation. If point in time recovery is\n enabled, PointInTimeRecoveryStatus will be set to ENABLED.
Once continuous backups and point in time recovery are enabled, you can restore to\n any point in time within EarliestRestorableDateTime and\n LatestRestorableDateTime.
\n LatestRestorableDateTime is typically 5 minutes before the current time.\n You can restore your table to any point in time in the last 35 days. You can set the\n RecoveryPeriodInDays to any value between 1 and 35 days.
Controls how you are charged for read and write throughput and how you manage\n capacity. When switching from pay-per-request to provisioned capacity, initial\n provisioned capacity values must be set. The initial provisioned capacity values are\n estimated based on the consumed read and write capacity of your table and global\n secondary indexes over the past 30 minutes.
\n\n PROVISIONED - We recommend using PROVISIONED for\n predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode.
\n PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST\n for unpredictable workloads. PAY_PER_REQUEST sets the billing mode\n to On-demand capacity mode.
Controls how you are charged for read and write throughput and how you manage\n capacity. When switching from pay-per-request to provisioned capacity, initial\n provisioned capacity values must be set. The initial provisioned capacity values are\n estimated based on the consumed read and write capacity of your table and global\n secondary indexes over the past 30 minutes.
\n\n PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST\n for most DynamoDB workloads. PAY_PER_REQUEST sets the billing mode\n to On-demand capacity mode.
\n PROVISIONED - We recommend using PROVISIONED for\n steady workloads with predictable growth where capacity requirements can be\n reliably forecasted. PROVISIONED sets the billing mode to Provisioned capacity mode.
Specifies the consistency mode for a new global table. This parameter is only valid when you create a global table by specifying one or more Create actions in the ReplicaUpdates action list.
\nYou can specify one of the following consistency modes:
\n\n EVENTUAL: Configures a new global table for multi-Region eventual consistency. This is the default consistency mode for global tables.
\n STRONG: Configures a new global table for multi-Region strong consistency (preview).
Multi-Region strong consistency (MRSC) is a new DynamoDB global tables capability currently available in preview mode. For more information, see Global tables multi-Region strong consistency.
\nIf you don't specify this parameter, the global table consistency mode defaults to EVENTUAL.
Specifies the consistency mode for a new global table. This parameter is only valid\n when you create a global table by specifying one or more Create actions in the ReplicaUpdates action list.
\nYou can specify one of the following consistency modes:
\n\n EVENTUAL: Configures a new global table for multi-Region eventual\n consistency. This is the default consistency mode for global tables.
\n STRONG: Configures a new global table for multi-Region strong\n consistency (preview).
Multi-Region strong consistency (MRSC) is a new DynamoDB global\n tables capability currently available in preview mode. For more information,\n see Global tables multi-Region strong consistency.
\nIf you don't specify this parameter, the global table consistency mode defaults to\n EVENTUAL.
Represents the warm throughput (in read units per second and write units per second) for updating a table.
" + "smithy.api#documentation": "Represents the warm throughput (in read units per second and write units per second)\n for updating a table.
" } } }, diff --git a/codegen/sdk/aws-models/ec2.json b/codegen/sdk/aws-models/ec2.json index 032fa4ff79a..2bac5bd4d4a 100644 --- a/codegen/sdk/aws-models/ec2.json +++ b/codegen/sdk/aws-models/ec2.json @@ -2347,6 +2347,9 @@ { "target": "com.amazonaws.ec2#AssociateNatGatewayAddress" }, + { + "target": "com.amazonaws.ec2#AssociateRouteServer" + }, { "target": "com.amazonaws.ec2#AssociateRouteTable" }, @@ -2587,6 +2590,15 @@ { "target": "com.amazonaws.ec2#CreateRoute" }, + { + "target": "com.amazonaws.ec2#CreateRouteServer" + }, + { + "target": "com.amazonaws.ec2#CreateRouteServerEndpoint" + }, + { + "target": "com.amazonaws.ec2#CreateRouteServerPeer" + }, { "target": "com.amazonaws.ec2#CreateRouteTable" }, @@ -2821,6 +2833,15 @@ { "target": "com.amazonaws.ec2#DeleteRoute" }, + { + "target": "com.amazonaws.ec2#DeleteRouteServer" + }, + { + "target": "com.amazonaws.ec2#DeleteRouteServerEndpoint" + }, + { + "target": "com.amazonaws.ec2#DeleteRouteServerPeer" + }, { "target": "com.amazonaws.ec2#DeleteRouteTable" }, @@ -3256,6 +3277,15 @@ { "target": "com.amazonaws.ec2#DescribeReservedInstancesOfferings" }, + { + "target": "com.amazonaws.ec2#DescribeRouteServerEndpoints" + }, + { + "target": "com.amazonaws.ec2#DescribeRouteServerPeers" + }, + { + "target": "com.amazonaws.ec2#DescribeRouteServers" + }, { "target": "com.amazonaws.ec2#DescribeRouteTables" }, @@ -3487,6 +3517,9 @@ { "target": "com.amazonaws.ec2#DisableIpamOrganizationAdminAccount" }, + { + "target": "com.amazonaws.ec2#DisableRouteServerPropagation" + }, { "target": "com.amazonaws.ec2#DisableSerialConsoleAccess" }, @@ -3532,6 +3565,9 @@ { "target": "com.amazonaws.ec2#DisassociateNatGatewayAddress" }, + { + "target": "com.amazonaws.ec2#DisassociateRouteServer" + }, { "target": "com.amazonaws.ec2#DisassociateRouteTable" }, @@ -3592,6 +3628,9 @@ { "target": "com.amazonaws.ec2#EnableReachabilityAnalyzerOrganizationSharing" }, + { + "target": "com.amazonaws.ec2#EnableRouteServerPropagation" + }, { "target": 
"com.amazonaws.ec2#EnableSerialConsoleAccess" }, @@ -3730,6 +3769,15 @@ { "target": "com.amazonaws.ec2#GetReservedInstancesExchangeQuote" }, + { + "target": "com.amazonaws.ec2#GetRouteServerAssociations" + }, + { + "target": "com.amazonaws.ec2#GetRouteServerPropagations" + }, + { + "target": "com.amazonaws.ec2#GetRouteServerRoutingDatabase" + }, { "target": "com.amazonaws.ec2#GetSecurityGroupsForVpc" }, @@ -3916,6 +3964,9 @@ { "target": "com.amazonaws.ec2#ModifyReservedInstances" }, + { + "target": "com.amazonaws.ec2#ModifyRouteServer" + }, { "target": "com.amazonaws.ec2#ModifySecurityGroupRules" }, @@ -5957,6 +6008,15 @@ } } }, + "com.amazonaws.ec2#AsPath": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#AsnAssociation": { "type": "structure", "members": { @@ -7142,6 +7202,64 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#AssociateRouteServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#AssociateRouteServerRequest" + }, + "output": { + "target": "com.amazonaws.ec2#AssociateRouteServerResult" + }, + "traits": { + "smithy.api#documentation": "Associates a route server with a VPC to enable dynamic route updates.
\nA route server association is the connection established between a route server and a VPC.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#AssociateRouteServerRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The unique identifier for the route server to be associated.
", + "smithy.api#required": {} + } + }, + "VpcId": { + "target": "com.amazonaws.ec2#VpcId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the VPC to associate with the route server.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the association between the route server and the VPC.
", + "smithy.api#xmlName": "routeServerAssociation" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#AssociateRouteTable": { "type": "operation", "input": { @@ -9791,6 +9909,9 @@ "com.amazonaws.ec2#BoxedInteger": { "type": "integer" }, + "com.amazonaws.ec2#BoxedLong": { + "type": "long" + }, "com.amazonaws.ec2#BundleId": { "type": "string" }, @@ -14694,7 +14815,8 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier you provide to ensure idempotency of the request. For\n more information, see Ensuring idempotency\n in the Amazon EC2 API Reference.
" + "smithy.api#documentation": "Unique, case-sensitive identifier you provide to ensure idempotency of the request. For\n more information, see Ensuring idempotency\n in the Amazon EC2 API Reference.
", + "smithy.api#idempotencyToken": {} } }, "Description": { @@ -19458,7 +19580,7 @@ "target": "com.amazonaws.ec2#CreateRestoreImageTaskResult" }, "traits": { - "smithy.api#documentation": "Starts a task that restores an AMI from an Amazon S3 object that was previously created by\n using CreateStoreImageTask.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n Amazon S3 in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Starts a task that restores an AMI from an Amazon S3 object that was previously created by\n using CreateStoreImageTask.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n Amazon S3 in the Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#CreateRestoreImageTaskRequest": { @@ -19489,7 +19611,7 @@ "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { - "smithy.api#documentation": "The tags to apply to the AMI and snapshots on restoration. You can tag the AMI, the\n snapshots, or both.
\nTo tag the AMI, the value for ResourceType must be\n image.
To tag the snapshots, the value for ResourceType must be\n snapshot. The same tag is applied to all of the snapshots that are\n created.
The tags to apply to the AMI and snapshots on restoration. You can tag the AMI, the\n snapshots, or both.
\nTo tag the AMI, the value for ResourceType must be\n image.
To tag the snapshots, the value for ResourceType must be\n snapshot. The same tag is applied to all of the snapshots that are\n created.
Creates a new route server to manage dynamic routing in a VPC.
\nAmazon VPC Route Server simplifies routing for traffic between workloads that are deployed within a VPC and its internet gateways. With this feature, \nVPC Route Server dynamically updates VPC and internet gateway route tables with your preferred IPv4 or IPv6 routes to achieve routing fault tolerance for those workloads. This enables you to automatically reroute traffic within a VPC, which increases the manageability of VPC routing and interoperability with third-party workloads.
\nRoute server supports the following route table types:
\nVPC route tables not associated with subnets
\nSubnet route tables
\nInternet gateway route tables
\nRoute server does not support route tables associated with virtual private gateways. To propagate routes into a transit gateway route table, use Transit Gateway Connect.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#CreateRouteServerEndpoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#CreateRouteServerEndpointRequest" + }, + "output": { + "target": "com.amazonaws.ec2#CreateRouteServerEndpointResult" + }, + "traits": { + "smithy.api#documentation": "Creates a new endpoint for a route server in a specified subnet.
\nA route server endpoint is an Amazon Web Services-managed component inside a subnet that facilitates BGP (Border Gateway Protocol) connections between your route server and your BGP peers.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#CreateRouteServerEndpointRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server for which to create an endpoint.
", + "smithy.api#required": {} + } + }, + "SubnetId": { + "target": "com.amazonaws.ec2#SubnetId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the subnet in which to create the route server endpoint.
", + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "Unique, case-sensitive identifier to ensure idempotency of the request.
", + "smithy.api#idempotencyToken": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
The tags to apply to the route server endpoint during creation.
", + "smithy.api#xmlName": "TagSpecification" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#CreateRouteServerEndpointResult": { + "type": "structure", + "members": { + "RouteServerEndpoint": { + "target": "com.amazonaws.ec2#RouteServerEndpoint", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerEndpoint", + "smithy.api#documentation": "Information about the created route server endpoint.
", + "smithy.api#xmlName": "routeServerEndpoint" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ec2#CreateRouteServerPeer": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#CreateRouteServerPeerRequest" + }, + "output": { + "target": "com.amazonaws.ec2#CreateRouteServerPeerResult" + }, + "traits": { + "smithy.api#documentation": "Creates a new BGP peer for a specified route server endpoint.
\nA route server peer is a session between a route server endpoint and the device deployed in Amazon Web Services (such as a firewall appliance or other network security function running on an EC2 instance). The device must meet these requirements:
\nHave an elastic network interface in the VPC
\nSupport BGP (Border Gateway Protocol)
\nCan initiate BGP sessions
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#CreateRouteServerPeerRequest": { + "type": "structure", + "members": { + "RouteServerEndpointId": { + "target": "com.amazonaws.ec2#RouteServerEndpointId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server endpoint for which to create a peer.
", + "smithy.api#required": {} + } + }, + "PeerAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The IPv4 address of the peer device.
", + "smithy.api#required": {} + } + }, + "BgpOptions": { + "target": "com.amazonaws.ec2#RouteServerBgpOptionsRequest", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The BGP options for the peer, including ASN (Autonomous System Number) and BFD (Bidirectional Forwarding Detection) settings.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
The tags to apply to the route server peer during creation.
", + "smithy.api#xmlName": "TagSpecification" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#CreateRouteServerPeerResult": { + "type": "structure", + "members": { + "RouteServerPeer": { + "target": "com.amazonaws.ec2#RouteServerPeer", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerPeer", + "smithy.api#documentation": "Information about the created route server peer.
", + "smithy.api#xmlName": "routeServerPeer" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ec2#CreateRouteServerRequest": { + "type": "structure", + "members": { + "AmazonSideAsn": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The private Autonomous System Number (ASN) for the Amazon side of the BGP session. Valid values are from 1 to 4294967295. We recommend using a private ASN in the 64512–65534 (16-bit ASN) or 4200000000–4294967294 (32-bit ASN) range.
", + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "Unique, case-sensitive identifier to ensure idempotency of the request.
", + "smithy.api#idempotencyToken": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Indicates whether routes should be persisted after all BGP sessions are terminated.
" + } + }, + "PersistRoutesDuration": { + "target": "com.amazonaws.ec2#BoxedLong", + "traits": { + "smithy.api#documentation": "The number of minutes a route server will wait after BGP is re-established to unpersist the routes in the FIB and RIB. Value must be in the range of 1-5. Required if PersistRoutes is enabled.
If you set the duration to 1 minute, then when your network appliance re-establishes BGP with route server, it has 1 minute to relearn its adjacent network and advertise those routes to route server before route server resumes normal functionality. In most cases, 1 minute is probably sufficient. If, however, you have concerns that your BGP network may not be capable of fully re-establishing and re-learning everything in 1 minute, you can increase the duration up to 5 minutes.
" + } + }, + "SnsNotificationsEnabled": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether SNS notifications should be enabled for route server events. Enabling SNS notifications persists BGP status changes to an SNS topic provisioned by Amazon Web Services.
" + } + }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "The tags to apply to the route server during creation.
", + "smithy.api#xmlName": "TagSpecification" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#CreateRouteServerResult": { + "type": "structure", + "members": { + "RouteServer": { + "target": "com.amazonaws.ec2#RouteServer", + "traits": { + "aws.protocols#ec2QueryName": "RouteServer", + "smithy.api#documentation": "Information about the created route server.
", + "smithy.api#xmlName": "routeServer" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#CreateRouteTable": { "type": "operation", "input": { @@ -20143,7 +20492,7 @@ "target": "com.amazonaws.ec2#CreateStoreImageTaskResult" }, "traits": { - "smithy.api#documentation": "Stores an AMI as a single object in an Amazon S3 bucket.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n Amazon S3 in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Stores an AMI as a single object in an Amazon S3 bucket.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n Amazon S3 in the Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#CreateStoreImageTaskRequest": { @@ -26524,6 +26873,156 @@ "smithy.api#input": {} } }, + "com.amazonaws.ec2#DeleteRouteServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DeleteRouteServerRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DeleteRouteServerResult" + }, + "traits": { + "smithy.api#documentation": "Deletes the specified route server.
\nAmazon VPC Route Server simplifies routing for traffic between workloads that are deployed within a VPC and its internet gateways. With this feature, \nVPC Route Server dynamically updates VPC and internet gateway route tables with your preferred IPv4 or IPv6 routes to achieve routing fault tolerance for those workloads. This enables you to automatically reroute traffic within a VPC, which increases the manageability of VPC routing and interoperability with third-party workloads.
\nRoute server supports the follow route table types:
\nVPC route tables not associated with subnets
\nSubnet route tables
\nInternet gateway route tables
\nRoute server does not support route tables associated with virtual private gateways. To propagate routes into a transit gateway route table, use Transit Gateway Connect.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#DeleteRouteServerEndpoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DeleteRouteServerEndpointRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DeleteRouteServerEndpointResult" + }, + "traits": { + "smithy.api#documentation": "Deletes the specified route server endpoint.
\nA route server endpoint is an Amazon Web Services-managed component inside a subnet that facilitates BGP (Border Gateway Protocol) connections between your route server and your BGP peers.
" + } + }, + "com.amazonaws.ec2#DeleteRouteServerEndpointRequest": { + "type": "structure", + "members": { + "RouteServerEndpointId": { + "target": "com.amazonaws.ec2#RouteServerEndpointId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server endpoint to delete.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the deleted route server endpoint.
", + "smithy.api#xmlName": "routeServerEndpoint" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ec2#DeleteRouteServerPeer": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DeleteRouteServerPeerRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DeleteRouteServerPeerResult" + }, + "traits": { + "smithy.api#documentation": "Deletes the specified BGP peer from a route server.
\nA route server peer is a session between a route server endpoint and the device deployed in Amazon Web Services (such as a firewall appliance or other network security function running on an EC2 instance). The device must meet these requirements:
\nHave an elastic network interface in the VPC
\nSupport BGP (Border Gateway Protocol)
\nCan initiate BGP sessions
\nThe ID of the route server peer to delete.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the deleted route server peer.
", + "smithy.api#xmlName": "routeServerPeer" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ec2#DeleteRouteServerRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server to delete.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the deleted route server.
", + "smithy.api#xmlName": "routeServer" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DeleteRouteTable": { "type": "operation", "input": { @@ -28585,7 +29084,7 @@ "target": "com.amazonaws.ec2#DeregisterImageResult" }, "traits": { - "smithy.api#documentation": "Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new\n instances.
\nIf you deregister an AMI that matches a Recycle Bin retention rule, the AMI is retained in\n the Recycle Bin for the specified retention period. For more information, see Recycle Bin in\n the Amazon EC2 User Guide.
\nWhen you deregister an AMI, it doesn't affect any instances that you've already launched\n from the AMI. You'll continue to incur usage costs for those instances until you terminate\n them.
\nWhen you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was created\n for the root volume of the instance during the AMI creation process. When you deregister an\n instance store-backed AMI, it doesn't affect the files that you uploaded to Amazon S3 when you\n created the AMI.
" + "smithy.api#documentation": "Deregisters the specified AMI. A deregistered AMI can't be used to launch new\n instances.
\nIf a deregistered EBS-backed AMI matches a Recycle Bin retention rule, it moves to the\n Recycle Bin for the specified retention period. It can be restored before its retention period\n expires, after which it is permanently deleted. If the deregistered AMI doesn't match a\n retention rule, it is permanently deleted immediately. For more information, see Recycle Bin in\n the Amazon EBS User Guide.
\nDeregistering an AMI does not delete the following:
\nInstances already launched from the AMI. You'll continue to incur usage costs for the\n instances until you terminate them.
\nFor EBS-backed AMIs: The snapshots that were created of the root and data volumes of\n the instance during AMI creation. You'll continue to incur snapshot storage costs.
\nFor instance store-backed AMIs: The files uploaded to Amazon S3 during AMI creation. You'll\n continue to incur S3 storage costs.
\nFor more information, see Deregister an Amazon EC2 AMI in the\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#DeregisterImageRequest": { @@ -34021,7 +34520,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "The filters.
\n\n availability-zone - The name of the Availability Zone (for example,\n us-west-2a) or Local Zone (for example, us-west-2-lax-1b) of\n the instance.
\n instance-id - The ID of the instance.
\n image-allowed - A Boolean that indicates whether the image meets the\n criteria specified for Allowed AMIs.
\n instance-state-name - The state of the instance (pending |\n running | shutting-down | terminated |\n stopping | stopped).
\n instance-type - The type of instance (for example,\n t3.micro).
\n launch-time - The time when the instance was launched, in the ISO 8601\n format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2023-09-29T11:04:43.305Z. You can use a wildcard (*), for\n example, 2023-09-29T*, which matches an entire day.
\n owner-alias - The owner alias (amazon |\n aws-marketplace | aws-backup-vault). The valid aliases are\n defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be set\n using the IAM console. We recommend that you use the Owner request parameter\n instead of this filter.
\n owner-id - The Amazon Web Services account ID of the owner. We recommend that you use\n the Owner request parameter instead of this filter.
\n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
\n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n zone-id - The ID of the Availability Zone (for example,\n usw2-az2) or Local Zone (for example, usw2-lax1-az1) of the\n instance.
The filters.
\n\n availability-zone - The name of the Availability Zone (for example,\n us-west-2a) or Local Zone (for example, us-west-2-lax-1b) of\n the instance.
\n instance-id - The ID of the instance.
\n image-allowed - A Boolean that indicates whether the image meets the\n criteria specified for Allowed AMIs.
\n instance-state-name - The state of the instance (pending |\n running | shutting-down | terminated |\n stopping | stopped).
\n instance-type - The type of instance (for example,\n t3.micro).
\n launch-time - The time when the instance was launched, in the ISO 8601\n format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2023-09-29T11:04:43.305Z. You can use a wildcard (*), for\n example, 2023-09-29T*, which matches an entire day.
\n owner-alias - The owner alias (amazon |\n aws-marketplace | aws-backup-vault). The valid aliases are\n defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be set\n using the IAM console. We recommend that you use the Owner request parameter\n instead of this filter.
\n owner-id - The Amazon Web Services account ID of the owner. We recommend that you use\n the Owner request parameter instead of this filter.
\n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
\n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n zone-id - The ID of the Availability Zone (for example,\n usw2-az2) or Local Zone (for example, usw2-lax1-az1) of the\n instance.
One or more filters. Filter names and values are case-sensitive.
\n\n auto-recovery-supported - Indicates whether Amazon CloudWatch action\n based recovery is supported (true | false).
\n bare-metal - Indicates whether it is a bare metal instance type\n (true | false).
\n burstable-performance-supported - Indicates whether the instance type is a\n burstable performance T instance type (true | false).
\n current-generation - Indicates whether this instance type is the latest\n generation instance type of an instance family (true | false).
\n ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline\n bandwidth performance for an EBS-optimized instance type, in Mbps.
\n ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage\n operations per second for an EBS-optimized instance type.
\n ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline\n throughput performance for an EBS-optimized instance type, in MB/s.
\n ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth\n performance for an EBS-optimized instance type, in Mbps.
\n ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage\n operations per second for an EBS-optimized instance type.
\n ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum\n throughput performance for an EBS-optimized instance type, in MB/s.
\n ebs-info.ebs-optimized-support - Indicates whether the instance type is\n EBS-optimized (supported | unsupported |\n default).
\n ebs-info.encryption-support - Indicates whether EBS encryption is supported\n (supported | unsupported).
\n ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe)\n is supported for EBS volumes (required | supported |\n unsupported).
\n free-tier-eligible - Indicates whether the instance type is eligible to use\n in the free tier (true | false).
\n hibernation-supported - Indicates whether On-Demand hibernation is supported\n (true | false).
\n hypervisor - The hypervisor (nitro | xen).
\n instance-storage-info.disk.count - The number of local disks.
\n instance-storage-info.disk.size-in-gb - The storage size of each instance\n storage disk, in GB.
\n instance-storage-info.disk.type - The storage technology for the local\n instance storage disks (hdd | ssd).
\n instance-storage-info.encryption-support - Indicates whether data is\n encrypted at rest (required | supported |\n unsupported).
\n instance-storage-info.nvme-support - Indicates whether non-volatile memory\n express (NVMe) is supported for instance store (required | supported\n | unsupported).
\n instance-storage-info.total-size-in-gb - The total amount of storage\n available from all local instance storage, in GB.
\n instance-storage-supported - Indicates whether the instance type has local\n instance storage (true | false).
\n instance-type - The instance type (for example c5.2xlarge or\n c5*).
\n memory-info.size-in-mib - The memory size.
\n network-info.bandwidth-weightings - For instances that support bandwidth \n weighting to boost performance (default, vpc-1, ebs-1).
\n network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic\n Fabric Adapters (EFAs) per instance.
\n network-info.efa-supported - Indicates whether the instance type supports\n Elastic Fabric Adapter (EFA) (true | false).
\n network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is\n supported or required (required | supported |\n unsupported).
\n network-info.encryption-in-transit-supported - Indicates whether the instance\n type automatically encrypts in-transit traffic between instances (true | false).
\n network-info.ipv4-addresses-per-interface - The maximum number of private\n IPv4 addresses per network interface.
\n network-info.ipv6-addresses-per-interface - The maximum number of private\n IPv6 addresses per network interface.
\n network-info.ipv6-supported - Indicates whether the instance type supports\n IPv6 (true | false).
\n network-info.maximum-network-cards - The maximum number of network cards per\n instance.
\n network-info.maximum-network-interfaces - The maximum number of network\n interfaces per instance.
\n network-info.network-performance - The network performance (for example, \"25\n Gigabit\").
\n nitro-enclaves-support - Indicates whether Nitro Enclaves is supported\n (supported | unsupported).
\n nitro-tpm-support - Indicates whether NitroTPM is supported\n (supported | unsupported).
\n nitro-tpm-info.supported-versions - The supported NitroTPM version\n (2.0).
\n processor-info.supported-architecture - The CPU architecture\n (arm64 | i386 | x86_64).
\n processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in\n GHz.
\n processor-info.supported-features - The supported CPU features\n (amd-sev-snp).
\n supported-boot-mode - The boot mode (legacy-bios |\n uefi).
\n supported-root-device-type - The root device type (ebs |\n instance-store).
\n supported-usage-class - The usage class (on-demand | spot | \n capacity-block).
\n supported-virtualization-type - The virtualization type (hvm |\n paravirtual).
\n vcpu-info.default-cores - The default number of cores for the instance\n type.
\n vcpu-info.default-threads-per-core - The default number of threads per core\n for the instance type.
\n vcpu-info.default-vcpus - The default number of vCPUs for the instance\n type.
\n vcpu-info.valid-cores - The number of cores that can be configured for the\n instance type.
\n vcpu-info.valid-threads-per-core - The number of threads per core that can be\n configured for the instance type. For example, \"1\" or \"1,2\".
One or more filters. Filter names and values are case-sensitive.
\n\n auto-recovery-supported - Indicates whether Amazon CloudWatch action\n based recovery is supported (true | false).
\n bare-metal - Indicates whether it is a bare metal instance type\n (true | false).
\n burstable-performance-supported - Indicates whether the instance type is a\n burstable performance T instance type (true | false).
\n current-generation - Indicates whether this instance type is the latest\n generation instance type of an instance family (true | false).
\n dedicated-hosts-supported - Indicates whether the instance type supports\n Dedicated Hosts. (true | false)
\n ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline\n bandwidth performance for an EBS-optimized instance type, in Mbps.
\n ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage\n operations per second for an EBS-optimized instance type.
\n ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline\n throughput performance for an EBS-optimized instance type, in MB/s.
\n ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth\n performance for an EBS-optimized instance type, in Mbps.
\n ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage\n operations per second for an EBS-optimized instance type.
\n ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum\n throughput performance for an EBS-optimized instance type, in MB/s.
\n ebs-info.ebs-optimized-support - Indicates whether the instance type is\n EBS-optimized (supported | unsupported |\n default).
\n ebs-info.encryption-support - Indicates whether EBS encryption is supported\n (supported | unsupported).
\n ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe)\n is supported for EBS volumes (required | supported |\n unsupported).
\n free-tier-eligible - Indicates whether the instance type is eligible to use\n in the free tier (true | false).
\n hibernation-supported - Indicates whether On-Demand hibernation is supported\n (true | false).
\n hypervisor - The hypervisor (nitro | xen).
\n instance-storage-info.disk.count - The number of local disks.
\n instance-storage-info.disk.size-in-gb - The storage size of each instance\n storage disk, in GB.
\n instance-storage-info.disk.type - The storage technology for the local\n instance storage disks (hdd | ssd).
\n instance-storage-info.encryption-support - Indicates whether data is\n encrypted at rest (required | supported |\n unsupported).
\n instance-storage-info.nvme-support - Indicates whether non-volatile memory\n express (NVMe) is supported for instance store (required | supported\n | unsupported).
\n instance-storage-info.total-size-in-gb - The total amount of storage\n available from all local instance storage, in GB.
\n instance-storage-supported - Indicates whether the instance type has local\n instance storage (true | false).
\n instance-type - The instance type (for example c5.2xlarge or\n c5*).
\n memory-info.size-in-mib - The memory size.
\n network-info.bandwidth-weightings - For instances that support bandwidth \n weighting to boost performance (default, vpc-1, ebs-1).
\n network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic\n Fabric Adapters (EFAs) per instance.
\n network-info.efa-supported - Indicates whether the instance type supports\n Elastic Fabric Adapter (EFA) (true | false).
\n network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is\n supported or required (required | supported |\n unsupported).
\n network-info.encryption-in-transit-supported - Indicates whether the instance\n type automatically encrypts in-transit traffic between instances (true | false).
\n network-info.ipv4-addresses-per-interface - The maximum number of private\n IPv4 addresses per network interface.
\n network-info.ipv6-addresses-per-interface - The maximum number of private\n IPv6 addresses per network interface.
\n network-info.ipv6-supported - Indicates whether the instance type supports\n IPv6 (true | false).
\n network-info.maximum-network-cards - The maximum number of network cards per\n instance.
\n network-info.maximum-network-interfaces - The maximum number of network\n interfaces per instance.
\n network-info.network-performance - The network performance (for example, \"25\n Gigabit\").
\n nitro-enclaves-support - Indicates whether Nitro Enclaves is supported\n (supported | unsupported).
\n nitro-tpm-support - Indicates whether NitroTPM is supported\n (supported | unsupported).
\n nitro-tpm-info.supported-versions - The supported NitroTPM version\n (2.0).
\n processor-info.supported-architecture - The CPU architecture\n (arm64 | i386 | x86_64).
\n processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in\n GHz.
\n processor-info.supported-features - The supported CPU features\n (amd-sev-snp).
\n supported-boot-mode - The boot mode (legacy-bios |\n uefi).
\n supported-root-device-type - The root device type (ebs |\n instance-store).
\n supported-usage-class - The usage class (on-demand | spot | \n capacity-block).
\n supported-virtualization-type - The virtualization type (hvm |\n paravirtual).
\n vcpu-info.default-cores - The default number of cores for the instance\n type.
\n vcpu-info.default-threads-per-core - The default number of threads per core\n for the instance type.
\n vcpu-info.default-vcpus - The default number of vCPUs for the instance\n type.
\n vcpu-info.valid-cores - The number of cores that can be configured for the\n instance type.
\n vcpu-info.valid-threads-per-core - The number of threads per core that can be\n configured for the instance type. For example, \"1\" or \"1,2\".
Describes one or more route server endpoints.
\nA route server endpoint is an Amazon Web Services-managed component inside a subnet that facilitates BGP (Border Gateway Protocol) connections between your route server and your BGP peers.
\nFor more information, see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "RouteServerEndpoints", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ec2#DescribeRouteServerEndpointsRequest": { + "type": "structure", + "members": { + "RouteServerEndpointIds": { + "target": "com.amazonaws.ec2#RouteServerEndpointIdsList", + "traits": { + "smithy.api#documentation": "The IDs of the route server endpoints to describe.
", + "smithy.api#xmlName": "RouteServerEndpointId" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "The token for the next page of results.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#RouteServerMaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of results to return with a single call.
" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "One or more filters to apply to the describe request.
", + "smithy.api#xmlName": "Filter" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the described route server endpoints.
", + "smithy.api#xmlName": "routeServerEndpointSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are no more results to return.
Describes one or more route server peers.
\nA route server peer is a session between a route server endpoint and the device deployed in Amazon Web Services (such as a firewall appliance or other network security function running on an EC2 instance). The device must meet these requirements:
\nHave an elastic network interface in the VPC
\nSupport BGP (Border Gateway Protocol)
\nCan initiate BGP sessions
\nFor more information, see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "RouteServerPeers", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ec2#DescribeRouteServerPeersRequest": { + "type": "structure", + "members": { + "RouteServerPeerIds": { + "target": "com.amazonaws.ec2#RouteServerPeerIdsList", + "traits": { + "smithy.api#documentation": "The IDs of the route server peers to describe.
", + "smithy.api#xmlName": "RouteServerPeerId" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "The token for the next page of results.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#RouteServerMaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of results to return with a single call.
" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "One or more filters to apply to the describe request.
", + "smithy.api#xmlName": "Filter" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the described route server peers.
", + "smithy.api#xmlName": "routeServerPeerSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are no more results to return.
Describes one or more route servers.
\nAmazon VPC Route Server simplifies routing for traffic between workloads that are deployed within a VPC and its internet gateways. With this feature, \nVPC Route Server dynamically updates VPC and internet gateway route tables with your preferred IPv4 or IPv6 routes to achieve routing fault tolerance for those workloads. This enables you to automatically reroute traffic within a VPC, which increases the manageability of VPC routing and interoperability with third-party workloads.
\nRoute server supports the following route table types:
\nVPC route tables not associated with subnets
\nSubnet route tables
\nInternet gateway route tables
\nRoute server does not support route tables associated with virtual private gateways. To propagate routes into a transit gateway route table, use Transit Gateway Connect.
\nFor more information, see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "RouteServers", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ec2#DescribeRouteServersRequest": { + "type": "structure", + "members": { + "RouteServerIds": { + "target": "com.amazonaws.ec2#RouteServerIdsList", + "traits": { + "smithy.api#documentation": "The IDs of the route servers to describe.
", + "smithy.api#xmlName": "RouteServerId" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "The token for the next page of results.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#RouteServerMaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of results to return with a single call.
" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "One or more filters to apply to the describe request.
", + "smithy.api#xmlName": "Filter" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the described route servers.
", + "smithy.api#xmlName": "routeServerSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are no more results to return.
Describes the progress of the AMI store tasks. You can describe the store tasks for\n specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from\n the last 31 days.
\nFor each AMI task, the response indicates if the task is InProgress,\n Completed, or Failed. For tasks InProgress, the\n response shows the estimated progress as a percentage.
Tasks are listed in reverse chronological order. Currently, only tasks from the past 31\n days can be viewed.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n Amazon S3 in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "Describes the progress of the AMI store tasks. You can describe the store tasks for\n specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from\n the last 31 days.
\nFor each AMI task, the response indicates if the task is InProgress,\n Completed, or Failed. For tasks InProgress, the\n response shows the estimated progress as a percentage.
Tasks are listed in reverse chronological order. Currently, only tasks from the past 31\n days can be viewed.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n Amazon S3 in the Amazon EC2 User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -41024,7 +41769,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "The filters.
\n\n task-state - Returns tasks in a certain state (InProgress |\n Completed | Failed)
\n bucket - Returns task information for tasks that targeted a specific\n bucket. For the filter value, specify the bucket name.
When you specify the ImageIds parameter, any filters that you specify are\n ignored. To use the filters, you must remove the ImageIds parameter.
The filters.
\n\n task-state - Returns tasks in a certain state (InProgress |\n Completed | Failed)
\n bucket - Returns task information for tasks that targeted a specific\n bucket. For the filter value, specify the bucket name.
When you specify the ImageIds parameter, any filters that you specify are\n ignored. To use the filters, you must remove the ImageIds parameter.
Disables Allowed AMIs for your account in the specified Amazon Web Services Region. When set to\n disabled, the image criteria in your Allowed AMIs settings do not apply, and no\n restrictions are placed on AMI discoverability or usage. Users in your account can launch\n instances using any public AMI or AMI shared with your account.
The Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of\n the criteria you set, the AMIs created by your account will always be discoverable and\n usable by users in your account.
\nFor more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Disables Allowed AMIs for your account in the specified Amazon Web Services Region. When set to\n disabled, the image criteria in your Allowed AMIs settings do not apply, and no\n restrictions are placed on AMI discoverability or usage. Users in your account can launch\n instances using any public AMI or AMI shared with your account.
The Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of\n the criteria you set, the AMIs created by your account will always be discoverable and\n usable by users in your account.
\nFor more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#DisableAllowedImagesSettingsRequest": { @@ -46255,7 +47000,7 @@ "target": "com.amazonaws.ec2#DisableImageResult" }, "traits": { - "smithy.api#documentation": "Sets the AMI state to disabled and removes all launch permissions from the\n AMI. A disabled AMI can't be used for instance launches.
A disabled AMI can't be shared. If an AMI was public or previously shared, it is made\n private. If an AMI was shared with an Amazon Web Services account, organization, or Organizational Unit,\n they lose access to the disabled AMI.
\nA disabled AMI does not appear in DescribeImages API calls by\n default.
\nOnly the AMI owner can disable an AMI.
\nYou can re-enable a disabled AMI using EnableImage.
\nFor more information, see Disable an AMI in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Sets the AMI state to disabled and removes all launch permissions from the\n AMI. A disabled AMI can't be used for instance launches.
A disabled AMI can't be shared. If an AMI was public or previously shared, it is made\n private. If an AMI was shared with an Amazon Web Services account, organization, or Organizational Unit,\n they lose access to the disabled AMI.
\nA disabled AMI does not appear in DescribeImages API calls by\n default.
\nOnly the AMI owner can disable an AMI.
\nYou can re-enable a disabled AMI using EnableImage.
\nFor more information, see Disable an AMI in the\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#DisableImageBlockPublicAccess": { @@ -46267,7 +47012,7 @@ "target": "com.amazonaws.ec2#DisableImageBlockPublicAccessResult" }, "traits": { - "smithy.api#documentation": "Disables block public access for AMIs at the account level in the\n specified Amazon Web Services Region. This removes the block public access restriction\n from your account. With the restriction removed, you can publicly share your AMIs in the\n specified Amazon Web Services Region.
\nThe API can take up to 10 minutes to configure this setting. During this time, if you run\n GetImageBlockPublicAccessState, the response will be\n block-new-sharing. When the API has completed the configuration, the response\n will be unblocked.
For more information, see Block\n public access to your AMIs in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Disables block public access for AMIs at the account level in the\n specified Amazon Web Services Region. This removes the block public access restriction\n from your account. With the restriction removed, you can publicly share your AMIs in the\n specified Amazon Web Services Region.
\nThe API can take up to 10 minutes to configure this setting. During this time, if you run\n GetImageBlockPublicAccessState, the response will be\n block-new-sharing. When the API has completed the configuration, the response\n will be unblocked.
For more information, see Block\n public access to your AMIs in the Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#DisableImageBlockPublicAccessRequest": { @@ -46309,7 +47054,7 @@ "target": "com.amazonaws.ec2#DisableImageDeprecationResult" }, "traits": { - "smithy.api#documentation": "Cancels the deprecation of the specified AMI.
\nFor more information, see Deprecate an AMI in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Cancels the deprecation of the specified AMI.
\nFor more information, see Deprecate an AMI in the\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#DisableImageDeprecationRequest": { @@ -46359,7 +47104,7 @@ "target": "com.amazonaws.ec2#DisableImageDeregistrationProtectionResult" }, "traits": { - "smithy.api#documentation": "Disables deregistration protection for an AMI. When deregistration protection is disabled,\n the AMI can be deregistered.
\nIf you chose to include a 24-hour cooldown period when you enabled deregistration\n protection for the AMI, then, when you disable deregistration protection, you won’t\n immediately be able to deregister the AMI.
\nFor more information, see Protect an\n AMI from deregistration in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Disables deregistration protection for an AMI. When deregistration protection is disabled,\n the AMI can be deregistered.
\nIf you chose to include a 24-hour cooldown period when you enabled deregistration\n protection for the AMI, then, when you disable deregistration protection, you won’t\n immediately be able to deregister the AMI.
\nFor more information, see Protect an\n AMI from deregistration in the Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#DisableImageDeregistrationProtectionRequest": { @@ -46488,6 +47233,64 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#DisableRouteServerPropagation": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DisableRouteServerPropagationRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DisableRouteServerPropagationResult" + }, + "traits": { + "smithy.api#documentation": "Disables route propagation from a route server to a specified route table.
\nWhen enabled, route server propagation installs the routes in the FIB on the route table you've specified. Route server supports IPv4 and IPv6 route propagation.
\nAmazon VPC Route Server simplifies routing for traffic between workloads that are deployed within a VPC and its internet gateways. With this feature, \nVPC Route Server dynamically updates VPC and internet gateway route tables with your preferred IPv4 or IPv6 routes to achieve routing fault tolerance for those workloads. This enables you to automatically reroute traffic within a VPC, which increases the manageability of VPC routing and interoperability with third-party workloads.
\nRoute server supports the follow route table types:
\nVPC route tables not associated with subnets
\nSubnet route tables
\nInternet gateway route tables
\nRoute server does not support route tables associated with virtual private gateways. To propagate routes into a transit gateway route table, use Transit Gateway Connect.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#DisableRouteServerPropagationRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server for which to disable propagation.
", + "smithy.api#required": {} + } + }, + "RouteTableId": { + "target": "com.amazonaws.ec2#RouteTableId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route table for which to disable route server propagation.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the disabled route server propagation.
", + "smithy.api#xmlName": "routeServerPropagation" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DisableSerialConsoleAccess": { "type": "operation", "input": { @@ -47318,6 +48121,64 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#DisassociateRouteServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DisassociateRouteServerRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DisassociateRouteServerResult" + }, + "traits": { + "smithy.api#documentation": "Disassociates a route server from a VPC.
\nA route server association is the connection established between a route server and a VPC.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#DisassociateRouteServerRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server to disassociate.
", + "smithy.api#required": {} + } + }, + "VpcId": { + "target": "com.amazonaws.ec2#VpcId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the VPC to disassociate from the route server.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the disassociated route server.
", + "smithy.api#xmlName": "routeServerAssociation" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DisassociateRouteTable": { "type": "operation", "input": { @@ -49342,13 +50203,13 @@ "EnaSrdEnabled": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "Specifies whether ENA Express is enabled for the network interface when you \n\t\t\tlaunch an instance from your launch template.
" + "smithy.api#documentation": "Specifies whether ENA Express is enabled for the network interface when you \n\t\t\tlaunch an instance.
" } }, "EnaSrdUdpSpecification": { "target": "com.amazonaws.ec2#EnaSrdUdpSpecificationRequest", "traits": { - "smithy.api#documentation": "Contains ENA Express settings for UDP network traffic in your launch template.
" + "smithy.api#documentation": "Contains ENA Express settings for UDP network traffic for the network interface \n\t attached to the instance.
" } } }, @@ -49379,7 +50240,7 @@ "EnaSrdUdpEnabled": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether UDP traffic uses ENA Express for your instance. To ensure that \n\t\t\tUDP traffic can use ENA Express when you launch an instance, you must also set \n\t\t\tEnaSrdEnabled in the EnaSrdSpecificationRequest to true in your \n\t\t\tlaunch template.
Indicates whether UDP traffic uses ENA Express for your instance. To ensure that \n\t\t\tUDP traffic can use ENA Express when you launch an instance, you must also set \n\t\t\tEnaSrdEnabled in the EnaSrdSpecificationRequest to true.
Enables Allowed AMIs for your account in the specified Amazon Web Services Region. Two values are\n accepted:
\n\n enabled: The image criteria in your Allowed AMIs settings are applied. As\n a result, only AMIs matching these criteria are discoverable and can be used by your\n account to launch instances.
\n audit-mode: The image criteria in your Allowed AMIs settings are not\n applied. No restrictions are placed on AMI discoverability or usage. Users in your account\n can launch instances using any public AMI or AMI shared with your account.
The purpose of audit-mode is to indicate which AMIs will be affected when\n Allowed AMIs is enabled. In audit-mode, each AMI displays either\n \"ImageAllowed\": true or \"ImageAllowed\": false to indicate\n whether the AMI will be discoverable and available to users in the account when Allowed\n AMIs is enabled.
The Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of\n the criteria you set, the AMIs created by your account will always be discoverable and\n usable by users in your account.
\nFor more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Enables Allowed AMIs for your account in the specified Amazon Web Services Region. Two values are\n accepted:
\n\n enabled: The image criteria in your Allowed AMIs settings are applied. As\n a result, only AMIs matching these criteria are discoverable and can be used by your\n account to launch instances.
\n audit-mode: The image criteria in your Allowed AMIs settings are not\n applied. No restrictions are placed on AMI discoverability or usage. Users in your account\n can launch instances using any public AMI or AMI shared with your account.
The purpose of audit-mode is to indicate which AMIs will be affected when\n Allowed AMIs is enabled. In audit-mode, each AMI displays either\n \"ImageAllowed\": true or \"ImageAllowed\": false to indicate\n whether the AMI will be discoverable and available to users in the account when Allowed\n AMIs is enabled.
The Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of\n the criteria you set, the AMIs created by your account will always be discoverable and\n usable by users in your account.
\nFor more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#EnableAllowedImagesSettingsRequest": { @@ -50036,7 +50897,7 @@ "target": "com.amazonaws.ec2#EnableImageResult" }, "traits": { - "smithy.api#documentation": "Re-enables a disabled AMI. The re-enabled AMI is marked as available and can\n be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services\n accounts, organizations, and Organizational Units that lost access to the AMI when it was\n disabled do not regain access automatically. Once the AMI is available, it can be shared with\n them again.
Only the AMI owner can re-enable a disabled AMI.
\nFor more information, see Disable an AMI in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Re-enables a disabled AMI. The re-enabled AMI is marked as available and can\n be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services\n accounts, organizations, and Organizational Units that lost access to the AMI when it was\n disabled do not regain access automatically. Once the AMI is available, it can be shared with\n them again.
Only the AMI owner can re-enable a disabled AMI.
\nFor more information, see Disable an AMI in the\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#EnableImageBlockPublicAccess": { @@ -50048,7 +50909,7 @@ "target": "com.amazonaws.ec2#EnableImageBlockPublicAccessResult" }, "traits": { - "smithy.api#documentation": "Enables block public access for AMIs at the account level in the\n specified Amazon Web Services Region. This prevents the public sharing of your AMIs. However, if you already\n have public AMIs, they will remain publicly available.
\nThe API can take up to 10 minutes to configure this setting. During this time, if you run\n GetImageBlockPublicAccessState, the response will be unblocked. When\n the API has completed the configuration, the response will be\n block-new-sharing.
For more information, see Block\n public access to your AMIs in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Enables block public access for AMIs at the account level in the\n specified Amazon Web Services Region. This prevents the public sharing of your AMIs. However, if you already\n have public AMIs, they will remain publicly available.
\nThe API can take up to 10 minutes to configure this setting. During this time, if you run\n GetImageBlockPublicAccessState, the response will be unblocked. When\n the API has completed the configuration, the response will be\n block-new-sharing.
For more information, see Block\n public access to your AMIs in the Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#EnableImageBlockPublicAccessRequest": { @@ -50098,7 +50959,7 @@ "target": "com.amazonaws.ec2#EnableImageDeprecationResult" }, "traits": { - "smithy.api#documentation": "Enables deprecation of the specified AMI at the specified date and time.
\nFor more information, see Deprecate an AMI in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Enables deprecation of the specified AMI at the specified date and time.
\nFor more information, see Deprecate an AMI in the\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#EnableImageDeprecationRequest": { @@ -50116,7 +50977,7 @@ "target": "com.amazonaws.ec2#MillisecondDateTime", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The date and time to deprecate the AMI, in UTC, in the following format:\n YYYY-MM-DDTHH:MM:SSZ.\n If you specify a value for seconds, Amazon EC2 rounds the seconds to the nearest minute.
\nYou can’t specify a date in the past. The upper limit for DeprecateAt is 10\n years from now, except for public AMIs, where the upper limit is 2 years from the creation\n date.
The date and time to deprecate the AMI, in UTC, in the following format:\n YYYY-MM-DDTHH:MM:SSZ.\n If you specify a value for seconds, Amazon EC2 rounds the seconds to the nearest minute.
\nYou can’t specify a date in the past. The upper limit for DeprecateAt is 10\n years from now, except for public AMIs, where the upper limit is 2 years from the creation\n date.
Enables deregistration protection for an AMI. When deregistration protection is enabled,\n the AMI can't be deregistered.
\nTo allow the AMI to be deregistered, you must first disable deregistration protection\n using DisableImageDeregistrationProtection.
\nFor more information, see Protect an\n AMI from deregistration in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Enables deregistration protection for an AMI. When deregistration protection is enabled,\n the AMI can't be deregistered.
\nTo allow the AMI to be deregistered, you must first disable deregistration protection\n using DisableImageDeregistrationProtection.
\nFor more information, see Protect an\n AMI from deregistration in the Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#EnableImageDeregistrationProtectionRequest": { @@ -50333,6 +51194,64 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#EnableRouteServerPropagation": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#EnableRouteServerPropagationRequest" + }, + "output": { + "target": "com.amazonaws.ec2#EnableRouteServerPropagationResult" + }, + "traits": { + "smithy.api#documentation": "Defines which route tables the route server can update with routes.
\nWhen enabled, route server propagation installs the routes in the FIB on the route table you've specified. Route server supports IPv4 and IPv6 route propagation.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#EnableRouteServerPropagationRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server for which to enable propagation.
", + "smithy.api#required": {} + } + }, + "RouteTableId": { + "target": "com.amazonaws.ec2#RouteTableId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route table to which route server will propagate routes.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the enabled route server propagation.
", + "smithy.api#xmlName": "routeServerPropagation" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#EnableSerialConsoleAccess": { "type": "operation", "input": { @@ -52249,7 +53168,7 @@ } }, "traits": { - "smithy.api#documentation": "Request to create a launch template for a Windows fast launch enabled AMI.
\nNote - You can specify either the LaunchTemplateName or the\n LaunchTemplateId, but not both.
Request to create a launch template for a Windows fast launch enabled AMI.
\nNote - You can specify either the LaunchTemplateName or the\n LaunchTemplateId, but not both.
Gets the current state of the Allowed AMIs setting and the list of Allowed AMIs criteria\n at the account level in the specified Region.
\nThe Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of\n the criteria you set, the AMIs created by your account will always be discoverable and\n usable by users in your account.
\nFor more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Gets the current state of the Allowed AMIs setting and the list of Allowed AMIs criteria\n at the account level in the specified Region.
\nThe Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of\n the criteria you set, the AMIs created by your account will always be discoverable and\n usable by users in your account.
\nFor more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#GetAllowedImagesSettingsRequest": { @@ -55415,7 +56334,7 @@ "target": "com.amazonaws.ec2#GetImageBlockPublicAccessStateResult" }, "traits": { - "smithy.api#documentation": "Gets the current state of block public access for AMIs at the account\n level in the specified Amazon Web Services Region.
\nFor more information, see Block\n public access to your AMIs in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Gets the current state of block public access for AMIs at the account\n level in the specified Amazon Web Services Region.
\nFor more information, see Block\n public access to your AMIs in the Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#GetImageBlockPublicAccessStateRequest": { @@ -57049,6 +57968,197 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#GetRouteServerAssociations": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#GetRouteServerAssociationsRequest" + }, + "output": { + "target": "com.amazonaws.ec2#GetRouteServerAssociationsResult" + }, + "traits": { + "smithy.api#documentation": "Gets information about the associations for the specified route server.
\nA route server association is the connection established between a route server and a VPC.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#GetRouteServerAssociationsRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server for which to get association information.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the associations for the specified route server.
", + "smithy.api#xmlName": "routeServerAssociationSet" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ec2#GetRouteServerPropagations": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#GetRouteServerPropagationsRequest" + }, + "output": { + "target": "com.amazonaws.ec2#GetRouteServerPropagationsResult" + }, + "traits": { + "smithy.api#documentation": "Gets information about the route propagations for the specified route server.
\nWhen enabled, route server propagation installs the routes in the FIB on the route table you've specified. Route server supports IPv4 and IPv6 route propagation.
\nAmazon VPC Route Server simplifies routing for traffic between workloads that are deployed within a VPC and its internet gateways. With this feature, \nVPC Route Server dynamically updates VPC and internet gateway route tables with your preferred IPv4 or IPv6 routes to achieve routing fault tolerance for those workloads. This enables you to automatically reroute traffic within a VPC, which increases the manageability of VPC routing and interoperability with third-party workloads.
\nRoute server supports the following route table types:
\nVPC route tables not associated with subnets
\nSubnet route tables
\nInternet gateway route tables
\nRoute server does not support route tables associated with virtual private gateways. To propagate routes into a transit gateway route table, use Transit Gateway Connect.
" + } + }, + "com.amazonaws.ec2#GetRouteServerPropagationsRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server for which to get propagation information.
", + "smithy.api#required": {} + } + }, + "RouteTableId": { + "target": "com.amazonaws.ec2#RouteTableId", + "traits": { + "smithy.api#documentation": "The ID of the route table for which to get propagation information.
" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the route propagations for the specified route server.
", + "smithy.api#xmlName": "routeServerPropagationSet" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.ec2#GetRouteServerRoutingDatabase": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#GetRouteServerRoutingDatabaseRequest" + }, + "output": { + "target": "com.amazonaws.ec2#GetRouteServerRoutingDatabaseResult" + }, + "traits": { + "smithy.api#documentation": "Gets the routing database for the specified route server. The Routing Information Base (RIB) serves as a database that stores all the routing information and network topology data collected by a router or routing system, such as routes learned from BGP peers. The RIB is constantly updated as new routing information is received or existing routes change. This ensures that the route server always has the most current view of the network topology and can make optimal routing decisions.
\nAmazon VPC Route Server simplifies routing for traffic between workloads that are deployed within a VPC and its internet gateways. With this feature, \nVPC Route Server dynamically updates VPC and internet gateway route tables with your preferred IPv4 or IPv6 routes to achieve routing fault tolerance for those workloads. This enables you to automatically reroute traffic within a VPC, which increases the manageability of VPC routing and interoperability with third-party workloads.
\nRoute server supports the following route table types:
\nVPC route tables not associated with subnets
\nSubnet route tables
\nInternet gateway route tables
\nRoute server does not support route tables associated with virtual private gateways. To propagate routes into a transit gateway route table, use Transit Gateway Connect.
" + } + }, + "com.amazonaws.ec2#GetRouteServerRoutingDatabaseRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server for which to get the routing database.
", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "The token for the next page of results.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#RouteServerMaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of routing database entries to return in a single response.
" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Filters to apply to the routing database query.
", + "smithy.api#xmlName": "Filter" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#GetRouteServerRoutingDatabaseResult": { + "type": "structure", + "members": { + "AreRoutesPersisted": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "AreRoutesPersisted", + "smithy.api#documentation": "Indicates whether routes are being persisted in the routing database.
", + "smithy.api#xmlName": "areRoutesPersisted" + } + }, + "Routes": { + "target": "com.amazonaws.ec2#RouteServerRouteList", + "traits": { + "aws.protocols#ec2QueryName": "RouteSet", + "smithy.api#documentation": "The collection of routes in the route server's routing database.
", + "smithy.api#xmlName": "routeSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null when there are no more results to return.
If true, the AMI satisfies the criteria for Allowed AMIs and can be\n discovered and used in the account. If false and Allowed AMIs is set to\n enabled, the AMI can't be discovered or used in the account. If\n false and Allowed AMIs is set to audit-mode, the AMI can be\n discovered and used in the account.
For more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
", + "smithy.api#documentation": "If true, the AMI satisfies the criteria for Allowed AMIs and can be\n discovered and used in the account. If false and Allowed AMIs is set to\n enabled, the AMI can't be discovered or used in the account. If\n false and Allowed AMIs is set to audit-mode, the AMI can be\n discovered and used in the account.
For more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
", "smithy.api#xmlName": "imageAllowed" } }, @@ -60119,7 +61229,7 @@ } }, "traits": { - "smithy.api#documentation": "The list of criteria that are evaluated to determine whch AMIs are discoverable and usable\n in the account in the specified Amazon Web Services Region. Currently, the only criteria that can be\n specified are AMI providers.
\nUp to 10 imageCriteria objects can be specified, and up to a total of 200\n values for all imageProviders. For more information, see JSON\n configuration for the Allowed AMIs criteria in the\n Amazon EC2 User Guide.
The list of criteria that are evaluated to determine which AMIs are discoverable and usable\n in the account in the specified Amazon Web Services Region. Currently, the only criteria that can be\n specified are AMI providers.
\nUp to 10 imageCriteria objects can be specified, and up to a total of 200\n values for all imageProviders. For more information, see JSON\n configuration for the Allowed AMIs criteria in the\n Amazon EC2 User Guide.
The list of criteria that are evaluated to determine which AMIs are discoverable and usable\n in the account in the specified Amazon Web Services Region. Currently, the only criteria that can be\n specified are AMI providers.
\nUp to 10 imageCriteria objects can be specified, and up to a total of 200\n values for all imageProviders. For more information, see JSON\n configuration for the Allowed AMIs criteria in the\n Amazon EC2 User Guide.
The list of criteria that are evaluated to determine which AMIs are discoverable and usable\n in the account in the specified Amazon Web Services Region. Currently, the only criteria that can be\n specified are AMI providers.
\nUp to 10 imageCriteria objects can be specified, and up to a total of 200\n values for all imageProviders. For more information, see JSON\n configuration for the Allowed AMIs criteria in the\n Amazon EC2 User Guide.
The alias of the AMI owner.
\nValid values: amazon | aws-backup-vault |\n aws-marketplace\n
The alias of the AMI owner.
\nValid values: amazon | aws-backup-vault |\n aws-marketplace\n
The deprecation date and time of the AMI, in UTC, in the following format:\n YYYY-MM-DDTHH:MM:SSZ.
", + "smithy.api#documentation": "The deprecation date and time of the AMI, in UTC, in the following format:\n YYYY-MM-DDTHH:MM:SSZ.
", "smithy.api#xmlName": "deprecationTime" } }, @@ -76338,7 +77448,7 @@ "DeviceIndex": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "The device index for the network interface attachment. Each network interface requires\n a device index. If you create a launch template that includes secondary network interfaces \n but not a primary network interface, then you must add a primary network interface as a \n launch parameter when you launch an instance from the template.
" + "smithy.api#documentation": "The device index for the network interface attachment. The primary network interface\n has a device index of 0. Each network interface is of type interface, you\n must specify a device index. If you create a launch template that includes secondary \n network interfaces but not a primary network interface, then you must add a primary \n network interface as a launch parameter when you launch an instance from the template.
Lists one or more AMIs that are currently in the Recycle Bin. For more information, see\n Recycle\n Bin in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "Lists one or more AMIs that are currently in the Recycle Bin. For more information, see\n Recycle\n Bin in the Amazon EC2 User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -80244,7 +81354,7 @@ "target": "com.amazonaws.ec2#InstanceAttributeName", "traits": { "aws.protocols#ec2QueryName": "Attribute", - "smithy.api#documentation": "The name of the attribute to modify.
\nYou can modify the following attributes only: disableApiTermination |\n instanceType | kernel | ramdisk |\n instanceInitiatedShutdownBehavior | blockDeviceMapping\n | userData | sourceDestCheck | groupSet |\n ebsOptimized | sriovNetSupport |\n enaSupport | nvmeSupport | disableApiStop\n | enclaveOptions\n
The name of the attribute to modify.
\nWhen changing the instance type: If the original instance type is configured for\n configurable bandwidth, and the desired instance type doesn't support configurable\n bandwidth, first set the existing bandwidth configuration to default\n using the ModifyInstanceNetworkPerformanceOptions\n operation.
You can modify the following attributes only: disableApiTermination |\n instanceType | kernel | ramdisk |\n instanceInitiatedShutdownBehavior | blockDeviceMapping\n | userData | sourceDestCheck | groupSet |\n ebsOptimized | sriovNetSupport |\n enaSupport | nvmeSupport | disableApiStop\n | enclaveOptions\n
Modifies the configuration of an existing route server.
\nAmazon VPC Route Server simplifies routing for traffic between workloads that are deployed within a VPC and its internet gateways. With this feature, \nVPC Route Server dynamically updates VPC and internet gateway route tables with your preferred IPv4 or IPv6 routes to achieve routing fault tolerance for those workloads. This enables you to automatically reroute traffic within a VPC, which increases the manageability of VPC routing and interoperability with third-party workloads.
\nRoute server supports the following route table types:
\nVPC route tables not associated with subnets
\nSubnet route tables
\nInternet gateway route tables
\nRoute server does not support route tables associated with virtual private gateways. To propagate routes into a transit gateway route table, use Transit Gateway Connect.
\nFor more information see Dynamic routing in your VPC with VPC Route Server in the Amazon VPC User Guide.
" + } + }, + "com.amazonaws.ec2#ModifyRouteServerRequest": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The ID of the route server to modify.
", + "smithy.api#required": {} + } + }, + "PersistRoutes": { + "target": "com.amazonaws.ec2#RouteServerPersistRoutesAction", + "traits": { + "smithy.api#documentation": "Specifies whether to persist routes after all BGP sessions are terminated.
\nenable: Routes will be persisted in FIB and RIB after all BGP sessions are terminated.
\ndisable: Routes will not be persisted in FIB and RIB after all BGP sessions are terminated.
\nreset: If a route server has persisted routes due to all BGP sessions having ended, reset will withdraw all routes and reset route server to an empty FIB and RIB.
\nThe number of minutes a route server will wait after BGP is re-established to unpersist the routes in the FIB and RIB. Value must be in the range of 1-5. Required if PersistRoutes is enabled.
If you set the duration to 1 minute, then when your network appliance re-establishes BGP with route server, it has 1 minute to relearn its adjacent network and advertise those routes to route server before route server resumes normal functionality. In most cases, 1 minute is probably sufficient. If, however, you have concerns that your BGP network may not be capable of fully re-establishing and re-learning everything in 1 minute, you can increase the duration up to 5 minutes.
" + } + }, + "SnsNotificationsEnabled": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "Specifies whether to enable SNS notifications for route server events. Enabling SNS notifications persists BGP status changes to an SNS topic provisioned by Amazon Web Services.
" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "A check for whether you have the required permissions for the action without actually making the request \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
Information about the modified route server.
", + "smithy.api#xmlName": "routeServer" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#ModifySecurityGroupRules": { "type": "operation", "input": { @@ -92922,7 +94100,7 @@ "target": "com.amazonaws.ec2#ReplaceImageCriteriaInAllowedImagesSettingsResult" }, "traits": { - "smithy.api#documentation": "Sets or replaces the criteria for Allowed AMIs.
\nThe Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of\n the criteria you set, the AMIs created by your account will always be discoverable and\n usable by users in your account.
\nFor more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Sets or replaces the criteria for Allowed AMIs.
\nThe Allowed AMIs feature does not restrict the AMIs owned by your account. Regardless of\n the criteria you set, the AMIs created by your account will always be discoverable and\n usable by users in your account.
\nFor more information, see Control the discovery and use of AMIs in\n Amazon EC2 with Allowed AMIs in\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#ReplaceImageCriteriaInAllowedImagesSettingsRequest": { @@ -96598,6 +97776,24 @@ "smithy.api#enumValue": "vpc-block-public-access-exclusion" } }, + "route_server": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "route-server" + } + }, + "route_server_endpoint": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "route-server-endpoint" + } + }, + "route_server_peer": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "route-server-peer" + } + }, "ipam_resource_discovery": { "target": "smithy.api#Unit", "traits": { @@ -97997,6 +99193,941 @@ } } }, + "com.amazonaws.ec2#RouteServer": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerId", + "smithy.api#documentation": "The unique identifier of the route server.
", + "smithy.api#xmlName": "routeServerId" + } + }, + "AmazonSideAsn": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "aws.protocols#ec2QueryName": "AmazonSideAsn", + "smithy.api#documentation": "The Border Gateway Protocol (BGP) Autonomous System Number (ASN) for the appliance. Valid values are from 1 to 4294967295. We recommend using a private ASN in the 64512–65534 (16-bit ASN) or 4200000000–4294967294 (32-bit ASN) range.
", + "smithy.api#xmlName": "amazonSideAsn" + } + }, + "State": { + "target": "com.amazonaws.ec2#RouteServerState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "The current state of the route server.
", + "smithy.api#xmlName": "state" + } + }, + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "Any tags assigned to the route server.
", + "smithy.api#xmlName": "tagSet" + } + }, + "PersistRoutesState": { + "target": "com.amazonaws.ec2#RouteServerPersistRoutesState", + "traits": { + "aws.protocols#ec2QueryName": "PersistRoutesState", + "smithy.api#documentation": "The current state of route persistence for the route server.
", + "smithy.api#xmlName": "persistRoutesState" + } + }, + "PersistRoutesDuration": { + "target": "com.amazonaws.ec2#BoxedLong", + "traits": { + "aws.protocols#ec2QueryName": "PersistRoutesDuration", + "smithy.api#documentation": "The number of minutes a route server will wait after BGP is re-established to unpersist the routes in the FIB and RIB. Value must be in the range of 1-5. The default value is 1. Only valid if persistRoutesState is 'enabled'.
If you set the duration to 1 minute, then when your network appliance re-establishes BGP with route server, it has 1 minute to relearn its adjacent network and advertise those routes to route server before route server resumes normal functionality. In most cases, 1 minute is probably sufficient. If, however, you have concerns that your BGP network may not be capable of fully re-establishing and re-learning everything in 1 minute, you can increase the duration up to 5 minutes.
", + "smithy.api#xmlName": "persistRoutesDuration" + } + }, + "SnsNotificationsEnabled": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "SnsNotificationsEnabled", + "smithy.api#documentation": "Indicates whether SNS notifications are enabled for the route server. Enabling SNS notifications persists BGP status changes to an SNS topic provisioned by Amazon Web Services.
", + "smithy.api#xmlName": "snsNotificationsEnabled" + } + }, + "SnsTopicArn": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SnsTopicArn", + "smithy.api#documentation": "The ARN of the SNS topic where notifications are published.
", + "smithy.api#xmlName": "snsTopicArn" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes a route server and its configuration.
\nAmazon VPC Route Server simplifies routing for traffic between workloads that are deployed within a VPC and its internet gateways. With this feature, \nVPC Route Server dynamically updates VPC and internet gateway route tables with your preferred IPv4 or IPv6 routes to achieve routing fault tolerance for those workloads. This enables you to automatically reroute traffic within a VPC, which increases the manageability of VPC routing and interoperability with third-party workloads.
\nRoute server supports the following route table types:
\nVPC route tables not associated with subnets
\nSubnet route tables
\nInternet gateway route tables
\nRoute server does not support route tables associated with virtual private gateways. To propagate routes into a transit gateway route table, use Transit Gateway Connect.
" + } + }, + "com.amazonaws.ec2#RouteServerAssociation": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerId", + "smithy.api#documentation": "The ID of the associated route server.
", + "smithy.api#xmlName": "routeServerId" + } + }, + "VpcId": { + "target": "com.amazonaws.ec2#VpcId", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#documentation": "The ID of the associated VPC.
", + "smithy.api#xmlName": "vpcId" + } + }, + "State": { + "target": "com.amazonaws.ec2#RouteServerAssociationState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "The current state of the association.
", + "smithy.api#xmlName": "state" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes the association between a route server and a VPC.
\nA route server association is the connection established between a route server and a VPC.
" + } + }, + "com.amazonaws.ec2#RouteServerAssociationState": { + "type": "enum", + "members": { + "ASSOCIATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "associating" + } + }, + "ASSOCIATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "associated" + } + }, + "DISASSOCIATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "disassociating" + } + } + } + }, + "com.amazonaws.ec2#RouteServerAssociationsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#RouteServerAssociation", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, + "com.amazonaws.ec2#RouteServerBfdState": { + "type": "enum", + "members": { + "UP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "up" + } + }, + "DOWN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "down" + } + } + } + }, + "com.amazonaws.ec2#RouteServerBfdStatus": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.ec2#RouteServerBfdState", + "traits": { + "aws.protocols#ec2QueryName": "Status", + "smithy.api#documentation": "The operational status of the BFD session.
", + "smithy.api#xmlName": "status" + } + } + }, + "traits": { + "smithy.api#documentation": "The current status of Bidirectional Forwarding Detection (BFD) for a BGP session.
" + } + }, + "com.amazonaws.ec2#RouteServerBgpOptions": { + "type": "structure", + "members": { + "PeerAsn": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "aws.protocols#ec2QueryName": "PeerAsn", + "smithy.api#documentation": "The Border Gateway Protocol (BGP) Autonomous System Number (ASN) for the appliance. Valid values are from 1 to 4294967295. We recommend using a private ASN in the 64512–65534 (16-bit ASN) or 4200000000–4294967294 (32-bit ASN) range.
", + "smithy.api#xmlName": "peerAsn" + } + }, + "PeerLivenessDetection": { + "target": "com.amazonaws.ec2#RouteServerPeerLivenessMode", + "traits": { + "aws.protocols#ec2QueryName": "PeerLivenessDetection", + "smithy.api#documentation": "The liveness detection protocol used for the BGP peer.
\nThe requested liveness detection protocol for the BGP peer.
\n\n bgp-keepalive: The standard BGP keep alive mechanism (RFC4271) that is stable but may take longer to fail-over in cases of network impact or router failure.
\n bfd: An additional Bidirectional Forwarding Detection (BFD) protocol (RFC5880) that enables fast failover by using more sensitive liveness detection.
Defaults to bgp-keepalive.
The BGP configuration options for a route server peer.
" + } + }, + "com.amazonaws.ec2#RouteServerBgpOptionsRequest": { + "type": "structure", + "members": { + "PeerAsn": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The Border Gateway Protocol (BGP) Autonomous System Number (ASN) for the appliance. Valid values are from 1 to 4294967295. We recommend using a private ASN in the 64512–65534 (16-bit ASN) or 4200000000–4294967294 (32-bit ASN) range.
", + "smithy.api#required": {} + } + }, + "PeerLivenessDetection": { + "target": "com.amazonaws.ec2#RouteServerPeerLivenessMode", + "traits": { + "smithy.api#documentation": "The requested liveness detection protocol for the BGP peer.
\n\n bgp-keepalive: The standard BGP keep alive mechanism (RFC4271) that is stable but may take longer to fail-over in cases of network impact or router failure.
\n bfd: An additional Bidirectional Forwarding Detection (BFD) protocol (RFC5880) that enables fast failover by using more sensitive liveness detection.
Defaults to bgp-keepalive.
The BGP configuration options requested for a route server peer.
" + } + }, + "com.amazonaws.ec2#RouteServerBgpState": { + "type": "enum", + "members": { + "UP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "up" + } + }, + "DOWN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "down" + } + } + } + }, + "com.amazonaws.ec2#RouteServerBgpStatus": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.ec2#RouteServerBgpState", + "traits": { + "aws.protocols#ec2QueryName": "Status", + "smithy.api#documentation": "The operational status of the BGP session. The status enables you to monitor session liveness if you lack monitoring on your router/appliance.
", + "smithy.api#xmlName": "status" + } + } + }, + "traits": { + "smithy.api#documentation": "The current status of a BGP session.
" + } + }, + "com.amazonaws.ec2#RouteServerEndpoint": { + "type": "structure", + "members": { + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerId", + "smithy.api#documentation": "The ID of the route server associated with this endpoint.
", + "smithy.api#xmlName": "routeServerId" + } + }, + "RouteServerEndpointId": { + "target": "com.amazonaws.ec2#RouteServerEndpointId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerEndpointId", + "smithy.api#documentation": "The unique identifier of the route server endpoint.
", + "smithy.api#xmlName": "routeServerEndpointId" + } + }, + "VpcId": { + "target": "com.amazonaws.ec2#VpcId", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#documentation": "The ID of the VPC containing the endpoint.
", + "smithy.api#xmlName": "vpcId" + } + }, + "SubnetId": { + "target": "com.amazonaws.ec2#SubnetId", + "traits": { + "aws.protocols#ec2QueryName": "SubnetId", + "smithy.api#documentation": "The ID of the subnet to place the route server endpoint into.
", + "smithy.api#xmlName": "subnetId" + } + }, + "EniId": { + "target": "com.amazonaws.ec2#NetworkInterfaceId", + "traits": { + "aws.protocols#ec2QueryName": "EniId", + "smithy.api#documentation": "The ID of the Elastic network interface for the endpoint.
", + "smithy.api#xmlName": "eniId" + } + }, + "EniAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "EniAddress", + "smithy.api#documentation": "The IP address of the Elastic network interface for the endpoint.
", + "smithy.api#xmlName": "eniAddress" + } + }, + "State": { + "target": "com.amazonaws.ec2#RouteServerEndpointState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "The current state of the route server endpoint.
", + "smithy.api#xmlName": "state" + } + }, + "FailureReason": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "FailureReason", + "smithy.api#documentation": "The reason for any failure in endpoint creation or operation.
", + "smithy.api#xmlName": "failureReason" + } + }, + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "Any tags assigned to the route server endpoint.
", + "smithy.api#xmlName": "tagSet" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes a route server endpoint and its properties.
\nA route server endpoint is an Amazon Web Services-managed component inside a subnet that facilitates BGP (Border Gateway Protocol) connections between your route server and your BGP peers.
" + } + }, + "com.amazonaws.ec2#RouteServerEndpointId": { + "type": "string" + }, + "com.amazonaws.ec2#RouteServerEndpointIdsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#RouteServerEndpointId" + } + }, + "com.amazonaws.ec2#RouteServerEndpointState": { + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "pending" + } + }, + "AVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "available" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "deleting" + } + }, + "DELETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "deleted" + } + }, + "FAILING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "failing" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "failed" + } + }, + "DELETE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "delete-failed" + } + } + } + }, + "com.amazonaws.ec2#RouteServerEndpointsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#RouteServerEndpoint", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, + "com.amazonaws.ec2#RouteServerId": { + "type": "string" + }, + "com.amazonaws.ec2#RouteServerIdsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#RouteServerId" + } + }, + "com.amazonaws.ec2#RouteServerMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 5, + "max": 1000 + } + } + }, + "com.amazonaws.ec2#RouteServerPeer": { + "type": "structure", + "members": { + "RouteServerPeerId": { + "target": "com.amazonaws.ec2#RouteServerPeerId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerPeerId", + "smithy.api#documentation": "The unique identifier of the route server peer.
", + "smithy.api#xmlName": "routeServerPeerId" + } + }, + "RouteServerEndpointId": { + "target": "com.amazonaws.ec2#RouteServerEndpointId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerEndpointId", + "smithy.api#documentation": "The ID of the route server endpoint associated with this peer.
", + "smithy.api#xmlName": "routeServerEndpointId" + } + }, + "RouteServerId": { + "target": "com.amazonaws.ec2#RouteServerId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerId", + "smithy.api#documentation": "The ID of the route server associated with this peer.
", + "smithy.api#xmlName": "routeServerId" + } + }, + "VpcId": { + "target": "com.amazonaws.ec2#VpcId", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#documentation": "The ID of the VPC containing the route server peer.
", + "smithy.api#xmlName": "vpcId" + } + }, + "SubnetId": { + "target": "com.amazonaws.ec2#SubnetId", + "traits": { + "aws.protocols#ec2QueryName": "SubnetId", + "smithy.api#documentation": "The ID of the subnet containing the route server peer.
", + "smithy.api#xmlName": "subnetId" + } + }, + "State": { + "target": "com.amazonaws.ec2#RouteServerPeerState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "The current state of the route server peer.
", + "smithy.api#xmlName": "state" + } + }, + "FailureReason": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "FailureReason", + "smithy.api#documentation": "The reason for any failure in peer creation or operation.
", + "smithy.api#xmlName": "failureReason" + } + }, + "EndpointEniId": { + "target": "com.amazonaws.ec2#NetworkInterfaceId", + "traits": { + "aws.protocols#ec2QueryName": "EndpointEniId", + "smithy.api#documentation": "The ID of the Elastic network interface for the route server endpoint.
", + "smithy.api#xmlName": "endpointEniId" + } + }, + "EndpointEniAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "EndpointEniAddress", + "smithy.api#documentation": "The IP address of the Elastic network interface for the route server endpoint.
", + "smithy.api#xmlName": "endpointEniAddress" + } + }, + "PeerAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PeerAddress", + "smithy.api#documentation": "The IPv4 address of the peer device.
", + "smithy.api#xmlName": "peerAddress" + } + }, + "BgpOptions": { + "target": "com.amazonaws.ec2#RouteServerBgpOptions", + "traits": { + "aws.protocols#ec2QueryName": "BgpOptions", + "smithy.api#documentation": "The BGP configuration options for this peer, including ASN (Autonomous System Number) and BFD (Bidrectional Forwarding Detection) settings.
", + "smithy.api#xmlName": "bgpOptions" + } + }, + "BgpStatus": { + "target": "com.amazonaws.ec2#RouteServerBgpStatus", + "traits": { + "aws.protocols#ec2QueryName": "BgpStatus", + "smithy.api#documentation": "The current status of the BGP session with this peer.
", + "smithy.api#xmlName": "bgpStatus" + } + }, + "BfdStatus": { + "target": "com.amazonaws.ec2#RouteServerBfdStatus", + "traits": { + "aws.protocols#ec2QueryName": "BfdStatus", + "smithy.api#documentation": "The current status of the BFD session with this peer.
", + "smithy.api#xmlName": "bfdStatus" + } + }, + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "Any tags assigned to the route server peer.
", + "smithy.api#xmlName": "tagSet" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes a BGP peer configuration for a route server endpoint.
\nA route server peer is a session between a route server endpoint and the device deployed in Amazon Web Services (such as a firewall appliance or other network security function running on an EC2 instance). The device must meet these requirements:
\nHave an elastic network interface in the VPC
\nSupport BGP (Border Gateway Protocol)
\nCan initiate BGP sessions
\nThe ID of the route server configured for route propagation.
", + "smithy.api#xmlName": "routeServerId" + } + }, + "RouteTableId": { + "target": "com.amazonaws.ec2#RouteTableId", + "traits": { + "aws.protocols#ec2QueryName": "RouteTableId", + "smithy.api#documentation": "The ID of the route table configured for route server propagation.
", + "smithy.api#xmlName": "routeTableId" + } + }, + "State": { + "target": "com.amazonaws.ec2#RouteServerPropagationState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "The current state of route propagation.
", + "smithy.api#xmlName": "state" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes the route propagation configuration between a route server and a route table.
\nWhen enabled, route server propagation installs the routes in the FIB on the route table you've specified. Route server supports IPv4 and IPv6 route propagation.
" + } + }, + "com.amazonaws.ec2#RouteServerPropagationState": { + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "pending" + } + }, + "AVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "available" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "deleting" + } + } + } + }, + "com.amazonaws.ec2#RouteServerPropagationsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#RouteServerPropagation", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, + "com.amazonaws.ec2#RouteServerRoute": { + "type": "structure", + "members": { + "RouteServerEndpointId": { + "target": "com.amazonaws.ec2#RouteServerEndpointId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerEndpointId", + "smithy.api#documentation": "The ID of the route server endpoint that received this route.
", + "smithy.api#xmlName": "routeServerEndpointId" + } + }, + "RouteServerPeerId": { + "target": "com.amazonaws.ec2#RouteServerPeerId", + "traits": { + "aws.protocols#ec2QueryName": "RouteServerPeerId", + "smithy.api#documentation": "The ID of the route server peer that advertised this route.
", + "smithy.api#xmlName": "routeServerPeerId" + } + }, + "RouteInstallationDetails": { + "target": "com.amazonaws.ec2#RouteServerRouteInstallationDetails", + "traits": { + "aws.protocols#ec2QueryName": "RouteInstallationDetailSet", + "smithy.api#documentation": "Details about the installation status of this route in route tables.
", + "smithy.api#xmlName": "routeInstallationDetailSet" + } + }, + "RouteStatus": { + "target": "com.amazonaws.ec2#RouteServerRouteStatus", + "traits": { + "aws.protocols#ec2QueryName": "RouteStatus", + "smithy.api#documentation": "The current status of the route in the routing database. Values are in-rib or in-fib depending on if the routes are in the RIB or the FIB database.
The Routing Information Base (RIB) serves as a database that stores all the routing information and network topology data collected by a router or routing system, such as routes learned from BGP peers. The RIB is constantly updated as new routing information is received or existing routes change. This ensures that the route server always has the most current view of the network topology and can make optimal routing decisions.
\nThe Forwarding Information Base (FIB) serves as a forwarding table for what route server has determined are the best-path routes in the RIB after evaluating all available routing information and policies. The FIB routes are installed on the route tables. The FIB is recomputed whenever there are changes to the RIB.
", + "smithy.api#xmlName": "routeStatus" + } + }, + "Prefix": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Prefix", + "smithy.api#documentation": "The destination CIDR block of the route.
", + "smithy.api#xmlName": "prefix" + } + }, + "AsPaths": { + "target": "com.amazonaws.ec2#AsPath", + "traits": { + "aws.protocols#ec2QueryName": "AsPathSet", + "smithy.api#documentation": "The AS path attributes of the BGP route.
", + "smithy.api#xmlName": "asPathSet" + } + }, + "Med": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "Med", + "smithy.api#documentation": "The Multi-Exit Discriminator (MED) value of the BGP route.
", + "smithy.api#xmlName": "med" + } + }, + "NextHopIp": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextHopIp", + "smithy.api#documentation": "The IP address for the next hop.
", + "smithy.api#xmlName": "nextHopIp" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes a route in the route server's routing database.
" + } + }, + "com.amazonaws.ec2#RouteServerRouteInstallationDetail": { + "type": "structure", + "members": { + "RouteTableId": { + "target": "com.amazonaws.ec2#RouteTableId", + "traits": { + "aws.protocols#ec2QueryName": "RouteTableId", + "smithy.api#documentation": "The ID of the route table where the route is being installed.
", + "smithy.api#xmlName": "routeTableId" + } + }, + "RouteInstallationStatus": { + "target": "com.amazonaws.ec2#RouteServerRouteInstallationStatus", + "traits": { + "aws.protocols#ec2QueryName": "RouteInstallationStatus", + "smithy.api#documentation": "The current installation status of the route in the route table.
", + "smithy.api#xmlName": "routeInstallationStatus" + } + }, + "RouteInstallationStatusReason": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "RouteInstallationStatusReason", + "smithy.api#documentation": "The reason for the current installation status of the route.
", + "smithy.api#xmlName": "routeInstallationStatusReason" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes the installation status of a route in a route table.
" + } + }, + "com.amazonaws.ec2#RouteServerRouteInstallationDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#RouteServerRouteInstallationDetail", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, + "com.amazonaws.ec2#RouteServerRouteInstallationStatus": { + "type": "enum", + "members": { + "INSTALLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "installed" + } + }, + "REJECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "rejected" + } + } + } + }, + "com.amazonaws.ec2#RouteServerRouteList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#RouteServerRoute", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, + "com.amazonaws.ec2#RouteServerRouteStatus": { + "type": "enum", + "members": { + "IN_RIB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "in-rib" + } + }, + "IN_FIB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "in-fib" + } + } + } + }, + "com.amazonaws.ec2#RouteServerState": { + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "pending" + } + }, + "AVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "available" + } + }, + "MODIFYING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "modifying" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "deleting" + } + }, + "DELETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "deleted" + } + } + } + }, + "com.amazonaws.ec2#RouteServersList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#RouteServer", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#RouteState": { "type": "enum", "members": { @@ -104014,7 +106145,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": 
"StoreTaskState", - "smithy.api#documentation": "The state of the store task (InProgress, Completed, or\n Failed).
The state of the store task (InProgress, Completed, or\n Failed).
If the tasks fails, the reason for the failure is returned. If the task succeeds,\n null is returned.
If the tasks fails, the reason for the failure is returned. If the task succeeds,\n null is returned.
The registry URL of the upstream public registry to use as the source for the pull\n through cache rule. The following is the syntax to use for each supported upstream\n registry.
\nAmazon ECR (ecr) –\n dkr.ecr.\n
Amazon ECR Public (ecr-public) – public.ecr.aws\n
Docker Hub (docker-hub) –\n registry-1.docker.io\n
GitHub Container Registry (github-container-registry) –\n ghcr.io\n
GitLab Container Registry (gitlab-container-registry) –\n registry.gitlab.com\n
Kubernetes (k8s) – registry.k8s.io\n
Microsoft Azure Container Registry (azure-container-registry) –\n \n
Quay (quay) – quay.io\n
The registry URL of the upstream public registry to use as the source for the pull\n through cache rule. The following is the syntax to use for each supported upstream\n registry.
\nAmazon ECR (ecr) –\n \n
Amazon ECR Public (ecr-public) – public.ecr.aws\n
Docker Hub (docker-hub) –\n registry-1.docker.io\n
GitHub Container Registry (github-container-registry) –\n ghcr.io\n
GitLab Container Registry (gitlab-container-registry) –\n registry.gitlab.com\n
Kubernetes (k8s) – registry.k8s.io\n
Microsoft Azure Container Registry (azure-container-registry) –\n \n
Quay (quay) – quay.io\n
Returns metadata about the images in a repository.
\nBeginning with Docker version 1.9, the Docker client compresses image layers\n before pushing them to a V2 Docker registry. The output of the docker\n images command shows the uncompressed image size, so it may return a\n larger image size than the image sizes returned by DescribeImages.
Returns metadata about the images in a repository.
\nStarting with Docker version 1.9, the Docker client compresses image layers before\n pushing them to a V2 Docker registry. The output of the docker images\n command shows the uncompressed image size. Therefore, Docker might return a larger\n image than the image shown in the Amazon Web Services Management Console.
A list of authorization token data objects that correspond to the\n registryIds values in the request.
A list of authorization token data objects that correspond to the\n registryIds values in the request.
The size of the authorization token returned by Amazon ECR is not fixed. We recommend\n that you don't make assumptions about the maximum size.
\nThe size, in bytes, of the image in the repository.
\nIf the image is a manifest list, this will be the max size of all manifests in the\n list.
\nStarting with Docker version 1.9, the Docker client compresses image layers before\n pushing them to a V2 Docker registry. The output of the docker images\n command shows the uncompressed image size. Therefore, Docker might return a larger\n image than the image sizes returned by DescribeImages.
The size, in bytes, of the image in the repository.
\nIf the image is a manifest list, this will be the max size of all manifests in the\n list.
\nStarting with Docker version 1.9, the Docker client compresses image layers before\n pushing them to a V2 Docker registry. The output of the docker images\n command shows the uncompressed image size. Therefore, Docker might return a larger\n image than the image shown in the Amazon Web Services Management Console.
The Amazon ECS account setting name to modify.
\nThe following are the valid values for the account setting name.
\n\n serviceLongArnFormat - When modified, the Amazon Resource Name\n\t\t\t\t\t(ARN) and resource ID format of the resource type for a specified user, role, or\n\t\t\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting\n\t\t\t\t\tmust be set for each Amazon ECS resource separately. The ARN and resource ID format\n\t\t\t\t\tof a resource is defined by the opt-in status of the user or role that created\n\t\t\t\t\tthe resource. You must turn on this setting to use Amazon ECS features such as\n\t\t\t\t\tresource tagging.
\n taskLongArnFormat - When modified, the Amazon Resource Name (ARN)\n\t\t\t\t\tand resource ID format of the resource type for a specified user, role, or the\n\t\t\t\t\troot user for an account is affected. The opt-in and opt-out account setting must\n\t\t\t\t\tbe set for each Amazon ECS resource separately. The ARN and resource ID format of a\n\t\t\t\t\tresource is defined by the opt-in status of the user or role that created the\n\t\t\t\t\tresource. You must turn on this setting to use Amazon ECS features such as resource\n\t\t\t\t\ttagging.
\n fargateFIPSMode - When turned on, you can run Fargate workloads\n\t\t\t\t\tin a manner that is compliant with Federal Information Processing Standard\n\t\t\t\t\t(FIPS-140). For more information, see Fargate\n\t\t\t\t\t\tFederal Information Processing Standard (FIPS-140).
\n containerInstanceLongArnFormat - When modified, the Amazon\n\t\t\t\t\tResource Name (ARN) and resource ID format of the resource type for a specified\n\t\t\t\t\tuser, role, or the root user for an account is affected. The opt-in and opt-out\n\t\t\t\t\taccount setting must be set for each Amazon ECS resource separately. The ARN and\n\t\t\t\t\tresource ID format of a resource is defined by the opt-in status of the user or\n\t\t\t\t\trole that created the resource. You must turn on this setting to use Amazon ECS\n\t\t\t\t\tfeatures such as resource tagging.
\n awsvpcTrunking - When modified, the elastic network interface\n\t\t\t\t\t(ENI) limit for any new container instances that support the feature is changed.\n\t\t\t\t\tIf awsvpcTrunking is turned on, any new container instances that\n\t\t\t\t\tsupport the feature are launched have the increased ENI limits available to\n\t\t\t\t\tthem. For more information, see Elastic\n\t\t\t\t\t\tNetwork Interface Trunking in the Amazon Elastic Container Service Developer Guide.
\n containerInsights - Container Insights with enhanced\n\t\t\t\t\tobservability provides all the Container Insights metrics, plus additional task\n\t\t\t\t\tand container metrics. This version supports enhanced observability for Amazon ECS\n\t\t\t\t\tclusters using the Amazon EC2 and Fargate launch types. After you configure\n\t\t\t\t\tContainer Insights with enhanced observability on Amazon ECS, Container Insights\n\t\t\t\t\tauto-collects detailed infrastructure telemetry from the cluster level down to\n\t\t\t\t\tthe container level in your environment and displays these critical performance\n\t\t\t\t\tdata in curated dashboards removing the heavy lifting in observability set-up.
To use Container Insights with enhanced observability, set the\n\t\t\t\t\t\tcontainerInsights account setting to\n\t\t\t\t\tenhanced.
To use Container Insights, set the containerInsights account\n\t\t\t\t\tsetting to enabled.
For more information, see Monitor Amazon ECS containers using Container Insights with enhanced\n\t\t\t\t\t\tobservability in the Amazon Elastic Container Service Developer Guide.
\n\n dualStackIPv6 - When turned on, when using a VPC in dual stack\n\t\t\t\t\tmode, your tasks using the awsvpc network mode can have an IPv6\n\t\t\t\t\taddress assigned. For more information on using IPv6 with tasks launched on\n\t\t\t\t\tAmazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6\n\t\t\t\t\twith tasks launched on Fargate, see Using a VPC in dual-stack mode.
\n fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a\n\t\t\t\t\tsecurity or infrastructure update is needed for an Amazon ECS task hosted on\n\t\t\t\t\tFargate, the tasks need to be stopped and new tasks launched to replace them.\n\t\t\t\t\tUse fargateTaskRetirementWaitPeriod to configure the wait time to\n\t\t\t\t\tretire a Fargate task. For information about the Fargate tasks maintenance,\n\t\t\t\t\tsee Amazon Web Services Fargate\n\t\t\t\t\t\ttask maintenance in the Amazon ECS Developer\n\t\t\t\t\tGuide.
\n tagResourceAuthorization - Amazon ECS is introducing tagging\n\t\t\t\t\tauthorization for resource creation. Users must have permissions for actions\n\t\t\t\t\tthat create the resource, such as ecsCreateCluster. If tags are\n\t\t\t\t\tspecified when you create a resource, Amazon Web Services performs additional authorization to\n\t\t\t\t\tverify if users or roles have permissions to create tags. Therefore, you must\n\t\t\t\t\tgrant explicit permissions to use the ecs:TagResource action. For\n\t\t\t\t\tmore information, see Grant permission to tag resources on creation in the\n\t\t\t\t\t\tAmazon ECS Developer Guide.
\n guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether\n\t\t\tAmazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your\n\t\t\tAmazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
The Amazon ECS account setting name to modify.
\nThe following are the valid values for the account setting name.
\n\n serviceLongArnFormat - When modified, the Amazon Resource Name\n\t\t\t\t\t(ARN) and resource ID format of the resource type for a specified user, role, or\n\t\t\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting\n\t\t\t\t\tmust be set for each Amazon ECS resource separately. The ARN and resource ID format\n\t\t\t\t\tof a resource is defined by the opt-in status of the user or role that created\n\t\t\t\t\tthe resource. You must turn on this setting to use Amazon ECS features such as\n\t\t\t\t\tresource tagging.
\n taskLongArnFormat - When modified, the Amazon Resource Name (ARN)\n\t\t\t\t\tand resource ID format of the resource type for a specified user, role, or the\n\t\t\t\t\troot user for an account is affected. The opt-in and opt-out account setting must\n\t\t\t\t\tbe set for each Amazon ECS resource separately. The ARN and resource ID format of a\n\t\t\t\t\tresource is defined by the opt-in status of the user or role that created the\n\t\t\t\t\tresource. You must turn on this setting to use Amazon ECS features such as resource\n\t\t\t\t\ttagging.
\n containerInstanceLongArnFormat - When modified, the Amazon\n\t\t\t\t\tResource Name (ARN) and resource ID format of the resource type for a specified\n\t\t\t\t\tuser, role, or the root user for an account is affected. The opt-in and opt-out\n\t\t\t\t\taccount setting must be set for each Amazon ECS resource separately. The ARN and\n\t\t\t\t\tresource ID format of a resource is defined by the opt-in status of the user or\n\t\t\t\t\trole that created the resource. You must turn on this setting to use Amazon ECS\n\t\t\t\t\tfeatures such as resource tagging.
\n awsvpcTrunking - When modified, the elastic network interface\n\t\t\t\t\t(ENI) limit for any new container instances that support the feature is changed.\n\t\t\t\t\tIf awsvpcTrunking is turned on, any new container instances that\n\t\t\t\t\tsupport the feature are launched have the increased ENI limits available to\n\t\t\t\t\tthem. For more information, see Elastic\n\t\t\t\t\t\tNetwork Interface Trunking in the Amazon Elastic Container Service Developer Guide.
\n containerInsights - Container Insights with enhanced\n\t\t\t\t\tobservability provides all the Container Insights metrics, plus additional task\n\t\t\t\t\tand container metrics. This version supports enhanced observability for Amazon ECS\n\t\t\t\t\tclusters using the Amazon EC2 and Fargate launch types. After you configure\n\t\t\t\t\tContainer Insights with enhanced observability on Amazon ECS, Container Insights\n\t\t\t\t\tauto-collects detailed infrastructure telemetry from the cluster level down to\n\t\t\t\t\tthe container level in your environment and displays these critical performance\n\t\t\t\t\tdata in curated dashboards removing the heavy lifting in observability set-up.
To use Container Insights with enhanced observability, set the\n\t\t\t\t\t\tcontainerInsights account setting to\n\t\t\t\t\tenhanced.
To use Container Insights, set the containerInsights account\n\t\t\t\t\tsetting to enabled.
For more information, see Monitor Amazon ECS containers using Container Insights with enhanced\n\t\t\t\t\t\tobservability in the Amazon Elastic Container Service Developer Guide.
\n\n dualStackIPv6 - When turned on, when using a VPC in dual stack\n\t\t\t\t\tmode, your tasks using the awsvpc network mode can have an IPv6\n\t\t\t\t\taddress assigned. For more information on using IPv6 with tasks launched on\n\t\t\t\t\tAmazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6\n\t\t\t\t\twith tasks launched on Fargate, see Using a VPC in dual-stack mode.
\n fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a\n\t\t\t\t\tsecurity or infrastructure update is needed for an Amazon ECS task hosted on\n\t\t\t\t\tFargate, the tasks need to be stopped and new tasks launched to replace them.\n\t\t\t\t\tUse fargateTaskRetirementWaitPeriod to configure the wait time to\n\t\t\t\t\tretire a Fargate task. For information about the Fargate tasks maintenance,\n\t\t\t\t\tsee Amazon Web Services Fargate\n\t\t\t\t\t\ttask maintenance in the Amazon ECS Developer\n\t\t\t\t\tGuide.
\n tagResourceAuthorization - Amazon ECS is introducing tagging\n\t\t\t\t\tauthorization for resource creation. Users must have permissions for actions\n\t\t\t\t\tthat create the resource, such as ecsCreateCluster. If tags are\n\t\t\t\t\tspecified when you create a resource, Amazon Web Services performs additional authorization to\n\t\t\t\t\tverify if users or roles have permissions to create tags. Therefore, you must\n\t\t\t\t\tgrant explicit permissions to use the ecs:TagResource action. For\n\t\t\t\t\tmore information, see Grant permission to tag resources on creation in the\n\t\t\t\t\t\tAmazon ECS Developer Guide.
\n guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether\n\t\t\tAmazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your\n\t\t\tAmazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
The ARN of the service revision deployed as part of the rollback.
\nWhen the type is GPU, the value is the number of physical\n\t\t\t\tGPUs the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.
When the type is InferenceAccelerator, the value matches the\n\t\t\t\tdeviceName for an InferenceAccelerator specified in a task definition.
The ARN of the service revision deployed as part of the rollback.
" } } }, diff --git a/codegen/sdk/aws-models/eks.json b/codegen/sdk/aws-models/eks.json index 7ae6d721493..e36a11e2139 100644 --- a/codegen/sdk/aws-models/eks.json +++ b/codegen/sdk/aws-models/eks.json @@ -68,6 +68,18 @@ "smithy.api#enumValue": "BOTTLEROCKET_x86_64" } }, + "BOTTLEROCKET_ARM_64_FIPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BOTTLEROCKET_ARM_64_FIPS" + } + }, + "BOTTLEROCKET_x86_64_FIPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BOTTLEROCKET_x86_64_FIPS" + } + }, "BOTTLEROCKET_ARM_64_NVIDIA": { "target": "smithy.api#Unit", "traits": { @@ -2086,6 +2098,9 @@ }, { "target": "com.amazonaws.eks#ServerException" + }, + { + "target": "com.amazonaws.eks#ThrottlingException" } ], "traits": { @@ -2164,6 +2179,9 @@ }, { "target": "com.amazonaws.eks#ServerException" + }, + { + "target": "com.amazonaws.eks#ThrottlingException" } ], "traits": { @@ -2630,7 +2648,7 @@ "remoteNetworkConfig": { "target": "com.amazonaws.eks#RemoteNetworkConfigResponse", "traits": { - "smithy.api#documentation": "The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this\n configuration after the cluster is created.
" + "smithy.api#documentation": "The configuration in the cluster for EKS Hybrid Nodes. You can add, change, or remove this\n configuration after the cluster is created.
" } }, "computeConfig": { @@ -3588,7 +3606,7 @@ "remoteNetworkConfig": { "target": "com.amazonaws.eks#RemoteNetworkConfigRequest", "traits": { - "smithy.api#documentation": "The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this\n configuration after the cluster is created.
" + "smithy.api#documentation": "The configuration in the cluster for EKS Hybrid Nodes. You can add, change, or remove this\n configuration after the cluster is created.
" } }, "computeConfig": { @@ -6090,6 +6108,9 @@ }, { "target": "com.amazonaws.eks#ServerException" + }, + { + "target": "com.amazonaws.eks#ThrottlingException" } ], "traits": { @@ -6216,7 +6237,7 @@ "licenses": { "target": "com.amazonaws.eks#LicenseList", "traits": { - "smithy.api#documentation": "Includes all of the claims in the license token necessary to validate the license for\n\t extended support.
" + "smithy.api#documentation": "Includes all of the claims in the license token necessary to validate the license for\n extended support.
" } }, "tags": { @@ -7172,6 +7193,25 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.eks#InvalidStateException": { + "type": "structure", + "members": { + "clusterName": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "The Amazon EKS cluster associated with the exception.
" + } + }, + "message": { + "target": "com.amazonaws.eks#String" + } + }, + "traits": { + "smithy.api#documentation": "Amazon EKS detected upgrade readiness issues. Call the \n ListInsights\n API to view detected upgrade blocking issues.\n Pass the \n force\n flag when updating to override upgrade readiness\n errors.
The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this\n configuration after the cluster is created.
" + "smithy.api#documentation": "The configuration in the cluster for EKS Hybrid Nodes. You can add, change, or remove this\n configuration after the cluster is created.
" } }, "com.amazonaws.eks#RemoteNetworkConfigResponse": { @@ -9759,7 +9799,7 @@ } }, "traits": { - "smithy.api#documentation": "The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this\n configuration after the cluster is created.
" + "smithy.api#documentation": "The configuration in the cluster for EKS Hybrid Nodes. You can add, change, or remove this\n configuration after the cluster is created.
" } }, "com.amazonaws.eks#RemoteNodeNetwork": { @@ -10238,6 +10278,25 @@ } } }, + "com.amazonaws.eks#ThrottlingException": { + "type": "structure", + "members": { + "clusterName": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "The Amazon EKS cluster associated with the exception.
" + } + }, + "message": { + "target": "com.amazonaws.eks#String" + } + }, + "traits": { + "smithy.api#documentation": "The request or operation couldn't be performed because a service is throttling\n requests.
", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, "com.amazonaws.eks#Timestamp": { "type": "timestamp" }, @@ -10612,10 +10671,13 @@ }, { "target": "com.amazonaws.eks#ServerException" + }, + { + "target": "com.amazonaws.eks#ThrottlingException" } ], "traits": { - "smithy.api#documentation": "Updates an Amazon EKS cluster configuration. Your cluster continues to function during the\n update. The response output includes an update ID that you can use to track the status\n of your cluster update with DescribeUpdate\"/>.
You can use this API operation to enable or disable exporting the Kubernetes control plane\n logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported\n to CloudWatch Logs. For more information, see Amazon EKS\n Cluster control plane logs in the\n \n Amazon EKS User Guide\n .
\nCloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported\n control plane logs. For more information, see CloudWatch Pricing.
\nYou can also use this API operation to enable or disable public and private access to\n your cluster's Kubernetes API server endpoint. By default, public access is enabled, and\n private access is disabled. For more information, see Amazon EKS cluster\n endpoint access control in the \n Amazon EKS User Guide\n .
\nYou can also use this API operation to choose different subnets and security groups\n for the cluster. You must specify at least two subnets that are in different Availability Zones. You\n can't change which VPC the subnets are from, the subnets must be in the same VPC as the\n subnets that the cluster was created with. For more information about the VPC\n requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the \n Amazon EKS User Guide\n .
\nYou can also use this API operation to enable or disable ARC zonal shift. If zonal\n shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.
\nCluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.
Updates an Amazon EKS cluster configuration. Your cluster continues to function during the\n update. The response output includes an update ID that you can use to track the status\n of your cluster update with DescribeUpdate.
You can use this operation to do the following actions:
\nYou can use this API operation to enable or disable exporting the Kubernetes\n control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane\n logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the\n \n Amazon EKS User Guide\n .
\nCloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch Pricing.
\nYou can also use this API operation to enable or disable public and private\n access to your cluster's Kubernetes API server endpoint. By default, public access is\n enabled, and private access is disabled. For more information, see Amazon EKS\n cluster endpoint access control in the\n \n Amazon EKS User Guide\n .
\nYou can also use this API operation to choose different subnets and security\n groups for the cluster. You must specify at least two subnets that are in\n different Availability Zones. You can't change which VPC the subnets are from, the subnets\n must be in the same VPC as the subnets that the cluster was created with. For\n more information about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the\n \n Amazon EKS User Guide\n .
\nYou can also use this API operation to enable or disable ARC zonal shift. If\n zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.
\nYou can also use this API operation to add, change, or remove the\n configuration in the cluster for EKS Hybrid Nodes. To remove the configuration,\n use the remoteNetworkConfig key with an object containing both\n subkeys with empty arrays for each. Here is an inline example:\n \"remoteNetworkConfig\": { \"remoteNodeNetworks\": [],\n \"remotePodNetworks\": [] }.
Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.
Update the configuration of the block storage capability of your EKS Auto Mode\n cluster. For example, enable the capability.
" } + }, + "remoteNetworkConfig": { + "target": "com.amazonaws.eks#RemoteNetworkConfigRequest" } }, "traits": { @@ -10717,6 +10782,9 @@ { "target": "com.amazonaws.eks#InvalidRequestException" }, + { + "target": "com.amazonaws.eks#InvalidStateException" + }, { "target": "com.amazonaws.eks#ResourceInUseException" }, @@ -10725,6 +10793,9 @@ }, { "target": "com.amazonaws.eks#ServerException" + }, + { + "target": "com.amazonaws.eks#ThrottlingException" } ], "traits": { @@ -10760,6 +10831,13 @@ "smithy.api#documentation": "A unique, case-sensitive identifier that you provide to ensure\nthe idempotency of the request.
", "smithy.api#idempotencyToken": {} } + }, + "force": { + "target": "com.amazonaws.eks#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "Set this value to true to override upgrade-blocking readiness checks when\n updating a cluster.
Modifies the engine listed in a cluster message. The options are redis, memcached or valkey.
" + "smithy.api#documentation": "The engine type used by the cache cluster. The options are valkey, memcached or redis.
" } }, "EngineVersion": { @@ -11403,6 +11403,12 @@ "traits": { "smithy.api#documentation": "The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2\n to 7.1 or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.
Configures horizontal or vertical scaling for Memcached clusters, specifying the scaling percentage and interval.
" + } } }, "traits": { @@ -13226,6 +13232,12 @@ "traits": { "smithy.api#documentation": "A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.
" } + }, + "ScaleConfig": { + "target": "com.amazonaws.elasticache#ScaleConfig", + "traits": { + "smithy.api#documentation": "The scaling configuration changes that are pending for the Memcached cluster.
" + } } }, "traits": { @@ -14591,6 +14603,26 @@ "smithy.api#output": {} } }, + "com.amazonaws.elasticache#ScaleConfig": { + "type": "structure", + "members": { + "ScalePercentage": { + "target": "com.amazonaws.elasticache#IntegerOptional", + "traits": { + "smithy.api#documentation": "The percentage by which to scale the Memcached cluster, either horizontally by adding nodes or vertically by increasing resources.
" + } + }, + "ScaleIntervalMinutes": { + "target": "com.amazonaws.elasticache#IntegerOptional", + "traits": { + "smithy.api#documentation": "The time interval in seconds between scaling operations when performing gradual scaling for a Memcached cluster.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Configuration settings for horizontal or vertical scaling operations on Memcached clusters.
" + } + }, "com.amazonaws.elasticache#SecurityGroupIdsList": { "type": "list", "member": { diff --git a/codegen/sdk/aws-models/entityresolution.json b/codegen/sdk/aws-models/entityresolution.json index ef74bc38aef..1cd4be07fd6 100644 --- a/codegen/sdk/aws-models/entityresolution.json +++ b/codegen/sdk/aws-models/entityresolution.json @@ -964,7 +964,7 @@ "smithy.api#length": { "max": 255 }, - "smithy.api#pattern": "^[a-zA-Z_0-9- \\t]*$" + "smithy.api#pattern": "^[a-zA-Z_0-9- ]*$" } }, "com.amazonaws.entityresolution#AwsAccountId": { @@ -2020,7 +2020,7 @@ "type": "structure", "members": { "uniqueId": { - "target": "com.amazonaws.entityresolution#UniqueId", + "target": "com.amazonaws.entityresolution#HeaderSafeUniqueId", "traits": { "smithy.api#documentation": "The unique ID that could not be deleted.
", "smithy.api#required": {} @@ -2082,7 +2082,7 @@ "type": "structure", "members": { "uniqueId": { - "target": "com.amazonaws.entityresolution#UniqueId", + "target": "com.amazonaws.entityresolution#HeaderSafeUniqueId", "traits": { "smithy.api#documentation": "The unique ID of the deleted item.
", "smithy.api#required": {} @@ -2110,7 +2110,7 @@ "com.amazonaws.entityresolution#DisconnectedUniqueIdsList": { "type": "list", "member": { - "target": "com.amazonaws.entityresolution#UniqueId" + "target": "com.amazonaws.entityresolution#HeaderSafeUniqueId" } }, "com.amazonaws.entityresolution#EntityName": { @@ -2569,7 +2569,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the corresponding Match ID of a customer record if the record has been\n processed.
", + "smithy.api#documentation": "Returns the corresponding Match ID of a customer record if the record has been\n processed in a\n rule-based matching workflow or ML matching workflow.
\nYou can call this API as a dry run of an incremental load on the rule-based\n matching\n workflow.
", "smithy.api#http": { "code": 200, "method": "POST", @@ -3218,6 +3218,16 @@ "smithy.api#output": {} } }, + "com.amazonaws.entityresolution#HeaderSafeUniqueId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 780 + }, + "smithy.api#pattern": "^[a-zA-Z_0-9-+=/,]*$" + } + }, "com.amazonaws.entityresolution#IdMappingJobMetrics": { "type": "structure", "members": { @@ -3256,10 +3266,16 @@ "traits": { "smithy.api#documentation": "The total number of distinct mapped target records.
" } + }, + "uniqueRecordsLoaded": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "The\n number of records remaining after loading and aggregating duplicate records. Duplicates are\n determined by the field marked as UNIQUE_ID in your schema mapping - records sharing the\n same value in this field are considered duplicates. For example, if you specified\n \"customer_id\" as a UNIQUE_ID field and had three records with the same customer_id value,\n they would count as one unique record in this metric.
" + } } }, "traits": { - "smithy.api#documentation": "An object containing InputRecords, RecordsNotProcessed,\n TotalRecordsProcessed, TotalMappedRecords,\n TotalMappedSourceRecords, and TotalMappedTargetRecords.
An\n object that contains metrics about an ID mapping job, including counts of input records,\n processed records, and mapped records between source and target identifiers.\n
" } }, "com.amazonaws.entityresolution#IdMappingJobOutputSource": { @@ -5365,7 +5381,7 @@ "smithy.api#length": { "max": 255 }, - "smithy.api#pattern": "^[a-zA-Z_0-9- \\t]*$", + "smithy.api#pattern": "^[a-zA-Z_0-9- ]*$", "smithy.api#required": {} } }, @@ -5374,7 +5390,6 @@ "traits": { "smithy.api#documentation": "A list of MatchingKeys. The MatchingKeys must have been\n defined in the SchemaMapping. Two records are considered to match according to\n this rule if all of the MatchingKeys match.
The type of the attribute, selected from a list of values.
", + "smithy.api#documentation": "The type of the attribute, selected from a list of values.
\nNormalization is only supported for NAME, ADDRESS,\n PHONE, and EMAIL_ADDRESS.
If you want to normalize NAME_FIRST, NAME_MIDDLE, and\n NAME_LAST, you must group them by assigning them to the\n NAME\n groupName.
If you want to normalize ADDRESS_STREET1, ADDRESS_STREET2,\n ADDRESS_STREET3, ADDRESS_CITY, ADDRESS_STATE,\n ADDRESS_COUNTRY, and ADDRESS_POSTALCODE, you must group\n them by assigning them to the ADDRESS\n groupName.
If you want to normalize PHONE_NUMBER and\n PHONE_COUNTRYCODE, you must group them by assigning them to the\n PHONE\n groupName.
A string that instructs Entity Resolution to combine several columns into a unified\n column with the identical attribute type.
\nFor example, when working with columns such as first_name,\n middle_name, and last_name, assigning them a common\n groupName will prompt Entity Resolution to concatenate them into a single\n value.
A string that instructs Entity Resolution to combine several columns into a unified\n column with the identical attribute type.
\nFor example, when working with columns such as\n NAME_FIRST,\n NAME_MIDDLE,\n and\n NAME_LAST,\n assigning them a common groupName will prompt Entity Resolution to concatenate\n them into a single value.
Indicates if the column values are hashed in the schema input. If the value is set to\n TRUE, the column values are hashed. If the value is set to\n FALSE, the column values are cleartext.
Indicates if the column values are hashed in the schema input.
\nIf the value is set to TRUE, the column values are hashed.
If the value is set to FALSE, the column values are cleartext.
An object containing FieldName, Type, GroupName,\n MatchKey, Hashing, and SubType.
A\n configuration object for defining input data fields in Entity Resolution. The\n SchemaInputAttribute specifies how individual fields in your input data should be processed\n and matched.
" } }, "com.amazonaws.entityresolution#SchemaInputAttributes": { @@ -6050,20 +6065,10 @@ } } }, - "com.amazonaws.entityresolution#UniqueId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 760 - }, - "smithy.api#pattern": "^[a-zA-Z_0-9-+=/,]*$" - } - }, "com.amazonaws.entityresolution#UniqueIdList": { "type": "list", "member": { - "target": "com.amazonaws.entityresolution#UniqueId" + "target": "com.amazonaws.entityresolution#HeaderSafeUniqueId" } }, "com.amazonaws.entityresolution#UntagResource": { diff --git a/codegen/sdk/aws-models/eventbridge.json b/codegen/sdk/aws-models/eventbridge.json index 41df041a238..d7f578eac78 100644 --- a/codegen/sdk/aws-models/eventbridge.json +++ b/codegen/sdk/aws-models/eventbridge.json @@ -1606,7 +1606,7 @@ } }, "traits": { - "smithy.api#documentation": "You do not have the necessary permissons for this action.
", + "smithy.api#documentation": "You do not have the necessary permissions for this action.
", "smithy.api#error": "client" } }, @@ -1868,7 +1868,7 @@ } }, "EventSourceArn": { - "target": "com.amazonaws.eventbridge#Arn", + "target": "com.amazonaws.eventbridge#EventBusArn", "traits": { "smithy.api#documentation": "The ARN of the event bus associated with the archive. Only events from this event bus are\n sent to the archive.
" } @@ -2793,13 +2793,13 @@ "ResourceConfigurationArn": { "target": "com.amazonaws.eventbridge#ResourceConfigurationArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource configuration for the resource endpoint.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon VPC Lattice resource configuration for the resource endpoint.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource configuration for the resource endpoint.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon VPC Lattice resource configuration for the resource endpoint.
" } }, "com.amazonaws.eventbridge#ConnectivityResourceParameters": { @@ -2952,7 +2952,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an archive of events with the specified settings. When you create an archive,\n incoming events might not immediately start being sent to the archive. Allow a short period of\n time for changes to take effect. If you do not specify a pattern to filter events sent to the\n archive, all events are sent to the archive except replayed events. Replayed events are not\n sent to an archive.
\nArchives and schema discovery are not supported for event buses encrypted using a\n customer managed key. EventBridge returns an error if:
\nYou call \n CreateArchive\n on an event bus set to use a customer managed key for encryption.
You call \n CreateDiscoverer\n on an event bus set to use a customer managed key for encryption.
You call \n UpdatedEventBus\n to set a customer managed key on an event bus with an archives or schema discovery enabled.
To enable archives or schema discovery on an event bus, choose to\n use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.
\nCreates an archive of events with the specified settings. When you create an archive,\n incoming events might not immediately start being sent to the archive. Allow a short period of\n time for changes to take effect. If you do not specify a pattern to filter events sent to the\n archive, all events are sent to the archive except replayed events. Replayed events are not\n sent to an archive.
\nIf you have specified that EventBridge use a customer managed key for encrypting the source event bus, we strongly recommend you also specify a \n customer managed key for any archives for the event bus as well.
\nFor more information, see Encrypting archives in the Amazon EventBridge User Guide.
\nThe ARN of the event bus that sends events to the archive.
", "smithy.api#required": {} @@ -2989,6 +2989,12 @@ "traits": { "smithy.api#documentation": "The number of days to retain events for. Default value is 0. If set to 0, events are\n retained indefinitely
" } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "The identifier of the KMS\n customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt this archive. The identifier can be the key \n Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.
\nIf you do not specify a customer managed key identifier, EventBridge uses an\n Amazon Web Services owned key to encrypt the archive.
\nFor more information, see Identify and view keys in the Key Management Service\n Developer Guide.
\nIf you have specified that EventBridge use a customer managed key for encrypting the source event bus, we strongly recommend you also specify a \n customer managed key for any archives for the event bus as well.
\nFor more information, see Encrypting archives in the Amazon EventBridge User Guide.
\nFor connections to private resource endpoints, the parameters to use for invoking the resource endpoint.
\nFor more information, see Connecting to private resources in the \n Amazon EventBridge User Guide\n .
" + "smithy.api#documentation": "For connections to private APIs, the parameters to use for invoking the API.
\nFor more information, see Connecting to private APIs in the \n Amazon EventBridge User Guide\n .
" } } }, @@ -3451,7 +3457,7 @@ "KmsKeyIdentifier": { "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the KMS\n customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key \n Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.
\nIf you do not specify a customer managed key identifier, EventBridge uses an\n Amazon Web Services owned key to encrypt events on the event bus.
\nFor more information, see Managing keys in the Key Management Service\n Developer Guide.
\nArchives and schema discovery are not supported for event buses encrypted using a\n customer managed key. EventBridge returns an error if:
\nYou call \n CreateArchive\n on an event bus set to use a customer managed key for encryption.
You call \n CreateDiscoverer\n on an event bus set to use a customer managed key for encryption.
You call \n UpdatedEventBus\n to set a customer managed key on an event bus with an archives or schema discovery enabled.
To enable archives or schema discovery on an event bus, choose to\n use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.
\nThe identifier of the KMS\n customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key \n Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.
\nIf you do not specify a customer managed key identifier, EventBridge uses an\n Amazon Web Services owned key to encrypt events on the event bus.
\nFor more information, see Identify and view keys in the Key Management Service\n Developer Guide.
\nSchema discovery is not supported for event buses encrypted using a\n customer managed key. EventBridge returns an error if you call \n CreateDiscoverer\n on an event bus set to use a customer managed key for encryption.
To enable schema discovery on an event bus, choose to\n use an Amazon Web Services owned key. For more information, see Encrypting events in the Amazon EventBridge User Guide.
\nIf you have specified that EventBridge use a customer managed key for encrypting the source event bus, we strongly recommend you also specify a \n customer managed key for any archives for the event bus as well.
\nFor more information, see Encrypting archives in the Amazon EventBridge User Guide.
\nThe ARN of the event source associated with the archive.
" } @@ -4256,6 +4262,12 @@ "smithy.api#documentation": "The reason that the archive is in the state.
" } }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "The identifier of the KMS\n customer managed key for EventBridge to use to encrypt this archive, if one has been specified.
\nFor more information, see Encrypting archives in the Amazon EventBridge User Guide.
" + } + }, "RetentionDays": { "target": "com.amazonaws.eventbridge#RetentionDays", "traits": { @@ -4350,7 +4362,7 @@ "ResourceAssociationArn": { "target": "com.amazonaws.eventbridge#ResourceAssociationArn", "traits": { - "smithy.api#documentation": "For connections to private APIs, the Amazon Resource Name (ARN) of the resource association EventBridge created between the connection and the private API's resource configuration.
", + "smithy.api#documentation": "For connections to private APIs, the Amazon Resource Name (ARN) of the resource association EventBridge created between the connection and the private API's resource configuration.
\nFor more information, see \n Managing service network resource associations for connections in the \n Amazon EventBridge User Guide\n .
", "smithy.api#required": {} } } @@ -4383,7 +4395,7 @@ "InvocationConnectivityParameters": { "target": "com.amazonaws.eventbridge#DescribeConnectionConnectivityParameters", "traits": { - "smithy.api#documentation": "For connections to private resource endpoints. The parameters EventBridge uses to invoke the resource endpoint.
\nFor more information, see Connecting to private resources in the \n Amazon EventBridge User Guide\n .
" + "smithy.api#documentation": "For connections to private APIs The parameters EventBridge uses to invoke the resource\n endpoint.
\nFor more information, see Connecting to private APIs in the \n Amazon EventBridge User Guide\n .
" } }, "ConnectionState": { @@ -4862,7 +4874,7 @@ } }, "EventSourceArn": { - "target": "com.amazonaws.eventbridge#Arn", + "target": "com.amazonaws.eventbridge#ArchiveArn", "traits": { "smithy.api#documentation": "The ARN of the archive events were replayed from.
" } @@ -5497,6 +5509,16 @@ "smithy.api#documentation": "An event bus receives events from a source, uses rules to evaluate them, applies any\n configured input transformation, and routes them to the appropriate target(s). Your account's\n default event bus receives events from Amazon Web Services services. A custom event bus can\n receive events from your custom applications and services. A partner event bus receives events\n from an event source created by an SaaS partner. These events come from the partners services\n or applications.
" } }, + "com.amazonaws.eventbridge#EventBusArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1600 + }, + "smithy.api#pattern": "^arn:aws([a-z]|\\-)*:events:([a-z]|\\d|\\-)*:([0-9]{12})?:.+\\/.+$" + } + }, "com.amazonaws.eventbridge#EventBusDescription": { "type": "string", "traits": { @@ -5905,7 +5927,8 @@ "smithy.api#length": { "min": 0, "max": 2048 - } + }, + "smithy.api#pattern": "^[a-zA-Z0-9_\\-/:]*$" } }, "com.amazonaws.eventbridge#LaunchType": { @@ -6059,7 +6082,7 @@ } }, "EventSourceArn": { - "target": "com.amazonaws.eventbridge#Arn", + "target": "com.amazonaws.eventbridge#EventBusArn", "traits": { "smithy.api#documentation": "The ARN of the event source associated with the archive.
" } @@ -6544,7 +6567,7 @@ } }, "EventSourceArn": { - "target": "com.amazonaws.eventbridge#Arn", + "target": "com.amazonaws.eventbridge#ArchiveArn", "traits": { "smithy.api#documentation": "The ARN of the archive from which the events are replayed.
" } @@ -7255,7 +7278,7 @@ } ], "traits": { - "smithy.api#documentation": "Sends custom events to Amazon EventBridge so that they can be matched to rules.
\nThe maximum size for a PutEvents event entry is 256 KB. Entry size is calculated including\n the event and any necessary characters and keys of the JSON representation of the event. To\n learn more, see Calculating PutEvents event entry\n size in the \n Amazon EventBridge User Guide\n \n
\nPutEvents accepts the data in JSON format. For the JSON number (integer) data type, the\n constraints are: a minimum value of -9,223,372,036,854,775,808 and a maximum value of\n 9,223,372,036,854,775,807.
\nPutEvents will only process nested JSON up to 1000 levels deep.
\nSends custom events to Amazon EventBridge so that they can be matched to rules.
\nYou can batch multiple event entries into one request for efficiency. \n However, the total entry size must be less than 256KB. You can calculate the entry size before you send the events. \n For more information, see Calculating PutEvents event entry\n size in the \n Amazon EventBridge User Guide\n .
\nPutEvents accepts the data in JSON format. For the JSON number (integer) data type, the\n constraints are: a minimum value of -9,223,372,036,854,775,808 and a maximum value of\n 9,223,372,036,854,775,807.
\nPutEvents will only process nested JSON up to 1000 levels deep.
\nThe ARN of the archive to replay event from.
" } @@ -8633,20 +8656,20 @@ "Name": { "target": "com.amazonaws.eventbridge#SageMakerPipelineParameterName", "traits": { - "smithy.api#documentation": "Name of parameter to start execution of a SageMaker Model Building\n Pipeline.
", + "smithy.api#documentation": "Name of parameter to start execution of a SageMaker AI Model Building\n Pipeline.
", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.eventbridge#SageMakerPipelineParameterValue", "traits": { - "smithy.api#documentation": "Value of parameter to start execution of a SageMaker Model Building\n Pipeline.
", + "smithy.api#documentation": "Value of parameter to start execution of a SageMaker AI Model Building\n Pipeline.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Name/Value pair of a parameter to start execution of a SageMaker Model Building\n Pipeline.
" + "smithy.api#documentation": "Name/Value pair of a parameter to start execution of a SageMaker AI Model Building\n Pipeline.
" } }, "com.amazonaws.eventbridge#SageMakerPipelineParameterList": { @@ -8686,12 +8709,12 @@ "PipelineParameterList": { "target": "com.amazonaws.eventbridge#SageMakerPipelineParameterList", "traits": { - "smithy.api#documentation": "List of Parameter names and values for SageMaker Model Building Pipeline\n execution.
" + "smithy.api#documentation": "List of Parameter names and values for SageMaker AI Model Building Pipeline\n execution.
" } } }, "traits": { - "smithy.api#documentation": "These are custom parameters to use when the target is a SageMaker Model Building\n Pipeline that starts based on EventBridge events.
" + "smithy.api#documentation": "These are custom parameters to use when the target is a SageMaker AI Model Building\n Pipeline that starts based on EventBridge events.
" } }, "com.amazonaws.eventbridge#ScheduleExpression": { @@ -8819,7 +8842,7 @@ } }, "EventSourceArn": { - "target": "com.amazonaws.eventbridge#Arn", + "target": "com.amazonaws.eventbridge#ArchiveArn", "traits": { "smithy.api#documentation": "The ARN of the archive to replay events from.
", "smithy.api#required": {} @@ -9105,7 +9128,7 @@ "SageMakerPipelineParameters": { "target": "com.amazonaws.eventbridge#SageMakerPipelineParameters", "traits": { - "smithy.api#documentation": "Contains the SageMaker Model Building Pipeline parameters to start execution of a\n SageMaker Model Building Pipeline.
\nIf you specify a SageMaker Model Building Pipeline as a target, you can use this\n to specify parameters to start a pipeline execution based on EventBridge events.
" + "smithy.api#documentation": "Contains the SageMaker AI Model Building Pipeline parameters to start execution of a\n SageMaker AI Model Building Pipeline.
\nIf you specify a SageMaker AI Model Building Pipeline as a target, you can use this\n to specify parameters to start a pipeline execution based on EventBridge events.
" } }, "DeadLetterConfig": { @@ -9520,6 +9543,12 @@ "traits": { "smithy.api#documentation": "The number of days to retain events in the archive.
" } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "The identifier of the KMS\n customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt this archive. The identifier can be the key \n Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.
\nIf you do not specify a customer managed key identifier, EventBridge uses an\n Amazon Web Services owned key to encrypt the archive.
\nFor more information, see Identify and view keys in the Key Management Service\n Developer Guide.
\nIf you have specified that EventBridge use a customer managed key for encrypting the source event bus, we strongly recommend you also specify a \n customer managed key for any archives for the event bus as well.
\nFor more information, see Encrypting archives in the Amazon EventBridge User Guide.
\nFor connections to private resource endpoints, the parameters to use for invoking the resource endpoint.
\nFor more information, see Connecting to private resources in the \n Amazon EventBridge User Guide\n .
" + "smithy.api#documentation": "For connections to private APIs, the parameters to use for invoking the API.
\nFor more information, see Connecting to private APIs in the \n Amazon EventBridge User Guide\n .
" } } }, @@ -9965,7 +9994,7 @@ "KmsKeyIdentifier": { "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the KMS\n customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key \n Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.
\nIf you do not specify a customer managed key identifier, EventBridge uses an\n Amazon Web Services owned key to encrypt events on the event bus.
\nFor more information, see Managing keys in the Key Management Service\n Developer Guide.
\nArchives and schema discovery are not supported for event buses encrypted using a\n customer managed key. EventBridge returns an error if:
\nYou call \n CreateArchive\n on an event bus set to use a customer managed key for encryption.
You call \n CreateDiscoverer\n on an event bus set to use a customer managed key for encryption.
You call \n UpdatedEventBus\n to set a customer managed key on an event bus with an archives or schema discovery enabled.
To enable archives or schema discovery on an event bus, choose to\n use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.
\nThe identifier of the KMS\n customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key \n Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.
\nIf you do not specify a customer managed key identifier, EventBridge uses an\n Amazon Web Services owned key to encrypt events on the event bus.
\nFor more information, see Identify and view keys in the Key Management Service\n Developer Guide.
\nSchema discovery is not supported for event buses encrypted using a\n customer managed key. EventBridge returns an error if you call \n CreateDiscoverer\n on an event bus set to use a customer managed key for encryption.
To enable schema discovery on an event bus, choose to\n use an Amazon Web Services owned key. For more information, see Encrypting events in the Amazon EventBridge User Guide.
\nIf you have specified that EventBridge use a customer managed key for encrypting the source event bus, we strongly recommend you also specify a \n customer managed key for any archives for the event bus as well.
\nFor more information, see Encrypting archives in the Amazon EventBridge User Guide.
\nOperating system that the game server binaries are built to run on. This value\n determines the type of fleet resources that you can use for this build.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to Amazon GameLift server SDK version 5.\n
\nOperating system that the game server binaries are built to run on. This value\n determines the type of fleet resources that you can use for this build.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to server SDK version 5.\n
\nThe type of operating system on the compute resource.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to Amazon GameLift server SDK version 5.\n
\nThe type of operating system on the compute resource.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to server SDK version 5.\n
\nThe platform that all containers in the container group definition run on.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game\n servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game\n server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to\n Amazon GameLift server SDK version 5.\n
\nThe platform that all containers in the container group definition run on.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game\n servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game\n server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to\n server SDK version 5.\n
\nThe operating system that your game server binaries run on. This value determines the\n type of fleet resources that you use for this build. If your game build contains\n multiple executables, they all must run on the same operating system. You must specify a\n valid operating system in this request. There is no default value. You can't change a\n build's operating system later.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to Amazon GameLift server SDK version 5.\n
\nThe operating system that your game server binaries run on. This value determines the\n type of fleet resources that you use for this build. If your game build contains\n multiple executables, they all must run on the same operating system. You must specify a\n valid operating system in this request. There is no default value. You can't change a\n build's operating system later.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to server SDK version 5.\n
\nThe platform that all containers in the group use. Containers in a group must run on the\n same operating system.
\nDefault value: AMAZON_LINUX_2023\n
Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game\n servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game\n server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to\n Amazon GameLift server SDK version 5.\n
\nThe platform that all containers in the group use. Containers in a group must run on the\n same operating system.
\nDefault value: AMAZON_LINUX_2023\n
Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game\n servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game\n server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to\n server SDK version 5.\n
\nThe IP address ranges and port settings that allow inbound traffic to access game\n server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call \n https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings to set it before players can connect to game sessions. \n As a best practice, we recommend \n opening ports for remote access only when you need them and closing them when you're finished. \n For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.
" + "smithy.api#documentation": "The IP address ranges and port settings that allow inbound traffic to access game\n server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call \n https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings to set it before players can connect to game sessions. \n As a best practice, we recommend \n opening ports for remote access only when you need them and closing them when you're finished. \n For Amazon GameLift Realtime fleets, Amazon GameLift automatically sets TCP and UDP ranges.
" } }, "NewGameSessionProtectionPolicy": { @@ -2703,7 +2703,7 @@ "IdempotencyToken": { "target": "com.amazonaws.gamelift#IdStringModel", "traits": { - "smithy.api#documentation": "Custom string that uniquely identifies the new game session request. This is useful\n for ensuring that game session requests with the same idempotency token are processed\n only once. Subsequent requests with the same string return the original\n GameSession object, with an updated status. Maximum token length is 48\n characters. If provided, this string is included in the new game session's ID.\n A game session ARN has the following format: \n arn:aws:gamelift:. Idempotency tokens remain in use for 30 days after a game session has ended;\n game session objects are retained for this time period and then deleted.
Custom string that uniquely identifies the new game session request. This is useful\n for ensuring that game session requests with the same idempotency token are processed\n only once. Subsequent requests with the same string return the original\n GameSession object, with an updated status. Maximum token length is 48\n characters. If provided, this string is included in the new game session's ID.\n A game session ARN has the following format: \n arn:aws:gamelift:. Idempotency tokens remain in use for 30 days after a game session has ended;\n game session objects are retained for this time period and then deleted.
Creates a placement queue that processes requests for new game sessions. A queue uses\n FleetIQ algorithms to determine the best placement locations and find an available game\n server there, then prompts the game server process to start a new game session.
\nA game session queue is configured with a set of destinations (Amazon GameLift fleets or\n aliases), which determine the locations where the queue can place new game sessions.\n These destinations can span multiple fleet types (Spot and On-Demand), instance types,\n and Amazon Web Services Regions. If the queue includes multi-location fleets, the queue is able to\n place game sessions in all of a fleet's remote locations. You can opt to filter out\n individual locations if needed.
\nThe queue configuration also determines how FleetIQ selects the best available placement\n for a new game session. Before searching for an available game server, FleetIQ first\n prioritizes the queue's destinations and locations, with the best placement locations on\n top. You can set up the queue to use the FleetIQ default prioritization or provide an\n alternate set of priorities.
\nTo create a new queue, provide a name, timeout value, and a list of destinations.\n Optionally, specify a sort configuration and/or a filter, and define a set of latency\n cap policies. You can also include the ARN for an Amazon Simple Notification Service\n (SNS) topic to receive notifications of game session placement activity. Notifications\n using SNS or CloudWatch events is the preferred way to track placement activity.
\nIf successful, a new GameSessionQueue object is returned with an assigned\n queue ARN. New game session requests, which are submitted to queue with StartGameSessionPlacement or StartMatchmaking, reference a queue's name or ARN.
\n Learn more\n
\n\n \n Design a game session queue\n
\n\n \n Create a game session queue\n
\n\n Related actions\n
\n\n CreateGameSessionQueue \n |\n DescribeGameSessionQueues\n | \n UpdateGameSessionQueue\n | \n DeleteGameSessionQueue\n | \n All APIs by task\n
" + "smithy.api#documentation": "Creates a placement queue that processes requests for new game sessions. A queue uses\n FleetIQ algorithms to locate the best available placement locations for a new game\n session, and then prompts the game server process to start a new game session.
\nA game session queue is configured with a set of destinations (Amazon GameLift fleets or\n aliases) that determine where the queue can place new game sessions. These destinations\n can span multiple Amazon Web Services Regions, can use different instance types, and can include both\n Spot and On-Demand fleets. If the queue includes multi-location fleets, the queue can\n place game sessions in any of a fleet's remote locations.
\nYou can configure a queue to determine how it selects the best available placement for\n a new game session. Queues can prioritize placement decisions based on a combination of\n location, hosting cost, and player latency. You can set up the queue to use the default\n prioritization or provide alternate instructions using\n PriorityConfiguration.
\n Request options\n
\nUse this operation to make these common types of requests.
\nCreate a queue with the minimum required parameters.
\n\n Name\n
\n Destinations (This parameter isn't required, but a queue\n can't make placements without at least one destination.)
Create a queue with placement notification. Queues that have high placement\n activity must use a notification system, such as with Amazon Simple Notification Service (Amazon SNS) or Amazon CloudWatch.
\nRequired parameters Name and\n Destinations\n
\n NotificationTarget\n
Create a queue with custom prioritization settings. These custom settings\n replace the default prioritization configuration for a queue.
\nRequired parameters Name and\n Destinations\n
\n PriorityConfiguration\n
Create a queue with special rules for processing player latency data.
\nRequired parameters Name and\n Destinations\n
\n PlayerLatencyPolicies\n
\n Results\n
\nIf successful, this operation returns a new GameSessionQueue object with\n an assigned queue ARN. Use the queue's name or ARN when submitting new game session\n requests with StartGameSessionPlacement or StartMatchmaking.
\n Learn more\n
\n\n \n Design a game session queue\n
\n\n \n Create a game session queue\n
\n\n Related actions\n
\n\n CreateGameSessionQueue \n |\n DescribeGameSessionQueues\n | \n UpdateGameSessionQueue\n | \n DeleteGameSessionQueue\n | \n All APIs by task\n
" } }, "com.amazonaws.gamelift#CreateGameSessionQueueInput": { @@ -2783,7 +2783,7 @@ "TimeoutInSeconds": { "target": "com.amazonaws.gamelift#WholeNumber", "traits": { - "smithy.api#documentation": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.
The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status. If you don't specify a request timeout, the queue uses a default value.
Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript that\n provide configuration settings and optional custom game logic for your game. The script\n is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is\n executed during an active game session.
\nTo create a new script record, specify a script name and provide the script file(s).\n The script files and all dependencies must be zipped into a single file. You can pull\n the zip file from either of these locations:
\nA locally available directory. Use the ZipFile parameter\n for this option.
\nAn Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the\n StorageLocation parameter for this option. You'll need\n to have an Identity Access Management (IAM) role that allows the Amazon GameLift service\n to access your S3 bucket.
\nIf the call is successful, a new script record is created with a unique script ID. If\n the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3\n bucket and the script record's storage location reflects this location. If the script\n file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as\n needed for deployment.
\n\n Learn more\n
\n\n Amazon GameLift Realtime Servers\n
\n\n Set Up a Role for Amazon GameLift Access\n
\n\n Related actions\n
\n\n All APIs by task\n
" + "smithy.api#documentation": "Creates a new script record for your Amazon GameLift Realtime script. Realtime scripts are JavaScript that\n provide configuration settings and optional custom game logic for your game. The script\n is deployed when you create a Amazon GameLift Realtime fleet to host your game sessions. Script logic is\n executed during an active game session.
\nTo create a new script record, specify a script name and provide the script file(s).\n The script files and all dependencies must be zipped into a single file. You can pull\n the zip file from either of these locations:
\nA locally available directory. Use the ZipFile parameter\n for this option.
\nAn Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the\n StorageLocation parameter for this option. You'll need\n to have an Identity Access Management (IAM) role that allows the Amazon GameLift service\n to access your S3 bucket.
\nIf the call is successful, a new script record is created with a unique script ID. If\n the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3\n bucket and the script record's storage location reflects this location. If the script\n file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as\n needed for deployment.
\n\n Learn more\n
\n\n Amazon GameLift Amazon GameLift Realtime\n
\n\n Set Up a Role for Amazon GameLift Access\n
\n\n Related actions\n
\n\n All APIs by task\n
" } }, "com.amazonaws.gamelift#CreateScriptInput": { @@ -4224,7 +4224,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a Realtime script. This operation permanently deletes the script record. If\n script files were uploaded, they are also deleted (files stored in an S3 bucket are not\n deleted).
\nTo delete a script, specify the script ID. Before deleting a script, be sure to\n terminate all fleets that are deployed with the script being deleted. Fleet instances\n periodically check for script updates, and if the script record no longer exists, the\n instance will go into an error state and be unable to host game sessions.
\n\n Learn more\n
\n\n Amazon GameLift Realtime Servers\n
\n\n Related actions\n
\n\n All APIs by task\n
" + "smithy.api#documentation": "Deletes a Realtime script. This operation permanently deletes the script record. If\n script files were uploaded, they are also deleted (files stored in an S3 bucket are not\n deleted).
\nTo delete a script, specify the script ID. Before deleting a script, be sure to\n terminate all fleets that are deployed with the script being deleted. Fleet instances\n periodically check for script updates, and if the script record no longer exists, the\n instance will go into an error state and be unable to host game sessions.
\n\n Learn more\n
\n\n Amazon GameLift Amazon GameLift Realtime\n
\n\n Related actions\n
\n\n All APIs by task\n
" } }, "com.amazonaws.gamelift#DeleteScriptInput": { @@ -4511,7 +4511,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes a compute resource from an Amazon GameLift Anywhere fleet.\n Deregistered computes can no longer host game sessions through Amazon GameLift.
\nFor an Anywhere fleet that's running the Amazon GameLift Agent, the Agent\n handles all compute registry tasks for you. For an Anywhere fleet that doesn't use the\n Agent, call this operation to deregister fleet computes.
\nTo deregister a compute, call this operation from the compute that's being\n deregistered and specify the compute name and the fleet ID.
" + "smithy.api#documentation": "Removes a compute resource from an Anywhere fleet. Deregistered computes can no longer\n host game sessions through Amazon GameLift. Use this operation with an Anywhere fleet that\n doesn't use the Amazon GameLift Agent For Anywhere fleets with the Agent, the Agent handles all\n compute registry tasks for you.
\nTo deregister a compute, call this operation from the compute that's being\n deregistered and specify the compute name and the fleet ID.
" } }, "com.amazonaws.gamelift#DeregisterComputeInput": { @@ -4733,7 +4733,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all\n computes in a fleet, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute.html.
\nTo request information on a specific compute, provide the fleet ID and compute\n name.
\nIf successful, this operation returns details for the requested compute resource.\n Depending on the fleet's compute type, the result includes the following information:
\nFor managed EC2 fleets, this operation returns information about the EC2\n instance.
\nFor Anywhere fleets, this operation returns information about the\n registered compute.
\nRetrieves properties for a specific compute resource in an Amazon GameLift fleet. You can list\n all computes in a fleet by calling ListCompute.
\n\n Request options\n
\nProvide the fleet ID and compute name. The compute name varies depending on the type\n of fleet.
\nFor a compute in a managed EC2 fleet, provide an instance ID. Each instance in\n the fleet is a compute.
\nFor a compute in a managed container fleet, provide a compute name. In a\n container fleet, each game server container group on a fleet instance is\n assigned a compute name.
\nFor a compute in an Anywhere fleet, provide a registered compute name.\n Anywhere fleet computes are created when you register a hosting resource with\n the fleet.
\n\n Results\n
\nIf successful, this operation returns details for the requested compute resource.\n Depending on the fleet's compute type, the result includes the following information:
\nFor a managed EC2 fleet, this operation returns information about the EC2\n instance.
\nFor an Anywhere fleet, this operation returns information about the registered\n compute.
\nThe unique identifier of the compute resource to retrieve properties for. For an\n Anywhere fleet compute, use the registered compute name. For an EC2 fleet instance, use\n the instance ID.
", + "smithy.api#documentation": "The unique identifier of the compute resource to retrieve properties for. For a\n managed container fleet or Anywhere fleet, use a compute name. For an EC2 fleet, use an\n instance ID. To retrieve a fleet's compute identifiers, call ListCompute.
", "smithy.api#required": {} } } @@ -6819,7 +6819,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves properties for a Realtime script.
\nTo request a script record, specify the script ID. If successful, an object containing\n the script properties is returned.
\n\n Learn more\n
\n\n Amazon GameLift Realtime Servers\n
\n\n Related actions\n
\n\n All APIs by task\n
" + "smithy.api#documentation": "Retrieves properties for a Realtime script.
\nTo request a script record, specify the script ID. If successful, an object containing\n the script properties is returned.
\n\n Learn more\n
\n\n Amazon GameLift Amazon GameLift Realtime\n
\n\n Related actions\n
\n\n All APIs by task\n
" } }, "com.amazonaws.gamelift#DescribeScriptInput": { @@ -8135,6 +8135,1986 @@ "traits": { "smithy.api#enumValue": "g5g.16xlarge" } + }, + "r6i_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.large" + } + }, + "r6i_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.xlarge" + } + }, + "r6i_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.2xlarge" + } + }, + "r6i_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.4xlarge" + } + }, + "r6i_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.8xlarge" + } + }, + "r6i_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.12xlarge" + } + }, + "r6i_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.16xlarge" + } + }, + "c6gd_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6gd.medium" + } + }, + "c6gd_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6gd.large" + } + }, + "c6gd_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6gd.xlarge" + } + }, + "c6gd_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6gd.2xlarge" + } + }, + "c6gd_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6gd.4xlarge" + } + }, + "c6gd_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6gd.8xlarge" + } + }, + "c6gd_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6gd.12xlarge" + } + }, + "c6gd_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6gd.16xlarge" + } + }, + "c6in_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.large" + } + }, + "c6in_xlarge": { + "target": 
"smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.xlarge" + } + }, + "c6in_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.2xlarge" + } + }, + "c6in_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.4xlarge" + } + }, + "c6in_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.8xlarge" + } + }, + "c6in_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.12xlarge" + } + }, + "c6in_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.16xlarge" + } + }, + "c7a_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.medium" + } + }, + "c7a_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.large" + } + }, + "c7a_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.xlarge" + } + }, + "c7a_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.2xlarge" + } + }, + "c7a_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.4xlarge" + } + }, + "c7a_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.8xlarge" + } + }, + "c7a_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.12xlarge" + } + }, + "c7a_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.16xlarge" + } + }, + "c7gd_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gd.medium" + } + }, + "c7gd_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gd.large" + } + }, + "c7gd_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gd.xlarge" + } + }, + "c7gd_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": 
"c7gd.2xlarge" + } + }, + "c7gd_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gd.4xlarge" + } + }, + "c7gd_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gd.8xlarge" + } + }, + "c7gd_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gd.12xlarge" + } + }, + "c7gd_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gd.16xlarge" + } + }, + "c7gn_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.medium" + } + }, + "c7gn_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.large" + } + }, + "c7gn_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.xlarge" + } + }, + "c7gn_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.2xlarge" + } + }, + "c7gn_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.4xlarge" + } + }, + "c7gn_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.8xlarge" + } + }, + "c7gn_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.12xlarge" + } + }, + "c7gn_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.16xlarge" + } + }, + "c7i_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.large" + } + }, + "c7i_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.xlarge" + } + }, + "c7i_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.2xlarge" + } + }, + "c7i_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.4xlarge" + } + }, + "c7i_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.8xlarge" + } + }, + "c7i_12xlarge": { + "target": 
"smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.12xlarge" + } + }, + "c7i_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.16xlarge" + } + }, + "m6a_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.large" + } + }, + "m6a_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.xlarge" + } + }, + "m6a_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.2xlarge" + } + }, + "m6a_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.4xlarge" + } + }, + "m6a_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.8xlarge" + } + }, + "m6a_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.12xlarge" + } + }, + "m6a_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.16xlarge" + } + }, + "m6gd_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6gd.medium" + } + }, + "m6gd_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6gd.large" + } + }, + "m6gd_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6gd.xlarge" + } + }, + "m6gd_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6gd.2xlarge" + } + }, + "m6gd_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6gd.4xlarge" + } + }, + "m6gd_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6gd.8xlarge" + } + }, + "m6gd_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6gd.12xlarge" + } + }, + "m6gd_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6gd.16xlarge" + } + }, + "m6i_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": 
"m6i.large" + } + }, + "m6i_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6i.xlarge" + } + }, + "m6i_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6i.2xlarge" + } + }, + "m6i_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6i.4xlarge" + } + }, + "m6i_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6i.8xlarge" + } + }, + "m6i_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6i.12xlarge" + } + }, + "m6i_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6i.16xlarge" + } + }, + "m7a_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.medium" + } + }, + "m7a_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.large" + } + }, + "m7a_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.xlarge" + } + }, + "m7a_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.2xlarge" + } + }, + "m7a_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.4xlarge" + } + }, + "m7a_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.8xlarge" + } + }, + "m7a_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.12xlarge" + } + }, + "m7a_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.16xlarge" + } + }, + "m7gd_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7gd.medium" + } + }, + "m7gd_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7gd.large" + } + }, + "m7gd_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7gd.xlarge" + } + }, + "m7gd_2xlarge": { + "target": "smithy.api#Unit", + 
"traits": { + "smithy.api#enumValue": "m7gd.2xlarge" + } + }, + "m7gd_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7gd.4xlarge" + } + }, + "m7gd_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7gd.8xlarge" + } + }, + "m7gd_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7gd.12xlarge" + } + }, + "m7gd_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7gd.16xlarge" + } + }, + "m7i_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.large" + } + }, + "m7i_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.xlarge" + } + }, + "m7i_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.2xlarge" + } + }, + "m7i_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.4xlarge" + } + }, + "m7i_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.8xlarge" + } + }, + "m7i_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.12xlarge" + } + }, + "m7i_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.16xlarge" + } + }, + "r6gd_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6gd.medium" + } + }, + "r6gd_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6gd.large" + } + }, + "r6gd_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6gd.xlarge" + } + }, + "r6gd_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6gd.2xlarge" + } + }, + "r6gd_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6gd.4xlarge" + } + }, + "r6gd_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6gd.8xlarge" + } + }, + 
"r6gd_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6gd.12xlarge" + } + }, + "r6gd_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6gd.16xlarge" + } + }, + "r7a_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.medium" + } + }, + "r7a_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.large" + } + }, + "r7a_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.xlarge" + } + }, + "r7a_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.2xlarge" + } + }, + "r7a_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.4xlarge" + } + }, + "r7a_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.8xlarge" + } + }, + "r7a_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.12xlarge" + } + }, + "r7a_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.16xlarge" + } + }, + "r7gd_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7gd.medium" + } + }, + "r7gd_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7gd.large" + } + }, + "r7gd_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7gd.xlarge" + } + }, + "r7gd_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7gd.2xlarge" + } + }, + "r7gd_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7gd.4xlarge" + } + }, + "r7gd_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7gd.8xlarge" + } + }, + "r7gd_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7gd.12xlarge" + } + }, + "r7gd_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "r7gd.16xlarge" + } + }, + "r7i_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.large" + } + }, + "r7i_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.xlarge" + } + }, + "r7i_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.2xlarge" + } + }, + "r7i_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.4xlarge" + } + }, + "r7i_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.8xlarge" + } + }, + "r7i_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.12xlarge" + } + }, + "r7i_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.16xlarge" + } + }, + "r7i_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.24xlarge" + } + }, + "r7i_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7i.48xlarge" + } + }, + "c5ad_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5ad.large" + } + }, + "c5ad_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5ad.xlarge" + } + }, + "c5ad_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5ad.2xlarge" + } + }, + "c5ad_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5ad.4xlarge" + } + }, + "c5ad_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5ad.8xlarge" + } + }, + "c5ad_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5ad.12xlarge" + } + }, + "c5ad_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5ad.16xlarge" + } + }, + "c5ad_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5ad.24xlarge" + } + }, + 
"c5n_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5n.large" + } + }, + "c5n_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5n.xlarge" + } + }, + "c5n_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5n.2xlarge" + } + }, + "c5n_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5n.4xlarge" + } + }, + "c5n_9xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5n.9xlarge" + } + }, + "c5n_18xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c5n.18xlarge" + } + }, + "r5ad_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5ad.large" + } + }, + "r5ad_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5ad.xlarge" + } + }, + "r5ad_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5ad.2xlarge" + } + }, + "r5ad_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5ad.4xlarge" + } + }, + "r5ad_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5ad.8xlarge" + } + }, + "r5ad_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5ad.12xlarge" + } + }, + "r5ad_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5ad.16xlarge" + } + }, + "r5ad_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5ad.24xlarge" + } + }, + "c6id_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6id.large" + } + }, + "c6id_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6id.xlarge" + } + }, + "c6id_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6id.2xlarge" + } + }, + "c6id_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "c6id.4xlarge" + } + }, + "c6id_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6id.8xlarge" + } + }, + "c6id_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6id.12xlarge" + } + }, + "c6id_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6id.16xlarge" + } + }, + "c6id_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6id.24xlarge" + } + }, + "c6id_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6id.32xlarge" + } + }, + "c8g_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.medium" + } + }, + "c8g_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.large" + } + }, + "c8g_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.xlarge" + } + }, + "c8g_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.2xlarge" + } + }, + "c8g_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.4xlarge" + } + }, + "c8g_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.8xlarge" + } + }, + "c8g_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.12xlarge" + } + }, + "c8g_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.16xlarge" + } + }, + "c8g_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.24xlarge" + } + }, + "c8g_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c8g.48xlarge" + } + }, + "m5ad_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5ad.large" + } + }, + "m5ad_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5ad.xlarge" + } + }, + 
"m5ad_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5ad.2xlarge" + } + }, + "m5ad_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5ad.4xlarge" + } + }, + "m5ad_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5ad.8xlarge" + } + }, + "m5ad_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5ad.12xlarge" + } + }, + "m5ad_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5ad.16xlarge" + } + }, + "m5ad_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5ad.24xlarge" + } + }, + "m5d_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5d.large" + } + }, + "m5d_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5d.xlarge" + } + }, + "m5d_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5d.2xlarge" + } + }, + "m5d_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5d.4xlarge" + } + }, + "m5d_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5d.8xlarge" + } + }, + "m5d_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5d.12xlarge" + } + }, + "m5d_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5d.16xlarge" + } + }, + "m5d_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5d.24xlarge" + } + }, + "m5dn_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5dn.large" + } + }, + "m5dn_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5dn.xlarge" + } + }, + "m5dn_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5dn.2xlarge" + } + }, + "m5dn_4xlarge": { + "target": "smithy.api#Unit", + "traits": 
{ + "smithy.api#enumValue": "m5dn.4xlarge" + } + }, + "m5dn_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5dn.8xlarge" + } + }, + "m5dn_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5dn.12xlarge" + } + }, + "m5dn_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5dn.16xlarge" + } + }, + "m5dn_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5dn.24xlarge" + } + }, + "m5n_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5n.large" + } + }, + "m5n_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5n.xlarge" + } + }, + "m5n_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5n.2xlarge" + } + }, + "m5n_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5n.4xlarge" + } + }, + "m5n_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5n.8xlarge" + } + }, + "m5n_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5n.12xlarge" + } + }, + "m5n_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5n.16xlarge" + } + }, + "m5n_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m5n.24xlarge" + } + }, + "m6id_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.large" + } + }, + "m6id_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.xlarge" + } + }, + "m6id_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.2xlarge" + } + }, + "m6id_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.4xlarge" + } + }, + "m6id_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.8xlarge" + } + }, + 
"m6id_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.12xlarge" + } + }, + "m6id_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.16xlarge" + } + }, + "m6id_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.24xlarge" + } + }, + "m6id_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6id.32xlarge" + } + }, + "m6idn_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.large" + } + }, + "m6idn_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.xlarge" + } + }, + "m6idn_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.2xlarge" + } + }, + "m6idn_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.4xlarge" + } + }, + "m6idn_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.8xlarge" + } + }, + "m6idn_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.12xlarge" + } + }, + "m6idn_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.16xlarge" + } + }, + "m6idn_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.24xlarge" + } + }, + "m6idn_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6idn.32xlarge" + } + }, + "m6in_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.large" + } + }, + "m6in_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.xlarge" + } + }, + "m6in_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.2xlarge" + } + }, + "m6in_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.4xlarge" + } + }, + "m6in_8xlarge": { + 
"target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.8xlarge" + } + }, + "m6in_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.12xlarge" + } + }, + "m6in_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.16xlarge" + } + }, + "m6in_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.24xlarge" + } + }, + "m6in_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6in.32xlarge" + } + }, + "m8g_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.medium" + } + }, + "m8g_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.large" + } + }, + "m8g_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.xlarge" + } + }, + "m8g_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.2xlarge" + } + }, + "m8g_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.4xlarge" + } + }, + "m8g_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.8xlarge" + } + }, + "m8g_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.12xlarge" + } + }, + "m8g_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.16xlarge" + } + }, + "m8g_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.24xlarge" + } + }, + "m8g_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m8g.48xlarge" + } + }, + "r5dn_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5dn.large" + } + }, + "r5dn_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5dn.xlarge" + } + }, + "r5dn_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "r5dn.2xlarge" + } + }, + "r5dn_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5dn.4xlarge" + } + }, + "r5dn_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5dn.8xlarge" + } + }, + "r5dn_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5dn.12xlarge" + } + }, + "r5dn_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5dn.16xlarge" + } + }, + "r5dn_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5dn.24xlarge" + } + }, + "r5n_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5n.large" + } + }, + "r5n_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5n.xlarge" + } + }, + "r5n_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5n.2xlarge" + } + }, + "r5n_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5n.4xlarge" + } + }, + "r5n_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5n.8xlarge" + } + }, + "r5n_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5n.12xlarge" + } + }, + "r5n_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5n.16xlarge" + } + }, + "r5n_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r5n.24xlarge" + } + }, + "r6a_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.large" + } + }, + "r6a_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.xlarge" + } + }, + "r6a_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.2xlarge" + } + }, + "r6a_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.4xlarge" + } + }, + "r6a_8xlarge": { + 
"target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.8xlarge" + } + }, + "r6a_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.12xlarge" + } + }, + "r6a_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.16xlarge" + } + }, + "r6a_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.24xlarge" + } + }, + "r6a_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.32xlarge" + } + }, + "r6a_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6a.48xlarge" + } + }, + "r6id_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.large" + } + }, + "r6id_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.xlarge" + } + }, + "r6id_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.2xlarge" + } + }, + "r6id_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.4xlarge" + } + }, + "r6id_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.8xlarge" + } + }, + "r6id_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.12xlarge" + } + }, + "r6id_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.16xlarge" + } + }, + "r6id_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.24xlarge" + } + }, + "r6id_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6id.32xlarge" + } + }, + "r6idn_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6idn.large" + } + }, + "r6idn_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6idn.xlarge" + } + }, + "r6idn_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "r6idn.2xlarge" + } + }, + "r6idn_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6idn.4xlarge" + } + }, + "r6idn_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6idn.8xlarge" + } + }, + "r6idn_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6idn.12xlarge" + } + }, + "r6idn_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6idn.16xlarge" + } + }, + "r6idn_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6idn.24xlarge" + } + }, + "r6idn_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6idn.32xlarge" + } + }, + "r6in_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.large" + } + }, + "r6in_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.xlarge" + } + }, + "r6in_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.2xlarge" + } + }, + "r6in_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.4xlarge" + } + }, + "r6in_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.8xlarge" + } + }, + "r6in_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.12xlarge" + } + }, + "r6in_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.16xlarge" + } + }, + "r6in_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.24xlarge" + } + }, + "r6in_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6in.32xlarge" + } + }, + "r8g_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.medium" + } + }, + "r8g_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": 
"r8g.large" + } + }, + "r8g_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.xlarge" + } + }, + "r8g_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.2xlarge" + } + }, + "r8g_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.4xlarge" + } + }, + "r8g_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.8xlarge" + } + }, + "r8g_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.12xlarge" + } + }, + "r8g_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.16xlarge" + } + }, + "r8g_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.24xlarge" + } + }, + "r8g_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.48xlarge" + } + }, + "m4_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m4.16xlarge" + } + }, + "c6a_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6a.32xlarge" + } + }, + "c6a_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6a.48xlarge" + } + }, + "c6i_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6i.32xlarge" + } + }, + "r6i_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.24xlarge" + } + }, + "r6i_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r6i.32xlarge" + } + }, + "c6in_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.24xlarge" + } + }, + "c6in_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c6in.32xlarge" + } + }, + "c7a_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.24xlarge" + } + }, + "c7a_32xlarge": { + "target": 
"smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.32xlarge" + } + }, + "c7a_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7a.48xlarge" + } + }, + "c7i_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.24xlarge" + } + }, + "c7i_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.48xlarge" + } + }, + "m6a_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.24xlarge" + } + }, + "m6a_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.32xlarge" + } + }, + "m6a_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6a.48xlarge" + } + }, + "m6i_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6i.24xlarge" + } + }, + "m6i_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m6i.32xlarge" + } + }, + "m7a_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.24xlarge" + } + }, + "m7a_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.32xlarge" + } + }, + "m7a_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7a.48xlarge" + } + }, + "m7i_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.24xlarge" + } + }, + "m7i_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "m7i.48xlarge" + } + }, + "r7a_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.24xlarge" + } + }, + "r7a_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.32xlarge" + } + }, + "r7a_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.48xlarge" + } } } }, @@ -8156,7 +10136,7 @@ "EventCode": { "target": 
"com.amazonaws.gamelift#EventCode", "traits": { - "smithy.api#documentation": "The type of event being logged.
\n\n Fleet state transition events:\n
\nFLEET_CREATED -- A fleet resource was successfully created with a status of\n NEW. Event messaging includes the fleet ID.
FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to\n DOWNLOADING. Amazon GameLift is downloading the compressed build and\n running install scripts.
FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING\n to VALIDATING. Amazon GameLift has successfully installed build and is now\n validating the build files.
FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to\n BUILDING. Amazon GameLift has successfully verified the build files and\n is now launching a fleet instance.
FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to\n ACTIVATING. Amazon GameLift is launching a game server process on the\n fleet instance and is testing its connectivity with the Amazon GameLift service.
FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING\n to ACTIVE. The fleet is now ready to host game sessions.
FLEET_STATE_ERROR -- The Fleet's status changed to ERROR.\n Describe the fleet event message for more details.
\n Fleet creation events (ordered by fleet creation\n activity):\n
\nFLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet\n instance.
\nFLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully\n downloaded to an instance, and Amazon GameLiftis now extracting the build files from the\n uploaded build. Failure at this stage prevents a fleet from moving to ACTIVE\n status. Logs for this stage display a list of the files that are extracted and\n saved on the instance. Access the logs by using the URL in\n PreSignedLogUrl.
\nFLEET_CREATION_RUNNING_INSTALLER -- The game server build files were\n successfully extracted, and Amazon GameLift is now running the build's install script\n (if one is included). Failure in this stage prevents a fleet from moving to\n ACTIVE status. Logs for this stage list the installation steps and whether or\n not the install completed successfully. Access the logs by using the URL in\n PreSignedLogUrl.
\nFLEET_CREATION_COMPLETED_INSTALLER -- The game server build files were\n successfully installed and validation of the installation will begin\n soon.
\nFLEET_CREATION_FAILED_INSTALLER -- The installed failed while attempting to\n install the build files. This event indicates that the failure occurred before\n Amazon GameLift could start validation.
\nFLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful,\n and the GameLift is now verifying that the game server launch paths, which are\n specified in the fleet's runtime configuration, exist. If any listed launch path\n exists, Amazon GameLift tries to launch a game server process and waits for the process\n to report ready. Failures in this stage prevent a fleet from moving to\n ACTIVE status. Logs for this stage list the launch paths in the\n runtime configuration and indicate whether each is found. Access the logs by\n using the URL in PreSignedLogUrl.
FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime\n configuration failed because the executable specified in a launch path does not\n exist on the instance.
\nFLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime\n configuration failed because the executable specified in a launch path failed to\n run on the fleet instance.
\nFLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation\n timed out. Try fleet creation again.
\nFLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of\n the steps in the fleet activation process. This event code indicates that the\n game build was successfully downloaded to a fleet instance, built, and\n validated, but was not able to start a server process. For more information, see\n Debug Fleet Creation Issues.
\nFLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain\n any instances based on the input fleet attributes. Try again at a different time\n or choose a different combination of fleet attributes such as fleet type,\n instance type, etc.
\nFLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet\n creation. Describe the fleet event message for more details.
\n\n VPC peering events:\n
\nFLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established\n between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.
\nFLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed.\n Event details and status information provide additional detail. A common reason\n for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4\n addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services\n account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html\n
\nFLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully\n deleted.
\n\n Spot instance events:\n
\nINSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a\n two-minute notification.
\nINSTANCE_RECYCLED -- A spot instance was determined to have a high risk \n of interruption and is scheduled to be recycled once it has no active \n game sessions.
\n\n Server process events:\n
\nSERVER_PROCESS_INVALID_PATH -- The game server executable or script could not\n be found based on the Fleet runtime configuration. Check that the launch path is\n correct based on the operating system of the Fleet.
\nSERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call\n InitSDK() within the time expected (5 minutes). Check your game\n session log to see why InitSDK() was not called in time.
SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call\n ProcessReady() within the time expected (5 minutes) after\n calling InitSDK(). Check your game session log to see why\n ProcessReady() was not called in time.
SERVER_PROCESS_CRASHED -- The server process exited without calling\n ProcessEnding(). Check your game session log to see why\n ProcessEnding() was not called.
SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a\n valid health check for too long and was therefore terminated by GameLift. Check\n your game session log to see if the thread became stuck processing a synchronous\n task for too long.
\nSERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly\n within the time expected after OnProcessTerminate() was sent. Check\n your game session log to see why termination took longer than expected.
SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly\n within the time expected (30 seconds) after calling\n ProcessEnding(). Check your game session log to see why termination\n took longer than expected.
\n Game session events:\n
\nGAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the\n expected time. Check your game session log to see why\n ActivateGameSession() took longer to complete than\n expected.
\n Other fleet events:\n
\nFLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings\n (desired instances, minimum/maximum scaling limits). Event messaging includes\n the new capacity settings.
\nFLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the\n fleet's game session protection policy setting. Event messaging includes both\n the old and new policy setting.
\nFLEET_DELETED -- A request to delete a fleet was initiated.
\nGENERIC_EVENT -- An unspecified event has occurred.
\nThe type of event being logged.
\n\n Fleet state transition events:\n
\nFLEET_CREATED -- A fleet resource was successfully created with a status of\n NEW. Event messaging includes the fleet ID.
FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to\n DOWNLOADING. Amazon GameLift is downloading the compressed build and\n running install scripts.
FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING\n to VALIDATING. Amazon GameLift has successfully installed build and is now\n validating the build files.
FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to\n BUILDING. Amazon GameLift has successfully verified the build files and\n is now launching a fleet instance.
FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to\n ACTIVATING. Amazon GameLift is launching a game server process on the\n fleet instance and is testing its connectivity with the Amazon GameLift service.
FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING\n to ACTIVE. The fleet is now ready to host game sessions.
FLEET_STATE_ERROR -- The Fleet's status changed to ERROR.\n Describe the fleet event message for more details.
\n Fleet creation events (ordered by fleet creation\n activity):\n
\nFLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet\n instance.
\nFLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully\n downloaded to an instance, and Amazon GameLiftis now extracting the build files from the\n uploaded build. Failure at this stage prevents a fleet from moving to ACTIVE\n status. Logs for this stage display a list of the files that are extracted and\n saved on the instance. Access the logs by using the URL in\n PreSignedLogUrl.
\nFLEET_CREATION_RUNNING_INSTALLER -- The game server build files were\n successfully extracted, and Amazon GameLift is now running the build's install script\n (if one is included). Failure in this stage prevents a fleet from moving to\n ACTIVE status. Logs for this stage list the installation steps and whether or\n not the install completed successfully. Access the logs by using the URL in\n PreSignedLogUrl.
\nFLEET_CREATION_COMPLETED_INSTALLER -- The game server build files were\n successfully installed and validation of the installation will begin\n soon.
\nFLEET_CREATION_FAILED_INSTALLER -- The installed failed while attempting to\n install the build files. This event indicates that the failure occurred before\n Amazon GameLift could start validation.
\nFLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful,\n and the GameLift is now verifying that the game server launch paths, which are\n specified in the fleet's runtime configuration, exist. If any listed launch path\n exists, Amazon GameLift tries to launch a game server process and waits for the process\n to report ready. Failures in this stage prevent a fleet from moving to\n ACTIVE status. Logs for this stage list the launch paths in the\n runtime configuration and indicate whether each is found. Access the logs by\n using the URL in PreSignedLogUrl.
FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime\n configuration failed because the executable specified in a launch path does not\n exist on the instance.
\nFLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime\n configuration failed because the executable specified in a launch path failed to\n run on the fleet instance.
\nFLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation\n timed out. Try fleet creation again.
\nFLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of\n the steps in the fleet activation process. This event code indicates that the\n game build was successfully downloaded to a fleet instance, built, and\n validated, but was not able to start a server process. For more information, see\n Debug Fleet Creation Issues.
\nFLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain\n any instances based on the input fleet attributes. Try again at a different time\n or choose a different combination of fleet attributes such as fleet type,\n instance type, etc.
\nFLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet\n creation. Describe the fleet event message for more details.
\n\n VPC peering events:\n
\nFLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established\n between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.
\nFLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed.\n Event details and status information provide additional detail. A common reason\n for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4\n addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services\n account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html\n
\nFLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully\n deleted.
\n\n Spot instance events:\n
\nINSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a\n two-minute notification.
\nINSTANCE_RECYCLED -- A spot instance was determined to have a high risk \n of interruption and is scheduled to be recycled once it has no active \n game sessions.
\n\n Server process events:\n
\nSERVER_PROCESS_INVALID_PATH -- The game server executable or script could not\n be found based on the Fleet runtime configuration. Check that the launch path is\n correct based on the operating system of the Fleet.
\nSERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call\n InitSDK() within the time expected (5 minutes). Check your game\n session log to see why InitSDK() was not called in time. This event\n is not emitted for managed container fleets and Anywhere fleets unless they're\n deployed with the Amazon GameLift Agent.
SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call\n ProcessReady() within the time expected (5 minutes) after\n calling InitSDK(). Check your game session log to see why\n ProcessReady() was not called in time.
SERVER_PROCESS_CRASHED -- The server process exited without calling\n ProcessEnding(). Check your game session log to see why\n ProcessEnding() was not called.
SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a\n valid health check for too long and was therefore terminated by GameLift. Check\n your game session log to see if the thread became stuck processing a synchronous\n task for too long.
\nSERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly\n within the time expected after OnProcessTerminate() was sent. Check\n your game session log to see why termination took longer than expected.
SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly\n within the time expected (30 seconds) after calling\n ProcessEnding(). Check your game session log to see why termination\n took longer than expected.
\n Game session events:\n
\nGAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the\n expected time. Check your game session log to see why\n ActivateGameSession() took longer to complete than\n expected.
\n Other fleet events:\n
\nFLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings\n (desired instances, minimum/maximum scaling limits). Event messaging includes\n the new capacity settings.
\nFLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the\n fleet's game session protection policy setting. Event messaging includes both\n the old and new policy setting.
\nFLEET_DELETED -- A request to delete a fleet was initiated.
\nGENERIC_EVENT -- An unspecified event has occurred.
\nThe operating system of the fleet's computing resources. A fleet's operating system is\n determined by the OS of the build or script that is deployed on this fleet. This\n attribute is used with fleets where ComputeType is\n EC2.
Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to Amazon GameLift server SDK version 5.\n
\nThe operating system of the fleet's computing resources. A fleet's operating system is\n determined by the OS of the build or script that is deployed on this fleet. This\n attribute is used with fleets where ComputeType is\n EC2.
Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to server SDK version 5.\n
\nDescribes an Amazon GameLift fleet of game hosting resources. Attributes differ based on\n the fleet's compute type, as follows:
\nEC2 fleet attributes identify a Build resource (for fleets with \n customer game server builds) or a Script resource (for Realtime Servers fleets).
Amazon GameLift Anywhere fleets have an abbreviated set of attributes, because most fleet configurations\n are set directly on the fleet's computes. Attributes include fleet identifiers and descriptive\n properties, creation/termination time, and fleet status.
\n\n Returned by:\n https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetAttributes\n
" + "smithy.api#documentation": "Describes an Amazon GameLift fleet of game hosting resources. Attributes differ based on\n the fleet's compute type, as follows:
\nEC2 fleet attributes identify a Build resource (for fleets with \n customer game server builds) or a Script resource (for Amazon GameLift Realtime fleets).
Amazon GameLift Anywhere fleets have an abbreviated set of attributes, because most fleet configurations\n are set directly on the fleet's computes. Attributes include fleet identifiers and descriptive\n properties, creation/termination time, and fleet status.
\n\n Returned by:\n https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetAttributes\n
" } }, "com.amazonaws.gamelift#FleetAttributesList": { @@ -9495,7 +11475,7 @@ "name": "gamelift" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon GameLift provides solutions for hosting session-based multiplayer game servers in the\n cloud, including tools for deploying, operating, and scaling game servers. Built on\n Amazon Web Services global computing infrastructure, GameLift helps you deliver high-performance,\n high-reliability, low-cost game servers while dynamically scaling your resource usage to\n meet player demand.
\n\n About Amazon GameLift solutions\n
\nGet more information on these Amazon GameLift solutions in the Amazon GameLift Developer Guide.
\nAmazon GameLift managed hosting -- Amazon GameLift offers a fully managed service to set up\n and maintain computing machines for hosting, manage game session and player\n session life cycle, and handle security, storage, and performance tracking. You\n can use automatic scaling tools to balance player demand and hosting costs,\n configure your game session management to minimize player latency, and add\n FlexMatch for matchmaking.
\nManaged hosting with Realtime Servers -- With Amazon GameLift Realtime Servers, you can quickly configure\n and set up ready-to-go game servers for your game. Realtime Servers provides a game server\n framework with core Amazon GameLift infrastructure already built in. Then use the full\n range of Amazon GameLift managed hosting features, including FlexMatch, for your\n game.
\nAmazon GameLift FleetIQ -- Use Amazon GameLift FleetIQ as a standalone service while hosting your games using EC2\n instances and Auto Scaling groups. Amazon GameLift FleetIQ provides optimizations for game\n hosting, including boosting the viability of low-cost Spot Instances gaming. For\n a complete solution, pair the Amazon GameLift FleetIQ and FlexMatch standalone services.
\nAmazon GameLift FlexMatch -- Add matchmaking to your game hosting solution. FlexMatch is a\n customizable matchmaking service for multiplayer games. Use FlexMatch as\n integrated with Amazon GameLift managed hosting or incorporate FlexMatch as a standalone\n service into your own hosting solution.
\n\n About this API Reference\n
\nThis reference guide describes the low-level service API for Amazon GameLift. With each topic\n in this guide, you can find links to language-specific SDK guides and the Amazon Web Services CLI\n reference. Useful links:
\nAmazon GameLift provides solutions for hosting session-based multiplayer game servers in the\n cloud, including tools for deploying, operating, and scaling game servers. Built on\n Amazon Web Services global computing infrastructure, GameLift helps you deliver high-performance,\n high-reliability, low-cost game servers while dynamically scaling your resource usage to\n meet player demand.
\n\n About Amazon GameLift solutions\n
\nGet more information on these Amazon GameLift solutions in the Amazon GameLift Developer Guide.
\nAmazon GameLift managed hosting -- Amazon GameLift offers a fully managed service to set up\n and maintain computing machines for hosting, manage game session and player\n session life cycle, and handle security, storage, and performance tracking. You\n can use automatic scaling tools to balance player demand and hosting costs,\n configure your game session management to minimize player latency, and add\n FlexMatch for matchmaking.
\nManaged hosting with Amazon GameLift Realtime -- With Amazon GameLift Amazon GameLift Realtime, you can quickly configure\n and set up ready-to-go game servers for your game. Amazon GameLift Realtime provides a game server\n framework with core Amazon GameLift infrastructure already built in. Then use the full\n range of Amazon GameLift managed hosting features, including FlexMatch, for your\n game.
\nAmazon GameLift FleetIQ -- Use Amazon GameLift FleetIQ as a standalone service while hosting your games using EC2\n instances and Auto Scaling groups. Amazon GameLift FleetIQ provides optimizations for game\n hosting, including boosting the viability of low-cost Spot Instances gaming. For\n a complete solution, pair the Amazon GameLift FleetIQ and FlexMatch standalone services.
\nAmazon GameLift FlexMatch -- Add matchmaking to your game hosting solution. FlexMatch is a\n customizable matchmaking service for multiplayer games. Use FlexMatch as\n integrated with Amazon GameLift managed hosting or incorporate FlexMatch as a standalone\n service into your own hosting solution.
\n\n About this API Reference\n
\nThis reference guide describes the low-level service API for Amazon GameLift. With each topic\n in this guide, you can find links to language-specific SDK guides and the Amazon Web Services CLI\n reference. Useful links:
\nA unique identifier for the game session. A game session ARN has the following format: \n arn:aws:gamelift:.
A unique identifier for the game session. A game session ARN has the following format: \n arn:aws:gamelift:.
A prioritized list of locations to use with a game session placement request and\n instructions on how to use it. This list overrides a queue's prioritized location list\n for a single game session placement request only. The list can include Amazon Web Services Regions,\n local zones, and custom locations (for Anywhere fleets). The fallback strategy instructs\n Amazon GameLift to use the override list for the first placement attempt only or for all\n placement attempts.
" + "smithy.api#documentation": "An alternative priority list of locations that's included with a game session\n placement request. When provided, the list overrides a queue's location order list for\n this game session placement request only. The list might include Amazon Web Services Regions, local\n zones, and custom locations (for Anywhere fleets). The fallback strategy tells Amazon GameLift\n what action to take (if any) in the event that it failed to place a new game session.
" } } }, @@ -12250,7 +14230,7 @@ } ], "traits": { - "smithy.api#documentation": "Requests authorization to remotely connect to a hosting resource in a Amazon GameLift managed\n fleet. This operation is not used with Amazon GameLift Anywhere fleets.
\n\n Request options\n
\nTo request access to a compute, specify the compute name and the fleet ID.
\n\n Results\n
\nIf successful, this operation returns a set of temporary Amazon Web Services credentials, including\n a two-part access key and a session token.
\nWith a managed EC2 fleet (where compute type is EC2), use these\n credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more\n details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User\n Guide.
Requests authorization to remotely connect to a hosting resource in a Amazon GameLift managed\n fleet. This operation is not used with Amazon GameLift Anywhere fleets.
\n\n Request options\n
\nProvide the fleet ID and compute name. The compute name varies depending on the type\n of fleet.
\nFor a compute in a managed EC2 fleet, provide an instance ID. Each instance in\n the fleet is a compute.
\nFor a compute in a managed container fleet, provide a compute name. In a\n container fleet, each game server container group on a fleet instance is\n assigned a compute name.
\n\n Results\n
\nIf successful, this operation returns a set of temporary Amazon Web Services credentials, including\n a two-part access key and a session token.
\nWith a managed EC2 fleet (where compute type is EC2), use these\n credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more\n details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User\n Guide.
A unique identifier for the compute resource that you want to connect to. For an EC2\n fleet compute, use the instance ID. Use\n https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute.html to retrieve compute identifiers.
", + "smithy.api#documentation": "A unique identifier for the compute resource that you want to connect to. For an EC2\n fleet, use an instance ID. For a managed container fleet, use a compute name. You can\n retrieve a fleet's compute names by calling ListCompute.
", "smithy.api#required": {} } } @@ -12622,7 +14602,7 @@ "OperatingSystem": { "target": "com.amazonaws.gamelift#OperatingSystem", "traits": { - "smithy.api#documentation": "Operating system that is running on this EC2 instance.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to Amazon GameLift server SDK version 5.\n
\nOperating system that is running on this EC2 instance.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to server SDK version 5.\n
\nA range of IP addresses and port settings that allow inbound traffic to connect to\n processes on an instance in a fleet. Processes are assigned an IP address/port number\n combination, which must fall into the fleet's allowed ranges.\n
\nFor Realtime Servers fleets, Amazon GameLift automatically opens two port ranges, one for TCP messaging\n and one for UDP.
" + "smithy.api#documentation": "A range of IP addresses and port settings that allow inbound traffic to connect to\n processes on an instance in a fleet. Processes are assigned an IP address/port number\n combination, which must fall into the fleet's allowed ranges.\n
\nFor Amazon GameLift Realtime fleets, Amazon GameLift automatically opens two port ranges, one for TCP messaging\n and one for UDP.
" } }, "com.amazonaws.gamelift#IpPermissionsList": { @@ -13239,7 +15219,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves information on the compute resources in an Amazon GameLift fleet. Use the pagination\n parameters to retrieve results in a set of sequential pages.
\n\n Request options:\n
\nRetrieve a list of all computes in a fleet. Specify a fleet ID.
\nRetrieve a list of all computes in a specific fleet location. Specify a fleet\n ID and location.
\n\n Results:\n
\nIf successful, this operation returns information on a set of computes. Depending on\n the type of fleet, the result includes the following information:
\nFor managed EC2 fleets (compute type EC2), this operation returns\n information about the EC2 instance. Compute names are EC2 instance IDs.
For Anywhere fleets (compute type ANYWHERE), this operation\n returns compute names and details as provided when the compute was registered\n with RegisterCompute. This includes\n GameLiftServiceSdkEndpoint or\n GameLiftAgentEndpoint.
Retrieves information on the compute resources in an Amazon GameLift fleet. Use the pagination\n parameters to retrieve results in a set of sequential pages.
\n\n Request options\n
\nRetrieve a list of all computes in a fleet. Specify a fleet ID.
\nRetrieve a list of all computes in a specific fleet location. Specify a fleet\n ID and location.
\n\n Results\n
\nIf successful, this operation returns information on a set of computes. Depending on\n the type of fleet, the result includes the following information:
\nFor a managed EC2 fleet (compute type EC2), this operation\n returns information about the EC2 instance. Compute names are EC2 instance\n IDs.
For an Anywhere fleet (compute type ANYWHERE), this operation\n returns compute names and details from when the compute was registered with\n RegisterCompute. This includes\n GameLiftServiceSdkEndpoint or\n GameLiftAgentEndpoint.
Retrieves a collection of container fleet resources in an Amazon Web Services Region. For fleets\n that have multiple locations, this operation retrieves fleets based on their home Region\n only.
\n\n Request options\n
\nGet a list of all fleets. Call this operation without specifying a container\n group definition.
\nGet a list of fleets filtered by container group definition. Provide the\n container group definition name or ARN value.
\nTo get a list of all Realtime Servers fleets with a specific configuration script,\n provide the script ID.
\nUse the pagination parameters to retrieve results as a set of sequential pages.
\nIf successful, this operation returns a collection of container fleets that match the request\n parameters. A NextToken value is also returned if there are more result pages to\n retrieve.
\nFleet IDs are returned in no particular order.
\nRetrieves a collection of container fleet resources in an Amazon Web Services Region. For fleets\n that have multiple locations, this operation retrieves fleets based on their home Region\n only.
\n\n Request options\n
\nGet a list of all fleets. Call this operation without specifying a container\n group definition.
\nGet a list of fleets filtered by container group definition. Provide the\n container group definition name or ARN value.
\nTo get a list of all Amazon GameLift Realtime fleets with a specific configuration script,\n provide the script ID.
\nUse the pagination parameters to retrieve results as a set of sequential pages.
\nIf successful, this operation returns a collection of container fleets that match the request\n parameters. A NextToken value is also returned if there are more result pages to\n retrieve.
\nFleet IDs are returned in no particular order.
\nRetrieves a collection of fleet resources in an Amazon Web Services Region. You can filter the\n result set to find only those fleets that are deployed with a specific build or script.\n For fleets that have multiple locations, this operation retrieves fleets based on their\n home Region only.
\nYou can use operation in the following ways:
\nTo get a list of all fleets in a Region, don't provide a build or script\n identifier.
\nTo get a list of all fleets where a specific game build is deployed, provide\n the build ID.
\nTo get a list of all Realtime Servers fleets with a specific configuration script,\n provide the script ID.
\nUse the pagination parameters to retrieve results as a set of sequential pages.
\nIf successful, this operation returns a list of fleet IDs that match the request\n parameters. A NextToken value is also returned if there are more result pages to\n retrieve.
\nFleet IDs are returned in no particular order.
\nRetrieves a collection of fleet resources in an Amazon Web Services Region. You can filter the\n result set to find only those fleets that are deployed with a specific build or script.\n For fleets that have multiple locations, this operation retrieves fleets based on their\n home Region only.
\nYou can use this operation in the following ways:
\nTo get a list of all fleets in a Region, don't provide a build or script\n identifier.
\nTo get a list of all fleets where a specific game build is deployed, provide\n the build ID.
\nTo get a list of all Amazon GameLift Realtime fleets with a specific configuration script,\n provide the script ID.
\nUse the pagination parameters to retrieve results as a set of sequential pages.
\nIf successful, this operation returns a list of fleet IDs that match the request\n parameters. A NextToken value is also returned if there are more result pages to\n retrieve.
\nFleet IDs are returned in no particular order.
\nRetrieves script records for all Realtime scripts that are associated with the Amazon Web Services\n account in use.
\n\n Learn more\n
\n\n Amazon GameLift Realtime Servers\n
\n\n Related actions\n
\n\n All APIs by task\n
", + "smithy.api#documentation": "Retrieves script records for all Realtime scripts that are associated with the Amazon Web Services\n account in use.
\n\n Learn more\n
\n\n Amazon GameLift Realtime\n
\n\n Related actions\n
\n\n All APIs by task\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -14342,7 +16322,7 @@ "LogDestination": { "target": "com.amazonaws.gamelift#LogDestination", "traits": { - "smithy.api#documentation": "The type of log collection to use for a fleet.
\n\n CLOUDWATCH -- (default value) Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group.
\n S3 -- Store logs in an Amazon S3 bucket that you define.
\n NONE -- Don't collect container logs.
The type of log collection to use for a fleet.
\n\n CLOUDWATCH -- (default value) Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group.
\n S3 -- Store logs in an Amazon S3 bucket that you define. This bucket must reside in the fleet's home Amazon Web Services Region.
\n NONE -- Don't collect container logs.
A custom sequence to use when prioritizing where to place new game sessions. Each\n priority type is listed once.
\n\n LATENCY -- Amazon GameLift prioritizes locations where the average player\n latency is lowest. Player latency data is provided in each game session\n placement request.
\n COST -- Amazon GameLift prioritizes destinations with the lowest current\n hosting costs. Cost is evaluated based on the location, instance type, and fleet\n type (Spot or On-Demand) of each destination in the queue.
\n DESTINATION -- Amazon GameLift prioritizes based on the list order of\n destinations in the queue configuration.
\n LOCATION -- Amazon GameLift prioritizes based on the provided order of\n locations, as defined in LocationOrder.
A custom sequence to use when prioritizing where to place new game sessions. Each\n priority type is listed once.
\n\n LATENCY -- Amazon GameLift prioritizes locations where the average player\n latency is lowest. Player latency data is provided in each game session\n placement request.
\n COST -- Amazon GameLift prioritizes queue destinations with the lowest\n current hosting costs. Cost is evaluated based on the destination's location,\n instance type, and fleet type (Spot or On-Demand).
\n DESTINATION -- Amazon GameLift prioritizes based on the list order of\n destinations in the queue configuration.
\n LOCATION -- Amazon GameLift prioritizes based on the provided order of\n locations, as defined in LocationOrder.
Custom prioritization settings for a game session queue to use when searching for\n available game servers to place new game sessions. This configuration replaces the\n default FleetIQ prioritization process.
\nBy default, a queue makes placements based on the following default\n prioritizations:
\nIf player latency data is included in a game session request, Amazon GameLift\n prioritizes placing game sessions where the average player latency is lowest.\n Amazon GameLift re-orders the queue's destinations and locations (for multi-location\n fleets) based on the following priorities: (1) the lowest average latency across\n all players, (2) the lowest hosting cost, (3) the queue's default destination\n order, and then (4), an alphabetic list of locations.
\nIf player latency data is not included, Amazon GameLift prioritizes placing game\n sessions in the queue's first destination. If that fleet has multiple locations,\n the game session is placed on the first location (when listed alphabetically).\n Amazon GameLift re-orders the queue's destinations and locations (for multi-location\n fleets) based on the following priorities: (1) the queue's default destination\n order, and then (2) an alphabetic list of locations.
\nCustom prioritization settings to use with a game session queue. Prioritization\n settings determine how the queue selects a game hosting resource to start a new game\n session. This configuration replaces the default prioritization process for\n queues.
\nBy default, a queue makes game session placements based on the following\n criteria:
\nWhen a game session request does not include player latency data, Amazon GameLift\n places game sessions based on the following priorities: (1) the queue's default\n destination order, and (2) for multi-location fleets, an alphabetic list of\n locations.
\nWhen a game session request includes player latency data, Amazon GameLift re-orders\n the queue's destinations to make placements where the average player latency is\n lowest. It reorders based the following priorities: (1) the lowest average\n latency across all players, (2) the lowest hosting cost, (3) the queue's default\n destination order, and (4) for multi-location fleets, an alphabetic list of\n locations.
\nInstructions for how to use the override list if the first round of placement attempts fails. The first round is a failure if \n Amazon GameLift searches all listed locations, in all of the queue's destinations, without finding an available hosting resource\n for a new game session. Valid strategies include:
\n\n DEFAULT_AFTER_SINGLE_PASS -- After the first round of placement attempts, discard the override list and\n use the queue's default location priority list. Continue to use the queue's default list until the placement request times out.
\n NONE -- Continue to use the override list for all rounds of placement attempts until the placement request times out.
Instructions for how to proceed if placement fails in every location on the priority\n override list. Valid strategies include:
\n\n DEFAULT_AFTER_SINGLE_PASS -- After attempting to place a new game session in\n every location on the priority override list, try to place a game session in\n queue's other locations. This is the default behavior.
\n NONE -- Limit placements to locations on the priority override list only.
An alternate list of prioritized locations for use with a game session queue. When\n this property is included in a StartGameSessionPlacement request, this list overrides the queue's default\n location prioritization, as defined in the queue's PriorityConfiguration setting (LocationOrder). This\n property overrides the queue's default priority list for individual placement requests\n only. Use this property only with queues that have a PriorityConfiguration\n setting that prioritizes first.
A priority configuration override list does not override a queue's\n FilterConfiguration setting, if the queue has one. Filter configurations are used to\n limit placements to a subset of the locations in a queue's destinations. If the\n override list includes a location that's not included in the FilterConfiguration\n allowed list, Amazon GameLift won't attempt to place a game session there.
\nAn alternate list of prioritized locations for use with a game session queue. When\n this property is included in a StartGameSessionPlacement request, the alternate list overrides the queue's\n default location priorities, as defined in the queue's PriorityConfiguration setting (LocationOrder). The\n override is valid for an individual placement request only. Use this property only with\n queues that have a PriorityConfiguration setting that prioritizes\n LOCATION first.
A priority configuration override list does not override a queue's\n FilterConfiguration setting, if the queue has one. Filter configurations are used to\n limit placements to a subset of the locations in a queue's destinations. If the\n override list includes a location that's not on the\n FilterConfiguration allowed list, Amazon GameLift won't attempt to place a\n game session there.
A set of instructions that define the set of server processes to run on computes in a\n fleet. Server processes run either an executable in a custom game build or a Realtime Servers\n script. Amazon GameLift launches the processes, manages their life cycle, and replaces them as\n needed. Computes check regularly for an updated runtime configuration.
\nAn Amazon GameLift instance is limited to 50 processes running concurrently. To calculate the\n total number of processes defined in a runtime configuration, add the values of the\n ConcurrentExecutions parameter for each server process. Learn more\n about Running Multiple\n Processes on a Fleet.
A set of instructions that define the set of server processes to run on computes in a\n fleet. Server processes run either an executable in a custom game build or an Amazon GameLift Realtime\n script. Amazon GameLift launches the processes, manages their life cycle, and replaces them as\n needed. Computes check regularly for an updated runtime configuration.
\nAn Amazon GameLift instance is limited to 50 processes running concurrently. To calculate the\n total number of processes defined in a runtime configuration, add the values of the\n ConcurrentExecutions parameter for each server process. Learn more\n about Running Multiple\n Processes on a Fleet.
A set of instructions for launching server processes on fleet computes. Server\n processes run either an executable in a custom game build or a Realtime Servers script. Server\n process configurations are part of a fleet's runtime configuration.
" + "smithy.api#documentation": "A set of instructions for launching server processes on fleet computes. Server\n processes run either an executable in a custom game build or a Amazon GameLift Realtime script. Server\n process configurations are part of a fleet's runtime configuration.
" } }, "com.amazonaws.gamelift#ServerProcessList": { @@ -16772,7 +18752,7 @@ } ], "traits": { - "smithy.api#documentation": "Makes a request to start a new game session using a game session queue. When\n processing a placement request in a queue, Amazon GameLift finds the best possible available\n resource to host the game session and prompts the resource to start the game session.
\n\n Request options\n
\nCall this API with the following minimum parameters:\n GameSessionQueueName,\n MaximumPlayerSessionCount, and\n PlacementID. You can also include game session data (data formatted\n as strings) or game properties (data formatted as key-value pairs) to pass to the new\n game session.
\nYou can change how Amazon GameLift chooses a hosting resource for the new game session.\n Prioritizing resources for game session placements is defined when you configure\n a game session queue. You can use the default prioritization process or specify\n a custom process by providing a \n PriorityConfiguration when you create or update a queue.
\nPrioritize based on resource cost and location, using the queue's\n configured priority settings. Call this API with the minimum\n parameters.
\nPrioritize based on latency. Include a set of values for\n PlayerLatencies. You can provide latency data\n with or without player session data. This option instructs Amazon GameLift to\n reorder the queue's prioritized locations list based on the latency\n data. If latency data is provided for multiple players, Amazon GameLift\n calculates each location's average latency for all players and reorders\n to find the lowest latency across all players. Don't include latency\n data if you're providing a custom list of locations.
\nPrioritize based on a custom list of locations. If you're using a\n queue that's configured to prioritize location first (see PriorityConfiguration for game session queues), use the\n PriorityConfigurationOverride parameter to\n substitute a different location list for this placement request. When\n prioritizing placements by location, Amazon GameLift searches each location in\n prioritized order to find an available hosting resource for the new game\n session. You can choose whether to use the override list for the first\n placement attempt only or for all attempts.
\nYou can request new player sessions for a group of players. Include the\n DesiredPlayerSessions parameter and include at minimum\n a unique player ID for each. You can also include player-specific data to pass\n to the new game session.
\n\n Result\n
\nIf successful, this request generates a new game session placement request and adds it\n to the game session queue for Amazon GameLift to process in turn. You can track the status of\n individual placement requests by calling DescribeGameSessionPlacement. A new game session is running if the status\n is FULFILLED and the request returns the game session connection\n information (IP address and port). If you include player session data, Amazon GameLift creates a\n player session for each player ID in the request.
The request results in a BadRequestException in the following\n situations:
If the request includes both PlayerLatencies and\n PriorityConfigurationOverride parameters.
\nIf the request includes the PriorityConfigurationOverride\n parameter and designates a queue doesn't prioritize locations.
\nAmazon GameLift continues to retry each placement request until it reaches the queue's timeout\n setting. If a request times out, you can resubmit the request to the same queue or try a\n different queue.
" + "smithy.api#documentation": "Makes a request to start a new game session using a game session queue. When\n processing a placement request, Amazon GameLift looks for the best possible available resource to\n host the game session, based on how the queue is configured to prioritize factors such\n as resource cost, latency, and location. After selecting an available resource, Amazon GameLift\n prompts the resource to start a game session. A placement request can include a list of\n players to create a set of player sessions. The request can also include information to\n pass to the new game session, such as to specify a game map or other options.
\n\n Request options\n
\nUse this operation to make the following types of requests.
\nRequest a placement using the queue's default prioritization process (see the\n default prioritization described in PriorityConfiguration). Include these required parameters:
\n\n GameSessionQueueName\n
\n MaximumPlayerSessionCount\n
\n PlacementID\n
Request a placement and prioritize based on latency. Include these\n parameters:
\nRequired parameters GameSessionQueueName,\n MaximumPlayerSessionCount,\n PlacementID.
\n PlayerLatencies. Include a set of latency values for\n destinations in the queue. When a request includes latency data, Amazon GameLift\n automatically reorders the queue's locations priority list based on\n lowest available latency values. If a request includes latency data for\n multiple players, Amazon GameLift calculates each location's average latency for\n all players and reorders to find the lowest latency across all\n players.
Don't include PriorityConfigurationOverride.
Prioritize based on a custom list of locations. If you're using a\n queue that's configured to prioritize location first (see PriorityConfiguration for game session queues), you can\n optionally use the PriorityConfigurationOverride\n parameter to substitute a different location priority list for this\n placement request. Amazon GameLift searches each location on the priority\n override list to find an available hosting resource for the new game\n session. Specify a fallback strategy to use in the event that Amazon GameLift\n fails to place the game session in any of the locations on the override\n list.
\nRequest a placement and prioritize based on a custom list of locations.\n
\nYou can request new player sessions for a group of players. Include the\n DesiredPlayerSessions parameter and include at minimum\n a unique player ID for each. You can also include player-specific data to pass\n to the new game session.
\n\n Result\n
\nIf successful, this operation generates a new game session placement request and adds\n it to the game session queue for processing. You can track the status of individual\n placement requests by calling DescribeGameSessionPlacement or by monitoring queue notifications. When the\n request status is FULFILLED, a new game session has started and the\n placement request is updated with connection information for the game session (IP\n address and port). If the request included player session data, Amazon GameLift creates a player\n session for each player ID in the request.
The request results in an InvalidRequestException in the following\n situations:
If the request includes both PlayerLatencies and\n PriorityConfigurationOverride parameters.
\nIf the request includes the PriorityConfigurationOverride\n parameter and specifies a queue that doesn't prioritize locations.
\nAmazon GameLift continues to retry each placement request until it reaches the queue's timeout\n setting. If a request times out, you can resubmit the request to the same queue or try a\n different queue.
" } }, "com.amazonaws.gamelift#StartGameSessionPlacementInput": { @@ -16835,7 +18815,7 @@ "PriorityConfigurationOverride": { "target": "com.amazonaws.gamelift#PriorityConfigurationOverride", "traits": { - "smithy.api#documentation": "A prioritized list of locations to use for the game session placement and instructions\n on how to use it. This list overrides a queue's prioritized location list for this game\n session placement request only. You can include Amazon Web Services Regions, local zones, and custom\n locations (for Anywhere fleets). Choose a fallback strategy to instruct Amazon GameLift to use\n the override list for the first placement attempt only or for all placement\n attempts.
" + "smithy.api#documentation": "A prioritized list of locations to use for the game session placement and instructions\n on how to use it. This list overrides a queue's prioritized location list for this game\n session placement request only. You can include Amazon Web Services Regions, local zones, and custom\n locations (for Anywhere fleets). You can choose to limit placements to locations on the\n override list only, or you can prioritize locations on the override list first and then\n fall back to the queue's other locations if needed. Choose a fallback strategy to use in\n the event that Amazon GameLift fails to place a game session in any of the locations on the\n priority override list.
" } } }, @@ -17105,7 +19085,7 @@ } ], "traits": { - "smithy.api#documentation": "Cancels a game session placement that is in PENDING status. To stop a\n placement, provide the placement ID values. If successful, the placement is moved to\n CANCELLED status.
Cancels a game session placement that's in PENDING status. To stop a\n placement, provide the placement ID value.
Results
\nIf successful, this operation removes the placement request from the queue and moves\n the GameSessionPlacement to CANCELLED status.
This operation results in an InvalidRequestException (400) error if a\n game session has already been created for this placement. You can clean up an unneeded\n game session by calling TerminateGameSession.
A unique identifier for the game session to be terminated. A game session ARN has the following format: \n arn:aws:gamelift:.
A unique identifier for the game session to be terminated. A game session ARN has the following format: \n arn:aws:gamelift:.
The platform that all containers in the group use. Containers in a group must run on the\n same operating system.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game\n servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game\n server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to\n Amazon GameLift server SDK version 5.\n
\nThe platform that all containers in the group use. Containers in a group must run on the\n same operating system.
\nAmazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game\n servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game\n server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to\n server SDK version 5.\n
\nInstructions for launching server processes on fleet computes. Server processes run\n either a custom game build executable or a Realtime Servers script. The runtime configuration lists\n the types of server processes to run, how to launch them, and the number of processes to\n run concurrently.
", + "smithy.api#documentation": "Instructions for launching server processes on fleet computes. Server processes run\n either a custom game build executable or a Amazon GameLift Realtime script. The runtime configuration lists\n the types of server processes to run, how to launch them, and the number of processes to\n run concurrently.
", "smithy.api#required": {} } } @@ -19045,7 +21025,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates Realtime script metadata and content.
\nTo update script metadata, specify the script ID and provide updated name and/or\n version values.
\nTo update script content, provide an updated zip file by pointing to either a local\n file or an Amazon S3 bucket location. You can use either method regardless of how the\n original script was uploaded. Use the Version parameter to track\n updates to the script.
\nIf the call is successful, the updated metadata is stored in the script record and a\n revised script is uploaded to the Amazon GameLift service. Once the script is updated and\n acquired by a fleet instance, the new version is used for all new game sessions.
\n\n Learn more\n
\n\n Amazon GameLift Realtime Servers\n
\n\n Related actions\n
\n\n All APIs by task\n
" + "smithy.api#documentation": "Updates Realtime script metadata and content.
\nTo update script metadata, specify the script ID and provide updated name and/or\n version values.
\nTo update script content, provide an updated zip file by pointing to either a local\n file or an Amazon S3 bucket location. You can use either method regardless of how the\n original script was uploaded. Use the Version parameter to track\n updates to the script.
\nIf the call is successful, the updated metadata is stored in the script record and a\n revised script is uploaded to the Amazon GameLift service. Once the script is updated and\n acquired by a fleet instance, the new version is used for all new game sessions.
\n\n Learn more\n
\n\n Amazon GameLift Realtime\n
\n\n Related actions\n
\n\n All APIs by task\n
" } }, "com.amazonaws.gamelift#UpdateScriptInput": { diff --git a/codegen/sdk/aws-models/gameliftstreams.json b/codegen/sdk/aws-models/gameliftstreams.json index 312577f947b..a094ec61440 100644 --- a/codegen/sdk/aws-models/gameliftstreams.json +++ b/codegen/sdk/aws-models/gameliftstreams.json @@ -13,7 +13,7 @@ } }, "traits": { - "smithy.api#documentation": "You don't have the required permissions to access this Amazon GameLift Streams resource. Correct the\n permissions before you try again.
", + "smithy.api#documentation": "You don't have the required permissions to access this Amazon GameLift Streams resource. Correct the permissions before you try again.
", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -58,7 +58,7 @@ "ec2:DescribeRegions" ] }, - "smithy.api#documentation": "\t\t\t\n\t\t Add locations that can host stream sessions. You configure locations and their corresponding capacity for each stream group. Creating a stream group in a location that's nearest to your end users can help minimize latency and improve quality.\n\t\t
\n\n This operation provisions stream capacity at the specified locations. By default, all locations have 1 or 2 capacity, depending on the stream class option: 2 for 'High' and 1 for 'Ultra' and 'Win2022'. This operation also copies the content files of all associated applications to an internal S3 bucket at each location. This allows Amazon GameLift Streams to host performant stream sessions.\n
", + "smithy.api#documentation": "Add locations that can host stream sessions. You configure locations and their corresponding capacity for each stream group. Creating a\n stream group in a location that's nearest to your end users can help minimize latency and improve quality.
\nThis operation provisions stream capacity at the specified locations. By default, all locations have 1 or 2 capacity, depending on the\n stream class option: 2 for 'High' and 1 for 'Ultra' and 'Win2022'. This operation also copies the content files of all associated\n applications to an internal S3 bucket at each location. This allows Amazon GameLift Streams to host performant stream sessions.
", "smithy.api#http": { "code": 200, "method": "POST", @@ -72,7 +72,7 @@ "Identifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "\n A stream group to add the specified locations to. \n
\nThis value is a \n\tAmazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: 1AB2C3De4.\n
A stream group to add the specified locations to.
\nThis value is a \n\tAmazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.\n
This value is the \n\tAmazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: 1AB2C3De4.\n
This value is the \n\tAmazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.\n
An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies the\n application across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS\n account]:application/[resource ID].
An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies the application across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
The current status of the application resource. Possible statuses include the\n following:
\n\n INITIALIZED: Amazon GameLift Streams has received the request and is initiating the\n work flow to create an application.
\n PROCESSING: The create application work flow is in process. Amazon GameLift Streams\n is copying the content and caching for future deployment in a stream\n group.
\n READY: The application is ready to deploy in a stream group.
\n ERROR: An error occurred when setting up the application. See\n StatusReason for more information.
\n DELETING: Amazon GameLift Streams is in the process of deleting the\n application.
The current status of the application resource. Possible statuses include the following:
\n\n INITIALIZED: Amazon GameLift Streams has received the request and is initiating the work flow to create an application.
\n PROCESSING: The create application work flow is in process. Amazon GameLift Streams is copying the content and caching for future\n deployment in a stream group.
\n READY: The application is ready to deploy in a stream group.
\n ERROR: An error occurred when setting up the application. See StatusReason for more information.
\n DELETING: Amazon GameLift Streams is in the process of deleting the application.
\n A set of configuration settings to run the application on a stream group. This configures the operating system, and can include compatibility layers and other drivers.\n
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS(Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
Configuration settings that identify the operating system for an application resource. This can also include a compatibility layer and\n other drivers.
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS (Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
Describes an application resource that represents a collection of content for\n streaming with Amazon GameLift Streams. To retrieve additional application details, call GetApplication.
" + "smithy.api#documentation": "Describes an application resource that represents a collection of content for streaming with Amazon GameLift Streams. To retrieve additional application\n details, call GetApplication.
" } }, "com.amazonaws.gameliftstreams#ApplicationSummaryList": { @@ -389,7 +389,7 @@ } } }, - "smithy.api#documentation": "When you associate, or link, an application with a stream group, then Amazon GameLift Streams can launch the application using the stream group's allocated compute resources. The stream group must be in ACTIVE status. You can reverse this action by using DisassociateApplications.
When you associate, or link, an application with a stream group, then Amazon GameLift Streams can launch the application using the stream group's\n allocated compute resources. The stream group must be in ACTIVE status. You can reverse this action by using DisassociateApplications.
A stream group to associate to the applications.
\nThis value is a \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
A stream group to associate to the applications.
\nThis value is a \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
A set of applications to associate with the stream group.
\nThis value is a \n set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6. \n
A set of applications to associate with the stream group.
\nThis value is a \n set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6. \n
A stream group that is associated to the applications.
\nThis value is a \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
A stream group that is associated to the applications.
\nThis value is a \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
A set of applications that are associated to the stream group.
\nThis value is a \n set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6. \n
A set of applications that are associated to the stream group.
\nThis value is a \n set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6. \n
The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.
", + "smithy.api#documentation": "The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the\n conflict before retrying this request.
", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -527,7 +527,7 @@ "s3:GetObject" ] }, - "smithy.api#documentation": "Creates an application resource in Amazon GameLift Streams, which specifies the application content you want to stream, such as a game build or other software, and configures the settings to run it.
\n\n Before you create an application, upload your\n application content files to an Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.\n
\n\n Make sure that your files in the Amazon S3 bucket are the correct version you \n want to use. As soon as you create a Amazon GameLift Streams application, you cannot change the files at a \n later time.\n
\n\n If the request is successful, Amazon GameLift Streams begins to create an application and sets the status to INITIALIZED. When an application reaches READY status, you can use the application to set up stream groups and start streams. To track application status, call GetApplication.\n
Creates an application resource in Amazon GameLift Streams, which specifies the application content you want to stream, such as a game build or other\n software, and configures the settings to run it.
\nBefore you create an application, upload your application content files to an Amazon Simple Storage Service (Amazon S3) bucket. For more information, see\n Getting Started in the Amazon GameLift Streams Developer Guide.
\nMake sure that your files in the Amazon S3 bucket are the correct version you want to use. As soon as you create a Amazon GameLift Streams application,\n you cannot change the files at a later time.
\n If the request is successful, Amazon GameLift Streams begins to create an application and sets the status to INITIALIZED. When an\n application reaches READY status, you can use the application to set up stream groups and start streams. To track application\n status, call GetApplication.
A set of configuration settings to run the application on a stream group. This configures the operating system, and can include compatibility layers and other drivers.
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS (Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
Configuration settings that identify the operating system for an application resource. This can also include a compatibility layer and\n other drivers.
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS (Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
The path and file name of the executable file that launches the content for streaming.\n Enter a path value that is relative to the location set in\n ApplicationSourceUri.
The path and file name of the executable file that launches the content for streaming. Enter a path value that is relative to the\n location set in ApplicationSourceUri.
The location of the content that you want to stream. Enter the URI of an Amazon S3 location\n (bucket name and prefixes) that contains your content. Use the following format for the\n URI: s3://[bucket name]/[prefix]. The location can have a multi-level\n prefix structure, but it must include all the files needed to run the content. Amazon GameLift Streams\n copies everything under the specified location.
This value is immutable. To designate a different content location, create a new\n application.
\nThe S3 bucket and the Amazon GameLift Streams application must be in the same Amazon Web Services Region.
\nThe location of the content that you want to stream. Enter an Amazon S3 URI to a bucket that contains your game or other application. The\n location can have a multi-level prefix structure, but it must include all the files needed to run the content. Amazon GameLift Streams copies everything\n under the specified location.
\nThis value is immutable. To designate a different content location, create a new application.
\nThe Amazon S3 bucket and the Amazon GameLift Streams application must be in the same Amazon Web Services Region.
\nLocations of log files that your content generates during a stream session. Enter path\n values that are relative to the ApplicationSourceUri location.\n You can specify up to 10 log locations.\n Amazon GameLift Streams uploads designated log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
Locations of log files that your content generates during a stream session. Enter path\n values that are relative to the ApplicationSourceUri location.\n You can specify up to 10 log paths.\n Amazon GameLift Streams uploads designated log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Use the following format for the URI: s3://[bucket name]/[prefix]. \n Required if you specify one or more LogPaths.
The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.
\nAn Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.
The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.
\nA list of labels to assign to the new application resource. Tags are developer-defined\n key-value pairs. Tagging Amazon Web Services resources is useful for resource management, access\n management and cost allocation. See Tagging Amazon Web Services Resources in the\n Amazon Web Services General Reference. You can use TagResource to add tags, UntagResource to remove tags,\n and ListTagsForResource to view tags on existing resources. The\n maximum tag limit might be lower than stated. See the Amazon Web Services General\n Reference for actual tagging limits.
" + "smithy.api#documentation": "A list of labels to assign to the new application resource. Tags are developer-defined key-value pairs. Tagging Amazon Web Services resources is\n useful for resource management, access management and cost allocation. See Tagging Amazon Web Services Resources in the Amazon Web Services General Reference. You can\n use TagResource to add tags, UntagResource to remove tags, and ListTagsForResource to view tags on existing resources.
" } }, "ClientToken": { @@ -601,9 +601,9 @@ "type": "structure", "members": { "Arn": { - "target": "com.amazonaws.gameliftstreams#Arn", + "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across\n all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS\n account]:application/[resource ID].
An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].
\n A set of configuration settings to run the application on a stream group. This configures the operating system, and can include compatibility layers and other drivers.\n
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS (Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
Configuration settings that identify the operating system for an application resource. This can also include a compatibility layer and\n other drivers.
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS (Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
The path and file name of the executable file that launches the content for\n streaming.
" + "smithy.api#documentation": "The path and file name of the executable file that launches the content for streaming.
" } }, "ApplicationLogPaths": { "target": "com.amazonaws.gameliftstreams#FilePaths", "traits": { - "smithy.api#documentation": "Locations of log files that your content generates during a stream session. \n Amazon GameLift Streams uploads log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
Locations of log files that your content generates during a stream session. \n Amazon GameLift Streams uploads log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Use the following format for the URI: s3://[bucket name]/[prefix]. \n Required if you specify one or more LogPaths.
An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
The current status of the application resource. Possible statuses include the\n following:
\n\n INITIALIZED: Amazon GameLift Streams has received the request and is initiating the\n work flow to create an application.
\n PROCESSING: The create application work flow is in process. Amazon GameLift Streams\n is copying the content and caching for future deployment in a stream\n group.
\n READY: The application is ready to deploy in a stream group.
\n ERROR: An error occurred when setting up the application. See\n StatusReason for more information.
\n DELETING: Amazon GameLift Streams is in the process of deleting the\n application.
The current status of the application resource. Possible statuses include the following:
\n\n INITIALIZED: Amazon GameLift Streams has received the request and is initiating the work flow to create an application.
\n PROCESSING: The create application work flow is in process. Amazon GameLift Streams is copying the content and caching for future\n deployment in a stream group.
\n READY: The application is ready to deploy in a stream group.
\n ERROR: An error occurred when setting up the application. See StatusReason for more information.
\n DELETING: Amazon GameLift Streams is in the process of deleting the application.
A short description of the status reason when the application is in ERROR\n status.
A short description of the status reason when the application is in ERROR status.
\n Manage how Amazon GameLift Streams streams your applications by using a stream group. A stream group is a collection of resources that Amazon GameLift Streams uses to stream your application to end-users. When you create a stream group, you specify an application to stream by default and the type of hardware to use, such as the graphical processing unit (GPU). You can also link additional applications, which allows you to stream those applications using this stream group. Depending on your expected users, you also scale the number of concurrent streams you want to support at one time, and in what locations.\n
\n\n Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity: always-on and on-demand:\n
\n\n Always-on: \n The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.\n\n
\n\n On-demand: \n The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).\n\n
\n\n To adjust the capacity of any ACTIVE stream group, call UpdateStreamGroup.\n
\n If the request is successful, Amazon GameLift Streams begins creating the stream group. Amazon GameLift Streams assigns a unique ID to the stream group resource and sets the status to ACTIVATING. When the stream group reaches ACTIVE status, you can start stream sessions by using StartStreamSession. To check the stream group's status, call GetStreamGroup.\n
Manage how Amazon GameLift Streams streams your applications by using a stream group. A stream group is a collection of resources that Amazon GameLift Streams uses to\n stream your application to end-users. When you create a stream group, you specify an application to stream by default and the type of\n hardware to use, such as the graphical processing unit (GPU). You can also link additional applications, which allows you to stream those\n applications using this stream group. Depending on your expected users, you also scale the number of concurrent streams you want to\n support at one time, and in what locations.
\n\n Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity: always-on and on-demand:\n
\n\n Always-on: \n The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.\n\n
\n\n On-demand: \n The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).\n\n
\n To adjust the capacity of any ACTIVE stream group, call UpdateStreamGroup.
If the request is successful, Amazon GameLift Streams begins creating the stream group. Amazon GameLift Streams assigns a unique ID to the stream group resource and\n sets the status to ACTIVATING. When the stream group reaches ACTIVE status, you can start stream sessions by\n using StartStreamSession. To check the stream\n group's status, call GetStreamGroup.
The target stream quality for sessions that are hosted in this stream group. Set a\n stream class that is appropriate to the type of content that you're streaming. Stream\n class determines the type of computing resources Amazon GameLift Streams uses and impacts the cost of\n streaming. The following options are available:
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.x builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.2 and 5.3 builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nThe target stream quality for sessions that are hosted in this stream group. Set a stream class that is appropriate to the type of\n content that you're streaming. Stream class determines the type of computing resources Amazon GameLift Streams uses and impacts the cost of streaming. The\n following options are available:
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nA list of labels to assign to the new stream group resource. Tags are\n developer-defined key-value pairs. It is useful to tag Amazon Web Services resources for resource\n management, access management, and cost allocation. See Tagging Amazon Web Services Resources in the\n Amazon Web Services General Reference. You can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags on existing resources.\n The maximum tag limit might be lower than stated. See the Amazon Web Services \n for actual tagging limits.
" + "smithy.api#documentation": "A list of labels to assign to the new stream group resource. Tags are developer-defined key-value pairs. Tagging Amazon Web Services resources is\n useful for resource management, access management and cost allocation. See Tagging Amazon Web Services Resources in the Amazon Web Services General Reference. You can\n use TagResource to add tags, UntagResource to remove tags, and ListTagsForResource to view tags on existing resources.
" } }, "ClientToken": { @@ -800,9 +803,9 @@ "type": "structure", "members": { "Arn": { - "target": "com.amazonaws.gameliftstreams#Arn", + "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies\n the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS\n account]:streamgroup/[resource ID].
An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].
The Amazon GameLift Streams application that is associated with this stream group.
" + "smithy.api#documentation": "The default Amazon GameLift Streams application that is associated with this stream group.
" } }, "LocationStates": { @@ -831,13 +834,13 @@ "target": "com.amazonaws.gameliftstreams#StreamClass", "traits": { "aws.cloudformation#cfnMutability": "create-and-read", - "smithy.api#documentation": "The target stream quality for the stream group.
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.x builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.2 and 5.3 builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nThe target stream quality for the stream group.
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nA unique ID value that is assigned to the resource when it's created. Format example:\n 1AB2C3De4.
A unique ID value that is assigned to the resource when it's created. Format example: sg-1AB2C3De4.
\n A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:\n
\n internalError: The request can't process right now because of an issue with the server. Try again later. Reach out to the Amazon GameLift Streams team for more help.\n
\n noAvailableInstances: Amazon GameLift Streams does not currently have enough available On-Demand capacity to fulfill your request. Wait a few minutes and retry the request as capacity can shift frequently. You can also try to make the request using a different stream class or in another region.\n
A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the\n following:
\n internalError: The request can't process right now because of an issue with the server. Try again later. Reach out to\n the Amazon GameLift Streams team for more help.
\n noAvailableInstances: Amazon GameLift Streams does not currently have enough available On-Demand capacity to fulfill your request.\n Wait a few minutes and retry the request as capacity can shift frequently. You can also try to make the request using a different\n stream class or in another region.
\n A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.\n
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6.\n
A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.\n
Allows clients to reconnect to a recently disconnected stream session without losing\n any data from the last session.
\nA client can reconnect to a stream session that's in\n PENDING_CLIENT_RECONNECTION or ACTIVE status. In the\n stream session life cycle, when the client disconnects from the stream session, the\n stream session transitions from CONNECTED to\n PENDING_CLIENT_RECONNECTION status. When a client requests to reconnect\n by calling CreateStreamSessionConnection, the stream session transitions to\n RECONNECTING status. When the reconnection is successful, the stream\n session transitions to ACTIVE status. After a stream session is\n disconnected for longer than ConnectionTimeoutSeconds, the stream session\n transitions to the TERMINATED status.
To connect to an existing stream session, specify the stream group ID and stream\n session ID that you want to reconnect to, as well as the signal request settings to use\n with the stream.
\n\n ConnectionTimeoutSeconds defines the amount of time after the stream\n session disconnects that a reconnection is allowed. If a client is disconnected from the\n stream for longer than ConnectionTimeoutSeconds, the stream session\n ends.
Allows clients to reconnect to a recently disconnected stream session without losing any data from the last session.
\nA client can reconnect to a stream session that's in PENDING_CLIENT_RECONNECTION or ACTIVE status. In the\n stream session life cycle, when the client disconnects from the stream session, the stream session transitions from CONNECTED\n to PENDING_CLIENT_RECONNECTION status. When a client requests to reconnect by calling\n CreateStreamSessionConnection, the stream session transitions to RECONNECTING status. When the reconnection\n is successful, the stream session transitions to ACTIVE status. After a stream session is disconnected for longer than\n ConnectionTimeoutSeconds, the stream session transitions to the TERMINATED status.
To connect to an existing stream session, specify the stream group ID and stream session ID that you want to reconnect to, as well as\n the signal request settings to use with the stream.
\n\n ConnectionTimeoutSeconds defines the amount of time after the stream session disconnects that a reconnection is allowed. If\n a client is disconnected from the stream for longer than ConnectionTimeoutSeconds, the stream session ends.
\n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
\n The stream group that you want to run this stream session with. The stream group must be in ACTIVE status and have idle stream capacity.\n
\n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
The stream group that you want to run this stream session with. The stream group must be in ACTIVE status and have idle\n stream capacity.
\n Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4.\n The stream session must be in\n PENDING_CLIENT_RECONNECTION or ACTIVE status.
\n Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4.\n The stream session must be in PENDING_CLIENT_RECONNECTION or ACTIVE status.
A WebRTC ICE offer string to use when initializing a WebRTC connection. The offer is a\n very long JSON string. Provide the string as a text value in quotes. The offer must be\n newly generated, not the same offer provided to StartStreamSession.
A WebRTC ICE offer string to use when initializing a WebRTC connection. The offer is a very long JSON string. Provide the string as a\n text value in quotes. The offer must be newly generated, not the same offer provided to StartStreamSession.
The WebRTC answer string that the stream server generates in response to the\n SignalRequest.
The WebRTC answer string that the stream server generates in response to the SignalRequest.
The default application of the stream group.
\nThis value is an \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An ID that uniquely identifies the application resource. For example: a-9ZY8X7Wv6.
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
An \n Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.\n
Represents the Amazon GameLift Streams application that a stream group hosts.
" + "smithy.api#documentation": "Represents the default Amazon GameLift Streams application that a stream group hosts.
" } }, "com.amazonaws.gameliftstreams#DeleteApplication": { @@ -1030,7 +1033,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to delete an application" }, - "smithy.api#documentation": "Permanently deletes an Amazon GameLift Streams application resource. This also deletes the\n application content files stored with Amazon GameLift Streams. However, this does not delete the\n original files that you uploaded to your Amazon S3 bucket; you can delete these any time\n after Amazon GameLift Streams creates an application, which is the only time Amazon GameLift Streams accesses your Amazon S3\n bucket.
\n\n You can only delete an application that meets the following conditions: \n
\nThe application is in READY or ERROR status. You cannot delete an application that's in PROCESSING or INITIALIZED status.
The application is not the default application of any stream groups. You must first delete the stream group by using DeleteStreamGroup.
\nThe application is not linked to any stream groups. You must first unlink the stream group by using DisassociateApplications.
\n\n An application is not streaming in any ongoing stream session. You must wait until the client ends the stream session or call TerminateStreamSession to end the stream.\n
\nIf any active stream groups exist for this application,\n this request returns a ValidationException.\n
Permanently deletes an Amazon GameLift Streams application resource. This also deletes the application content files stored with Amazon GameLift Streams. However,\n this does not delete the original files that you uploaded to your Amazon S3 bucket; you can delete these any time after Amazon GameLift Streams creates an\n application, which is the only time Amazon GameLift Streams accesses your Amazon S3 bucket.
\nYou can only delete an application that meets the following conditions:
\nThe application is in READY or ERROR status. You cannot delete an application that's in\n PROCESSING or INITIALIZED status.
The application is not the default application of any stream groups. You must first delete the stream group by using DeleteStreamGroup.
\nThe application is not linked to any stream groups. You must first unlink the stream group by using DisassociateApplications.
\nAn application is not streaming in any ongoing stream session. You must wait until the client ends the stream session or call\n TerminateStreamSession to end the\n stream.
\nIf any active stream groups exist for this application, this request returns a ValidationException.
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
Permanently deletes all compute resources and information related to a stream group. To delete a stream group, specify the unique stream\n group identifier. During the deletion process, the stream group's status is DELETING. This operation stops streams in\n progress and prevents new streams from starting. As a best practice, before deleting the stream group, call ListStreamSessions to check for streams in progress and take action to stop them. When you delete a stream group, any\n application associations referring to that stream group are automatically removed.
Permanently deletes all compute resources and information related to a stream group. To delete a stream group, specify the unique stream\n group identifier. During the deletion process, the stream group's status is DELETING. This operation stops streams in\n progress and prevents new streams from starting. As a best practice, before deleting the stream group, call ListStreamSessions to check for streams in progress and take action to stop\n them. When you delete a stream group, any application associations referring to that stream group are automatically removed.
The unique ID value of the stream group resource to delete. Format example:\n 1AB2C3De4.
The unique ID value of the stream group resource to delete. Format example: sg-1AB2C3De4.
\n When you disassociate, or unlink, an application from a stream group, you can no longer stream this application by using that stream group's allocated compute resources. Any streams in process will continue until they terminate, which helps avoid interrupting an end-user's stream. Amazon GameLift Streams will not initiate new streams using this stream group. The disassociate action does not affect the stream capacity of a stream group.\n
\n\n You can only disassociate an application if it's not a default application of the stream group. Check DefaultApplicationIdentifier by calling GetStreamGroup.\n
When you disassociate, or unlink, an application from a stream group, you can no longer stream this application by using that stream\n group's allocated compute resources. Any streams in process will continue until they terminate, which helps avoid interrupting an\n end-user's stream. Amazon GameLift Streams will not initiate new streams using this stream group. The disassociate action does not affect the stream\n capacity of a stream group.
\n You can only disassociate an application if it's not a default application of the stream group. Check\n DefaultApplicationIdentifier by calling GetStreamGroup.
A stream group to disassociate these applications from.
\nThis value is an \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
A stream group to disassociate these applications from.
\nThis value is an \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
A set of applications that you want to disassociate from the stream group.
\nThis value is a \n set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6. \n
A set of applications that you want to disassociate from the stream group.
\nThis value is a \n set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6. \n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
A set of applications that are disassociated from this stream group.
\nThis value is a \n set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6. \n
A set of applications that are disassociated from this stream group.
\nThis value is a \n set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6. \n
The result of the ExportStreamSessionFiles operation.
" + "smithy.api#documentation": "The result of the ExportStreamSessionFiles\n operation.
" } }, "StatusReason": { @@ -1267,7 +1270,7 @@ "OutputUri": { "target": "com.amazonaws.gameliftstreams#OutputUri", "traits": { - "smithy.api#documentation": " The S3 bucket URI where Amazon GameLift Streams uploaded the set of compressed exported files for a stream session. Amazon GameLift Streams generates a ZIP file name\n based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.
\n Example 1: If you provide an S3 URI called s3://MyBucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.\n
Example 2: If you provide an S3 URI called s3://MyBucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files\n at s3://MyBucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.
The S3 bucket URI where Amazon GameLift Streams uploaded the set of compressed exported files for a stream session. Amazon GameLift Streams generates a ZIP file name\n based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.
Example 1: If you provide an S3 URI called s3://MyBucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that\n location.
Example 2: If you provide an S3 URI called s3://MyBucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files\n at s3://MyBucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.
\n Export the files that your application modifies or generates in a stream session, which can help you debug or verify your application. When your application runs, it generates output files such as logs, diagnostic information, crash dumps, save files, user data, screenshots, and so on. The files can be defined by the engine or frameworks that your application uses, or information that you've programmed your application to output.\n
\n\n You can only call this action on a stream session that is in progress, specifically in one of the following statuses ACTIVE, CONNECTED, PENDING_CLIENT_RECONNECTION, and RECONNECTING. You must provide an Amazon Simple Storage Service (Amazon S3) bucket to store the files in. When the session ends, Amazon GameLift Streams produces a compressed folder that contains all of the files and directories that were modified or created by the application during the stream session. AWS uses your security credentials to authenticate and authorize access to your Amazon S3 bucket.\n
Amazon GameLift Streams collects the following generated and modified files. Find them in the\n corresponding folders in the .zip archive.
\n application/: The folder where your application or game is stored.\n
\n profile/: The user profile folder.
\n temp/: The system temp folder.
To verify the status of the exported files, use GetStreamSession.
\nTo delete the files, delete the object in the S3 bucket.
", + "smithy.api#documentation": "Export the files that your application modifies or generates in a stream session, which can help you debug or verify your application.\n When your application runs, it generates output files such as logs, diagnostic information, crash dumps, save files, user data,\n screenshots, and so on. The files can be defined by the engine or frameworks that your application uses, or information that you've\n programmed your application to output.
\n You can only call this action on a stream session that is in progress, specifically in one of the following statuses\n ACTIVE, CONNECTED, PENDING_CLIENT_RECONNECTION, and RECONNECTING. You must provide\n an Amazon Simple Storage Service (Amazon S3) bucket to store the files in. When the session ends, Amazon GameLift Streams produces a compressed folder\n that contains all of the files and directories that were modified or created by the application during the stream session. AWS uses your\n security credentials to authenticate and authorize access to your Amazon S3 bucket.
Amazon GameLift Streams collects the following generated and modified files. Find them in the corresponding folders in the .zip\n archive.
\n application/: The folder where your application or game is stored.
\n profile/: The user profile folder.
\n temp/: The system temp folder.
To verify the status of the exported files, use GetStreamSession.
\nTo delete the files, delete the object in the S3 bucket.
", "smithy.api#http": { "code": 200, "method": "PUT", @@ -1360,7 +1363,7 @@ "Identifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
The S3 bucket URI where Amazon GameLift Streams uploads the set of compressed exported files for this stream session. Amazon GameLift Streams generates a ZIP file name\n based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.
\n Example 1: If you provide an S3 URI called s3://MyBucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.\n
Example 2: If you provide an S3 URI called s3://MyBucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files\n at s3://MyBucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.
The S3 bucket URI where Amazon GameLift Streams uploads the set of compressed exported files for this stream session. Amazon GameLift Streams generates a ZIP file name\n based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.
Example 1: If you provide an S3 URI called s3://MyBucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that\n location.
Example 2: If you provide an S3 URI called s3://MyBucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files\n at s3://MyBucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.
Amazon GameLift Streams provides a global cloud solution for content streaming experiences. Use Amazon GameLift Streams\n tools to upload and configure content for streaming, deploy and scale computing resources\n to host streams, and manage stream session placement to meet customer demand.
\nThis Reference Guide describes the Amazon GameLift Streams service API. You can use the API through the\n Amazon Web Services SDK, the Command Line Interface (AWS CLI), or by making direct REST calls through HTTPS.
\nSee the Amazon GameLift Streams Developer Guide for more information on how Amazon GameLift Streams works and how to work with it.
", + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "x-amz-gameliftstreams-raw-errors" + ] + }, + "smithy.api#documentation": "Amazon GameLift Streams provides a global cloud solution for content streaming experiences. Use Amazon GameLift Streams tools to upload and configure content for\n streaming, deploy and scale computing resources to host streams, and manage stream session placement to meet customer demand.
\nThis Reference Guide describes the Amazon GameLift Streams service API. You can use the API through the Amazon Web Services SDK, the Command Line Interface (AWS CLI), or by making\n direct REST calls through HTTPS.
\nSee the Amazon GameLift Streams Developer Guide for more information on how Amazon GameLift Streams works and how to work with it.
", "smithy.api#title": "Amazon GameLift Streams", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1891,7 +1898,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to get an application" }, - "smithy.api#documentation": "Retrieves properties for an Amazon GameLift Streams application resource. Specify the ID of the\n application that you want to retrieve. If the operation is successful, it returns\n properties for the requested application.
", + "smithy.api#documentation": "Retrieves properties for an Amazon GameLift Streams application resource. Specify the ID of the application that you want to retrieve. If the\n operation is successful, it returns properties for the requested application.
", "smithy.api#http": { "code": 200, "method": "GET", @@ -1961,7 +1968,7 @@ "Identifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across\n all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS\n account]:application/[resource ID].
An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].
\n A set of configuration settings to run the application on a stream group. This configures the operating system, and can include compatibility layers and other drivers.\n
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS(Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
Configuration settings that identify the operating system for an application resource. This can also include a compatibility layer and\n other drivers.
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS (Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
The path and file name of the executable file that launches the content for\n streaming.
" + "smithy.api#documentation": "The path and file name of the executable file that launches the content for streaming.
" } }, "ApplicationLogPaths": { "target": "com.amazonaws.gameliftstreams#FilePaths", "traits": { - "smithy.api#documentation": "Locations of log files that your content generates during a stream session. \n Amazon GameLift Streams uploads log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
Locations of log files that your content generates during a stream session. \n Amazon GameLift Streams uploads log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Use the following format for the URI: s3://[bucket name]/[prefix]. \n Required if you specify one or more LogPaths.
An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
The current status of the application resource. Possible statuses include the\n following:
\n\n INITIALIZED: Amazon GameLift Streams has received the request and is initiating the\n work flow to create an application.
\n PROCESSING: The create application work flow is in process. Amazon GameLift Streams\n is copying the content and caching for future deployment in a stream\n group.
\n READY: The application is ready to deploy in a stream group.
\n ERROR: An error occurred when setting up the application. See\n StatusReason for more information.
\n DELETING: Amazon GameLift Streams is in the process of deleting the\n application.
The current status of the application resource. Possible statuses include the following:
\n\n INITIALIZED: Amazon GameLift Streams has received the request and is initiating the work flow to create an application.
\n PROCESSING: The create application work flow is in process. Amazon GameLift Streams is copying the content and caching for future\n deployment in a stream group.
\n READY: The application is ready to deploy in a stream group.
\n ERROR: An error occurred when setting up the application. See StatusReason for more information.
\n DELETING: Amazon GameLift Streams is in the process of deleting the application.
A short description of the status reason when the application is in ERROR\n status.
A short description of the status reason when the application is in ERROR status.
\n A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.\n
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4.\n
A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4.\n
Retrieves properties for a Amazon GameLift Streams stream group resource. Specify the ID of the stream\n group that you want to retrieve. If the operation is successful, it returns properties\n for the requested stream group.
", + "smithy.api#documentation": "Retrieves properties for a Amazon GameLift Streams stream group resource. Specify the ID of the stream group that you want to retrieve. If the operation\n is successful, it returns properties for the requested stream group.
", "smithy.api#http": { "code": 200, "method": "GET", @@ -2196,7 +2203,7 @@ "Identifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "The unique ID value of the stream group resource to retrieve. Format example:\n 1AB2C3De4.
The unique ID value of the stream group resource to retrieve. Format example: sg-1AB2C3De4.
An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies\n the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS\n account]:streamgroup/[resource ID].
An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].
The Amazon GameLift Streams application that is associated with this stream group.
" + "smithy.api#documentation": "The default Amazon GameLift Streams application that is associated with this stream group.
" } }, "LocationStates": { @@ -2242,13 +2249,13 @@ "target": "com.amazonaws.gameliftstreams#StreamClass", "traits": { "aws.cloudformation#cfnMutability": "create-and-read", - "smithy.api#documentation": "The target stream quality for the stream group.
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.x builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.2 and 5.3 builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nThe target stream quality for the stream group.
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nA unique ID value that is assigned to the resource when it's created. Format example:\n 1AB2C3De4.
A unique ID value that is assigned to the resource when it's created. Format example: sg-1AB2C3De4.
\n A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:\n
\n internalError: The request can't process right now because of an issue with the server. Try again later. Reach out to the Amazon GameLift Streams team for more help.\n
\n noAvailableInstances: Amazon GameLift Streams does not currently have enough available On-Demand capacity to fulfill your request. Wait a few minutes and retry the request as capacity can shift frequently. You can also try to make the request using a different stream class or in another region.\n
A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the\n following:
\n internalError: The request can't process right now because of an issue with the server. Try again later. Reach out to\n the Amazon GameLift Streams team for more help.
\n noAvailableInstances: Amazon GameLift Streams does not currently have enough available On-Demand capacity to fulfill your request.\n Wait a few minutes and retry the request as capacity can shift frequently. You can also try to make the request using a different\n stream class or in another region.
\n A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.\n
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6.\n
A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.\n
Retrieves properties for a Amazon GameLift Streams stream session resource. Specify the Amazon Resource Name (ARN) of the\n stream session that you want to retrieve and its stream group ARN. If the operation is successful, it returns properties\n for the requested resource.
", + "smithy.api#documentation": "Retrieves properties for a Amazon GameLift Streams stream session resource. Specify the Amazon Resource Name (ARN) of the stream session that you want to retrieve and its\n stream group ARN. If the operation is successful, it returns properties for the requested resource.
", "smithy.api#http": { "code": 200, "method": "GET", @@ -2386,7 +2393,7 @@ "Identifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "The stream group that runs this stream session.
\nThis value is an \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
The stream group that runs this stream session.
\nThis value is an \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
The Amazon Resource Name (ARN) assigned to the stream session resource. When combined with the stream group\n ARN, this value uniquely identifies it across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[resource\n ID].
The Amazon Resource Name (ARN) assigned to the stream session resource. When combined with the stream group ARN, this value uniquely identifies it across all\n Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[resource ID].
A human-readable label for the stream session. You can update this value at any\n time.
" + "smithy.api#documentation": "A human-readable label for the stream session. You can update this value at any time.
" } }, "StreamGroupId": { "target": "com.amazonaws.gameliftstreams#Id", "traits": { - "smithy.api#documentation": "The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream\n session.
" + "smithy.api#documentation": "The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session.
" } }, "UserId": { @@ -2434,13 +2441,13 @@ "Status": { "target": "com.amazonaws.gameliftstreams#StreamSessionStatus", "traits": { - "smithy.api#documentation": "The current status of the stream session. A stream session can host clients when in\n ACTIVE status.
The current status of the stream session. A stream session can host clients when in ACTIVE status.
A short description of the reason the stream session is in ERROR\n status.
A short description of the reason the stream session is in ERROR status.
The location where Amazon GameLift Streams is hosting the stream session.
\n\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, see \n\tthe Regions and quotas section in the Amazon GameLift Streams Developer Guide\n. \n
The location where Amazon GameLift Streams is hosting the stream session.
\n\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide. \n
The WebRTC ICE offer string that a client generates to initiate a connection to the\n stream session.
" + "smithy.api#documentation": "The WebRTC ICE offer string that a client generates to initiate a connection to the stream session.
" } }, "SignalResponse": { "target": "com.amazonaws.gameliftstreams#SignalResponse", "traits": { - "smithy.api#documentation": "The WebRTC answer string that the stream server generates in response to the\n SignalRequest.
The WebRTC answer string that the stream server generates in response to the SignalRequest.
The maximum length of time (in seconds) that Amazon GameLift Streams keeps the stream session open. At\n this point, Amazon GameLift Streams ends the stream session regardless of any existing client\n connections.
" + "smithy.api#documentation": "The maximum length of time (in seconds) that Amazon GameLift Streams keeps the stream session open. At this point, Amazon GameLift Streams ends the stream session\n regardless of any existing client connections.
" } }, "SessionLengthSeconds": { @@ -2494,13 +2501,13 @@ "LogFileLocationUri": { "target": "com.amazonaws.gameliftstreams#FileLocationUri", "traits": { - "smithy.api#documentation": "Access location for log files that your content generates during a stream session.\n These log files are uploaded to cloud storage location at the end of a stream session.\n The Amazon GameLift Streams application resource defines which log files to upload.
" + "smithy.api#documentation": "Access location for log files that your content generates during a stream session. These log files are uploaded to cloud storage\n location at the end of a stream session. The Amazon GameLift Streams application resource defines which log files to upload.
" } }, "WebSdkProtocolUrl": { "target": "com.amazonaws.gameliftstreams#WebSdkProtocolUrl", "traits": { - "smithy.api#documentation": "The URL of an S3 bucket that stores Amazon GameLift Streams WebSDK files. The URL is used to establish\n connection with the client.
" + "smithy.api#documentation": "The URL of an S3 bucket that stores Amazon GameLift Streams WebSDK files. The URL is used to establish connection with the client.
" } }, "LastUpdatedAt": { @@ -2518,7 +2525,7 @@ "ApplicationArn": { "target": "com.amazonaws.gameliftstreams#Arn", "traits": { - "smithy.api#documentation": "The application streaming in this session.
\nThis value is an \n Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6.\n
The application streaming in this session.
\nThis value is an \n Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.\n
Retrieves a list of all Amazon GameLift Streams applications that are associated with the\n Amazon Web Services account in use. This operation returns applications in all statuses, in no\n particular order. You can paginate the results as needed.
", + "smithy.api#documentation": "Retrieves a list of all Amazon GameLift Streams applications that are associated with the Amazon Web Services account in use. This operation returns applications in\n all statuses, in no particular order. You can paginate the results as needed.
", "smithy.api#http": { "code": 200, "method": "GET", @@ -2649,7 +2656,7 @@ "MaxResults": { "target": "com.amazonaws.gameliftstreams#MaxResults", "traits": { - "smithy.api#documentation": "The number of results to return. Use this parameter with NextToken to\n return results in sequential pages. Default value is 25.
The number of results to return. Use this parameter with NextToken to return results in sequential pages. Default value is\n 25.
A collection of Amazon GameLift Streams applications that are associated with the Amazon Web Services account in\n use. Each item includes application metadata and status.
" + "smithy.api#documentation": "A collection of Amazon GameLift Streams applications that are associated with the Amazon Web Services account in use. Each item includes application metadata and\n status.
" } }, "NextToken": { @@ -2704,7 +2711,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to list StreamGroups" }, - "smithy.api#documentation": "Retrieves a list of all Amazon GameLift Streams stream groups that are associated with the\n Amazon Web Services account in use. This operation returns stream groups in all statuses, in no\n particular order. You can paginate the results as needed.
", + "smithy.api#documentation": "Retrieves a list of all Amazon GameLift Streams stream groups that are associated with the Amazon Web Services account in use. This operation returns stream groups in\n all statuses, in no particular order. You can paginate the results as needed.
", "smithy.api#http": { "code": 200, "method": "GET", @@ -2745,7 +2752,7 @@ "MaxResults": { "target": "com.amazonaws.gameliftstreams#MaxResults", "traits": { - "smithy.api#documentation": "The number of results to return. Use this parameter with NextToken to\n return results in sequential pages. Default value is 25.
The number of results to return. Use this parameter with NextToken to return results in sequential pages. Default value is\n 25.
A collection of Amazon GameLift Streams stream groups that are associated with the Amazon Web Services account in\n use. Each item includes stream group metadata and status, but doesn't include capacity\n information.
" + "smithy.api#documentation": "A collection of Amazon GameLift Streams stream groups that are associated with the Amazon Web Services account in use. Each item includes stream group metadata and\n status, but doesn't include capacity information.
" } }, "NextToken": { @@ -2809,7 +2816,7 @@ } } }, - "smithy.api#documentation": "Retrieves a list of Amazon GameLift Streams stream sessions that a stream group is hosting.
\nTo retrieve stream sessions, specify the stream group, and optionally filter by stream\n session status. You can paginate the results as needed.
\nThis operation returns the requested stream sessions in no particular order.
", + "smithy.api#documentation": "Retrieves a list of Amazon GameLift Streams stream sessions that a stream group is hosting.
\nTo retrieve stream sessions, specify the stream group, and optionally filter by stream session status. You can paginate the results as\n needed.
\nThis operation returns the requested stream sessions in no particular order.
", "smithy.api#http": { "code": 200, "method": "GET", @@ -2868,7 +2875,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to list stream sessions" }, - "smithy.api#documentation": "Retrieves a list of Amazon GameLift Streams stream sessions that this user account has access to.
\nIn the returned list of stream sessions, the ExportFilesMetadata property only shows the Status value. To get the OutputUri and StatusReason values, use GetStreamSession.
We don't recommend using this operation to regularly check stream session statuses because it's costly. Instead, to check status updates\n for a specific stream session, use GetStreamSession.
", + "smithy.api#documentation": "Retrieves a list of Amazon GameLift Streams stream sessions that this user account has access to.
\nIn the returned list of stream sessions, the ExportFilesMetadata property only shows the Status value. To get\n the OutputUri and StatusReason values, use GetStreamSession.
We don't recommend using this operation to regularly check stream session statuses because it's costly. Instead, to check status updates\n for a specific stream session, use GetStreamSession.
", "smithy.api#http": { "code": 200, "method": "GET", @@ -2902,14 +2909,14 @@ "Status": { "target": "com.amazonaws.gameliftstreams#StreamSessionStatus", "traits": { - "smithy.api#documentation": "Filter by the stream session status. You can specify one status in each request\n to retrieve only sessions that are currently in that status.
", + "smithy.api#documentation": "Filter by the stream session status. You can specify one status in each request to retrieve only sessions that are currently in that\n status.
", "smithy.api#httpQuery": "Status" } }, "ExportFilesStatus": { "target": "com.amazonaws.gameliftstreams#ExportFilesStatus", "traits": { - "smithy.api#documentation": "Filter by the exported files status. You can specify one status in each request\n to retrieve only sessions that currently have that exported files status.
", + "smithy.api#documentation": "Filter by the exported files status. You can specify one status in each request to retrieve only sessions that currently have that\n exported files status.
", "smithy.api#httpQuery": "ExportFilesStatus" } }, @@ -2938,7 +2945,7 @@ "Items": { "target": "com.amazonaws.gameliftstreams#StreamSessionSummaryList", "traits": { - "smithy.api#documentation": "A collection of Amazon GameLift Streams stream sessions that are associated with a stream group and\n returned in response to a list request. Each item includes stream session metadata and\n status.
" + "smithy.api#documentation": "A collection of Amazon GameLift Streams stream sessions that are associated with a stream group and returned in response to a list request. Each item\n includes stream session metadata and status.
" } }, "NextToken": { @@ -2958,14 +2965,14 @@ "Status": { "target": "com.amazonaws.gameliftstreams#StreamSessionStatus", "traits": { - "smithy.api#documentation": "Filter by the stream session status. You can specify one status in each request\n to retrieve only sessions that are currently in that status.
", + "smithy.api#documentation": "Filter by the stream session status. You can specify one status in each request to retrieve only sessions that are currently in that\n status.
", "smithy.api#httpQuery": "Status" } }, "ExportFilesStatus": { "target": "com.amazonaws.gameliftstreams#ExportFilesStatus", "traits": { - "smithy.api#documentation": "Filter by the exported files status. You can specify one status in each request\n to retrieve only sessions that currently have that exported files status.
\n\n\t\tExported files can be in one of the following states:\n\t
\n\n SUCCEEDED: The exported files are successfully stored in S3 bucket.\n\t\t\t\t
\n\n FAILED: The session ended but Amazon GameLift Streams couldn't collect and upload the files to S3. \n\t\t\t\t
\n\n PENDING: Either the stream session is still in progress, or uploading the exported files to the S3 bucket is in progress.\n\t\t\t\t
\nFilter by the exported files status. You can specify one status in each request to retrieve only sessions that currently have that\n exported files status.
\n\n\t\tExported files can be in one of the following states:\n\t
\n\n SUCCEEDED: The exported files are successfully stored in S3 bucket.\n\t\t\t\t
\n\n FAILED: The session ended but Amazon GameLift Streams couldn't collect and upload the files to S3. \n\t\t\t\t
\n\n PENDING: Either the stream session is still in progress, or uploading the exported files to the S3 bucket is in progress.\n\t\t\t\t
\nThe unique identifier of a Amazon GameLift Streams stream group to retrieve the stream session for.\n You can use either the stream group ID or the Amazon Resource Name (ARN).
", + "smithy.api#documentation": "The unique identifier of a Amazon GameLift Streams stream group to retrieve the stream session for. You can use either the stream group ID or the\n Amazon Resource Name (ARN).
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3002,7 +3009,7 @@ "Items": { "target": "com.amazonaws.gameliftstreams#StreamSessionSummaryList", "traits": { - "smithy.api#documentation": "A collection of Amazon GameLift Streams stream sessions that are associated with a stream group and\n returned in response to a list request. Each item includes stream session metadata and\n status.
" + "smithy.api#documentation": "A collection of Amazon GameLift Streams stream sessions that are associated with a stream group and returned in response to a list request. Each item\n includes stream session metadata and status.
" } }, "NextToken": { @@ -3048,7 +3055,7 @@ } } }, - "smithy.api#documentation": "Retrieves all tags assigned to a Amazon GameLift Streams resource. To list tags for a resource, specify\n the ARN value for the resource.
\n\n Learn more\n
\n\n Tagging Amazon Web Services Resources in the\n Amazon Web Services General Reference\n
\n\n \n Amazon Web Services Tagging Strategies\n
", + "smithy.api#documentation": "Retrieves all tags assigned to a Amazon GameLift Streams resource. To list tags for a resource, specify the ARN value for the resource.
\n\n Learn more\n
\n\n Tagging Amazon Web Services Resources in the Amazon Web Services General\n Reference\n
\n\n Amazon Web Services Tagging Strategies\n
", "smithy.api#http": { "uri": "/tags/{ResourceArn}", "method": "GET" @@ -3079,7 +3086,7 @@ "ResourceArn": { "target": "com.amazonaws.gameliftstreams#Arn", "traits": { - "smithy.api#documentation": "The (Amazon Resource Name (ARN) that you want to retrieve tags for. To get a Amazon GameLift Streams resource ARN, call a\n List or Get operation for the resource.
", + "smithy.api#documentation": "The (Amazon Resource Name (ARN) that you want to retrieve tags for. To get a\n Amazon GameLift Streams resource ARN, call a List or Get operation for the resource.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3109,7 +3116,7 @@ "LocationName": { "target": "com.amazonaws.gameliftstreams#LocationName", "traits": { - "smithy.api#documentation": "\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, see \n\tthe Regions and quotas section in the Amazon GameLift Streams Developer Guide\n. \n
\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide. \n
Configuration settings that define a stream group's stream capacity for a location. When configuring a location for the first time, you must specify a numeric value for at least one of the two capacity types. To update the capacity for an existing stream group, call UpdateStreamGroup. To add a new location and specify its capacity, call AddStreamGroupLocations.
" + "smithy.api#documentation": "Configuration settings that define a stream group's stream capacity for a location. When configuring a location for the first time, you\n must specify a numeric value for at least one of the two capacity types. To update the capacity for an existing stream group, call UpdateStreamGroup. To add a new location and specify its\n capacity, call AddStreamGroupLocations.
" } }, "com.amazonaws.gameliftstreams#LocationConfigurations": { @@ -3171,7 +3178,7 @@ "LocationName": { "target": "com.amazonaws.gameliftstreams#LocationName", "traits": { - "smithy.api#documentation": "\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, see \n\tthe Regions and quotas section in the Amazon GameLift Streams Developer Guide\n. \n
\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide. \n
\n Removes a set of remote locations from this stream group. Amazon GameLift Streams works to release allocated compute resources in these location. Thus, stream sessions can no longer start from these locations by using this stream group. Amazon GameLift Streams also deletes the content files of all associated applications that were in Amazon GameLift Streams's internal S3 bucket at this location.\n
\n\n You cannot remove the region where you initially created this stream group, known as the primary location. However, you can set the stream capacity to zero. \n
", + "smithy.api#documentation": "Removes a set of remote locations from this stream group. Amazon GameLift Streams works to release allocated compute resources in these location. Thus,\n stream sessions can no longer start from these locations by using this stream group. Amazon GameLift Streams also deletes the content files of all\n associated applications that were in Amazon GameLift Streams's internal S3 bucket at this location.
\nYou cannot remove the region where you initially created this stream group, known as the primary location. However, you can set the stream\n capacity to zero.
", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -3329,7 +3336,7 @@ "Identifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "\n A stream group to remove the specified locations from. \n
\n\n This value is a \n\tAmazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: 1AB2C3De4.\n\n
A stream group to remove the specified locations from.
\n This value is a \n\tAmazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.\n
\n A set of locations to remove this stream group. \n
\n\n A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, see \n\tthe Regions and quotas section in the Amazon GameLift Streams Developer Guide\n.\n\n
A set of locations to remove this stream group.
\n A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.\n
\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, see \n\tthe Regions and quotas section in the Amazon GameLift Streams Developer Guide\n. \n
\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide. \n
Represents the status of the replication of an application to a location. An application cannot be streamed from a location until it has finished replicating there.
" + "smithy.api#documentation": "Represents the status of the replication of an application to a location. An application cannot be streamed from a location until it has\n finished replicating there.
" } }, "com.amazonaws.gameliftstreams#ReplicationStatusType": { @@ -3428,7 +3435,7 @@ } }, "traits": { - "smithy.api#documentation": "Configuration settings that identify the operating system for an application\n resource. This can also include a compatibility layer and other drivers.
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS(Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
Configuration settings that identify the operating system for an application resource. This can also include a compatibility layer and\n other drivers.
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS (Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
The request would cause the resource to exceed an allowed service quota. Resolve the\n issue before you try again.
", + "smithy.api#documentation": "The request would cause the resource to exceed an allowed service quota. Resolve the issue before you try again.
", "smithy.api#error": "client", "smithy.api#httpError": 402 } @@ -3522,6 +3529,9 @@ { "target": "com.amazonaws.gameliftstreams#InternalServerException" }, + { + "target": "com.amazonaws.gameliftstreams#ResourceNotFoundException" + }, { "target": "com.amazonaws.gameliftstreams#ThrottlingException" }, @@ -3539,7 +3549,7 @@ } } }, - "smithy.api#documentation": "This action initiates a new stream session and outputs connection information that clients can use to access the stream. A stream\n session refers to an instance of a stream that Amazon GameLift Streams transmits from the server to the end-user. A stream session runs on a compute\n resource, or stream capacity, that a stream group has allocated.
\nTo start a new stream session, specify a stream group and application ID, along with the transport protocol and signal request settings\n to use with the stream. You must have associated at least one application to the stream group before starting a stream session, either when\n creating the stream group, or by using AssociateApplications.
\n For stream groups that have multiple locations, provide a set of locations ordered by priority by setting Locations.\n Amazon GameLift Streams will start a single stream session in the next available location. An application must be finished replicating in a remote\n location before the remote location can host a stream.
If the request is successful, Amazon GameLift Streams begins to prepare the stream. Amazon GameLift Streams assigns an Amazon Resource Name (ARN) value to the stream\n session resource and sets the status to ACTIVATING. During the stream preparation process, Amazon GameLift Streams queues the request and\n searches for available stream capacity to run the stream. This can result to one of the following:
Amazon GameLift Streams identifies an available compute resource to run the application content and start the stream. When the stream is ready,\n the stream session's status changes to ACTIVE and includes stream connection information. Provide the connection\n information to the requesting client to join the stream session.
Amazon GameLift Streams doesn't identify an available resource within a certain time, set by ClientToken. In this case, Amazon GameLift Streams\n stops processing the request, and the stream session object status changes to ERROR with status reason\n placementTimeout.
This action initiates a new stream session and outputs connection information that clients can use to access the stream. A stream\n session refers to an instance of a stream that Amazon GameLift Streams transmits from the server to the end-user. A stream session runs on a compute\n resource, or stream capacity, that a stream group has allocated.
\nTo start a new stream session, specify a stream group and application ID, along with the transport protocol and signal request settings\n to use with the stream. You must have associated at least one application to the stream group before starting a stream session, either\n when creating the stream group, or by using AssociateApplications.
\n For stream groups that have multiple locations, provide a set of locations ordered by priority by setting Locations.\n Amazon GameLift Streams will start a single stream session in the next available location. An application must be finished replicating in a remote\n location before the remote location can host a stream.
If the request is successful, Amazon GameLift Streams begins to prepare the stream. Amazon GameLift Streams assigns an Amazon Resource Name (ARN) value to the stream\n session resource and sets the status to ACTIVATING. During the stream preparation process, Amazon GameLift Streams queues the request and\n searches for available stream capacity to run the stream. This can result to one of the following:
Amazon GameLift Streams identifies an available compute resource to run the application content and start the stream. When the stream is ready,\n the stream session's status changes to ACTIVE and includes stream connection information. Provide the connection\n information to the requesting client to join the stream session.
Amazon GameLift Streams doesn't identify an available resource within a certain time, set by ClientToken. In this case, Amazon GameLift Streams\n stops processing the request, and the stream session object status changes to ERROR with status reason\n placementTimeout.
The stream group to run this stream session with.
\nThis value is an \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
The stream group to run this stream session with.
\nThis value is an \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
A WebRTC ICE offer string to use when initializing a WebRTC connection. The offer is a\n very long JSON string. Provide the string as a text value in quotes.
", + "smithy.api#documentation": "A WebRTC ICE offer string to use when initializing a WebRTC connection. The offer is a very long JSON string. Provide the string as a\n text value in quotes.
", "smithy.api#required": {} } }, "ApplicationIdentifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
A list of locations, in order of priority, where you want Amazon GameLift Streams to start a stream\n from. Amazon GameLift Streams selects the location with the next available capacity to start a single\n stream session in. If this value is empty, Amazon GameLift Streams attempts to start a stream session in\n the primary location.
\n\n This value is A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, see \n\tthe Regions and quotas section in the Amazon GameLift Streams Developer Guide\n.\n\n
A list of locations, in order of priority, where you want Amazon GameLift Streams to start a stream from. Amazon GameLift Streams selects the location with the next\n available capacity to start a single stream session in. If this value is empty, Amazon GameLift Streams attempts to start a stream session in the\n primary location.
\n This value is a set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.\n
Length of time (in seconds) that Amazon GameLift Streams should wait for a client to connect to the\n stream session. This time span starts when the stream session reaches\n ACTIVE status. If no client connects before the timeout, Amazon GameLift Streams stops\n the stream session with status of TERMINATED. Default value is 120.
Length of time (in seconds) that Amazon GameLift Streams should wait for a client to connect to the stream session. This time span starts when the\n stream session reaches ACTIVE status. If no client connects before the timeout, Amazon GameLift Streams stops the stream session with status\n of TERMINATED. Default value is 120.
The maximum length of time (in seconds) that Amazon GameLift Streams keeps the stream session open. At\n this point, Amazon GameLift Streams ends the stream session regardless of any existing client\n connections. Default value is 43200.
" + "smithy.api#documentation": "The maximum length of time (in seconds) that Amazon GameLift Streams keeps the stream session open. At this point, Amazon GameLift Streams ends the stream session\n regardless of any existing client connections. Default value is 43200.
" } }, "AdditionalLaunchArgs": { @@ -3639,19 +3649,19 @@ "Arn": { "target": "com.amazonaws.gameliftstreams#Arn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) assigned to the stream session resource. When combined with the stream group\n ARN, this value uniquely identifies it across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[resource\n ID].
The Amazon Resource Name (ARN) assigned to the stream session resource. When combined with the stream group ARN, this value uniquely identifies it across all\n Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[resource ID].
A human-readable label for the stream session. You can update this value at any\n time.
" + "smithy.api#documentation": "A human-readable label for the stream session. You can update this value at any time.
" } }, "StreamGroupId": { "target": "com.amazonaws.gameliftstreams#Id", "traits": { - "smithy.api#documentation": "The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream\n session.
" + "smithy.api#documentation": "The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session.
" } }, "UserId": { @@ -3663,13 +3673,13 @@ "Status": { "target": "com.amazonaws.gameliftstreams#StreamSessionStatus", "traits": { - "smithy.api#documentation": "The current status of the stream session. A stream session can host clients when in\n ACTIVE status.
The current status of the stream session. A stream session can host clients when in ACTIVE status.
A short description of the reason the stream session is in ERROR\n status.
A short description of the reason the stream session is in ERROR status.
\n The location where Amazon GameLift Streams is streaming your application from.\n
\n\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, see \n\tthe Regions and quotas section in the Amazon GameLift Streams Developer Guide\n. \n
The location where Amazon GameLift Streams is streaming your application from.
\n\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide. \n
The WebRTC ICE offer string that a client generates to initiate a connection to the\n stream session.
" + "smithy.api#documentation": "The WebRTC ICE offer string that a client generates to initiate a connection to the stream session.
" } }, "SignalResponse": { "target": "com.amazonaws.gameliftstreams#SignalResponse", "traits": { - "smithy.api#documentation": "The WebRTC answer string that the stream server generates in response to the\n SignalRequest.
The WebRTC answer string that the stream server generates in response to the SignalRequest.
The maximum length of time (in seconds) that Amazon GameLift Streams keeps the stream session open. At\n this point, Amazon GameLift Streams ends the stream session regardless of any existing client\n connections.
" + "smithy.api#documentation": "The maximum length of time (in seconds) that Amazon GameLift Streams keeps the stream session open. At this point, Amazon GameLift Streams ends the stream session\n regardless of any existing client connections.
" } }, "SessionLengthSeconds": { @@ -3723,13 +3733,13 @@ "LogFileLocationUri": { "target": "com.amazonaws.gameliftstreams#FileLocationUri", "traits": { - "smithy.api#documentation": "Access location for log files that your content generates during a stream session.\n These log files are uploaded to cloud storage location at the end of a stream session.\n The Amazon GameLift Streams application resource defines which log files to upload.
" + "smithy.api#documentation": "Access location for log files that your content generates during a stream session. These log files are uploaded to cloud storage\n location at the end of a stream session. The Amazon GameLift Streams application resource defines which log files to upload.
" } }, "WebSdkProtocolUrl": { "target": "com.amazonaws.gameliftstreams#WebSdkProtocolUrl", "traits": { - "smithy.api#documentation": "The URL of an S3 bucket that stores Amazon GameLift Streams WebSDK files. The URL is used to establish\n connection with the client.
" + "smithy.api#documentation": "The URL of an S3 bucket that stores Amazon GameLift Streams WebSDK files. The URL is used to establish connection with the client.
" } }, "LastUpdatedAt": { @@ -3747,7 +3757,7 @@ "ApplicationArn": { "target": "com.amazonaws.gameliftstreams#Arn", "traits": { - "smithy.api#documentation": "An \n Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
Object that identifies the Amazon GameLift Streams application to stream with this stream\n group.
" + "smithy.api#documentation": "Object that identifies the Amazon GameLift Streams application to stream with this stream group.
" } }, "StreamClass": { "target": "com.amazonaws.gameliftstreams#StreamClass", "traits": { - "smithy.api#documentation": "The target stream quality for the stream group.
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.x builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.2 and 5.3 builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nThe target stream quality for the stream group.
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nDescribes a Amazon GameLift Streams stream group resource for hosting content streams. To retrieve\n additional stream group details, call GetStreamGroup.
" + "smithy.api#documentation": "Describes a Amazon GameLift Streams stream group resource for hosting content streams. To retrieve additional stream group details, call GetStreamGroup.
" } }, "com.amazonaws.gameliftstreams#StreamGroupSummaryList": { @@ -4128,7 +4138,7 @@ "Status": { "target": "com.amazonaws.gameliftstreams#StreamSessionStatus", "traits": { - "smithy.api#documentation": "The current status of the stream session resource. Possible statuses include the\n following:
\n\n ACTIVATING: The stream session is starting and preparing to\n stream.
\n ACTIVE: The stream session is ready to accept client\n connections.
\n CONNECTED: The stream session has a connected client.
\n PENDING_CLIENT_RECONNECTION: A client has recently disconnected,\n and the stream session is waiting for the client to reconnect. After a short\n time, if the client doesn't reconnect, the stream session status transitions to\n TERMINATED.
\n TERMINATING: The stream session is ending.
\n TERMINATED: The stream session has ended.
\n ERROR: The stream session failed to activate.
The current status of the stream session resource. Possible statuses include the following:
\n\n ACTIVATING: The stream session is starting and preparing to stream.
\n ACTIVE: The stream session is ready to accept client connections.
\n CONNECTED: The stream session has a connected client.
\n PENDING_CLIENT_RECONNECTION: A client has recently disconnected, and the stream session is waiting for the client\n to reconnect. After a short time, if the client doesn't reconnect, the stream session status transitions to\n TERMINATED.
\n TERMINATING: The stream session is ending.
\n TERMINATED: The stream session has ended.
\n ERROR: The stream session failed to activate.
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
The location where Amazon GameLift Streams is hosting the stream session.
\n\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, see \n\tthe Regions and quotas section in the Amazon GameLift Streams Developer Guide\n. \n
The location where Amazon GameLift Streams is hosting the stream session.
\n\n\tA location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide. \n
Describes a Amazon GameLift Streams stream session. To retrieve additional details for the stream\n session, call GetStreamSession.
" + "smithy.api#documentation": "Describes a Amazon GameLift Streams stream session. To retrieve additional details for the stream session, call GetStreamSession.
" } }, "com.amazonaws.gameliftstreams#StreamSessionSummaryList": { @@ -4235,7 +4245,7 @@ } } }, - "smithy.api#documentation": "Assigns one or more tags to a Amazon GameLift Streams resource. Use tags to organize Amazon Web Services resources for\n a range of purposes. You can assign tags to the following Amazon GameLift Streams resource types:
\nApplication
\nStreamGroup
\n\n Learn more\n
\n\n Tagging Amazon Web Services Resources in the\n Amazon Web Services General Reference\n
\n\n \n Amazon Web Services Tagging Strategies\n
", + "smithy.api#documentation": "Assigns one or more tags to a Amazon GameLift Streams resource. Use tags to organize Amazon Web Services resources for a range of purposes. You can assign tags to\n the following Amazon GameLift Streams resource types:
\nApplication
\nStreamGroup
\n\n Learn more\n
\n\n Tagging Amazon Web Services Resources in the Amazon Web Services General\n Reference\n
\n\n Amazon Web Services Tagging Strategies\n
", "smithy.api#http": { "uri": "/tags/{ResourceArn}", "method": "POST" @@ -4249,7 +4259,7 @@ "ResourceArn": { "target": "com.amazonaws.gameliftstreams#Arn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to apply tags to.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to apply\n tags to.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4257,7 +4267,7 @@ "Tags": { "target": "com.amazonaws.gameliftstreams#Tags", "traits": { - "smithy.api#documentation": "A list of tags, in the form of key-value pairs, to assign to the specified Amazon GameLift Streams\n resource.
", + "smithy.api#documentation": "A list of tags, in the form of key-value pairs, to assign to the specified Amazon GameLift Streams resource.
", "smithy.api#required": {} } } @@ -4333,7 +4343,7 @@ } } }, - "smithy.api#documentation": "Permanently terminates an active stream session. When called, the stream session\n status changes to TERMINATING. You can terminate a stream session in any\n status except ACTIVATING. If the stream session is in\n ACTIVATING status, an exception is thrown.
Permanently terminates an active stream session. When called, the stream session status changes to TERMINATING. You can\n terminate a stream session in any status except ACTIVATING. If the stream session is in ACTIVATING status, an\n exception is thrown.
\n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
The stream group that runs this stream session.
", + "smithy.api#documentation": "\n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
The stream group that runs this stream session.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4378,7 +4388,7 @@ } }, "traits": { - "smithy.api#documentation": "The request was denied due to request throttling. Retry the request after the\n suggested wait time.
", + "smithy.api#documentation": "The request was denied due to request throttling. Retry the request after the suggested wait time.
", "smithy.api#error": "client", "smithy.api#httpError": 429, "smithy.api#retryable": { @@ -4421,7 +4431,7 @@ } } }, - "smithy.api#documentation": "Removes one or more tags from a Amazon GameLift Streams resource. To remove tags, specify the Amazon GameLift Streams resource and a list of \n one or more tags to remove.
", + "smithy.api#documentation": "Removes one or more tags from a Amazon GameLift Streams resource. To remove tags, specify the Amazon GameLift Streams resource and a list of one or more tags to\n remove.
", "smithy.api#http": { "uri": "/tags/{ResourceArn}", "method": "DELETE" @@ -4435,7 +4445,7 @@ "ResourceArn": { "target": "com.amazonaws.gameliftstreams#Arn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to remove tags from.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to remove\n tags from.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4504,7 +4514,7 @@ "Identifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
Locations of log files that your content generates during a stream session. Enter path\n values that are relative to the ApplicationSourceUri location.\n You can specify up to 10 log locations.\n Amazon GameLift Streams uploads designated log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
Locations of log files that your content generates during a stream session. Enter path\n values that are relative to the ApplicationSourceUri location.\n You can specify up to 10 log paths.\n Amazon GameLift Streams uploads designated log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Use the following format for the URI: s3://[bucket name]/[prefix]. \n Required if you specify one or more LogPaths.
The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.
\nAn Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.
The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.
\nAn Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across\n all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS\n account]:application/[resource ID].
An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].
\n A set of configuration settings to run the application on a stream group. This configures the operating system, and can include compatibility layers and other drivers.\n
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS(Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
Configuration settings that identify the operating system for an application resource. This can also include a compatibility layer and\n other drivers.
\nA runtime environment can be one of the following:
\n\n For Linux applications\n
\n\n Ubuntu 22.04 LTS (Type=UBUNTU, Version=22_04_LTS)\n
\n For Windows applications\n
\nMicrosoft Windows Server 2022 Base (Type=WINDOWS, Version=2022)
Proton 8.0-5 (Type=PROTON, Version=20241007)
Proton 8.0-2c (Type=PROTON, Version=20230704)
The path and file name of the executable file that launches the content for\n streaming.
" + "smithy.api#documentation": "The path and file name of the executable file that launches the content for streaming.
" } }, "ApplicationLogPaths": { "target": "com.amazonaws.gameliftstreams#FilePaths", "traits": { - "smithy.api#documentation": "Locations of log files that your content generates during a stream session. \n Amazon GameLift Streams uploads log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
Locations of log files that your content generates during a stream session. \n Amazon GameLift Streams uploads log files to the Amazon S3 bucket that you specify in ApplicationLogOutputUri \n at the end of a stream session. To retrieve stored log files, call GetStreamSession \n and get the LogFileLocationUri.
An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Use the following format for the URI: s3://[bucket name]/[prefix]. \n Required if you specify one or more LogPaths.
An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6 or ID-9ZY8X7Wv6.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.\n
The current status of the application resource. Possible statuses include the\n following:
\n\n INITIALIZED: Amazon GameLift Streams has received the request and is initiating the\n work flow to create an application.
\n PROCESSING: The create application work flow is in process. Amazon GameLift Streams\n is copying the content and caching for future deployment in a stream\n group.
\n READY: The application is ready to deploy in a stream group.
\n ERROR: An error occurred when setting up the application. See\n StatusReason for more information.
\n DELETING: Amazon GameLift Streams is in the process of deleting the\n application.
The current status of the application resource. Possible statuses include the following:
\n\n INITIALIZED: Amazon GameLift Streams has received the request and is initiating the work flow to create an application.
\n PROCESSING: The create application work flow is in process. Amazon GameLift Streams is copying the content and caching for future\n deployment in a stream group.
\n READY: The application is ready to deploy in a stream group.
\n ERROR: An error occurred when setting up the application. See StatusReason for more information.
\n DELETING: Amazon GameLift Streams is in the process of deleting the application.
A short description of the status reason when the application is in ERROR\n status.
A short description of the status reason when the application is in ERROR status.
\n A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.\n
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4.\n
A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4.\n
\n Updates the configuration settings for an Amazon GameLift Streams stream group resource. You\n can change the description, the set of locations, and the requested capacity of a stream group per location. If you want to change the stream class, create a new stream group.\n
\n\n Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity: always-on and on-demand:\n
\n\n Always-on: \n The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.\n\n
\n\n On-demand: \n The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).\n\n
\nTo update a stream group, specify the stream group's Amazon Resource Name (ARN) and provide the new values. If\n the request is successful, Amazon GameLift Streams returns the complete updated metadata for the stream\n group.
", + "smithy.api#documentation": "Updates the configuration settings for an Amazon GameLift Streams stream group resource. You can change the description, the set of locations, and\n the requested capacity of a stream group per location. If you want to change the stream class, create a new stream group.
\n\n Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity: always-on and on-demand:\n
\n\n Always-on: \n The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.\n\n
\n\n On-demand: \n The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).\n\n
\nTo update a stream group, specify the stream group's Amazon Resource Name (ARN) and provide the new values. If the request is successful, Amazon GameLift Streams returns the\n complete updated metadata for the stream group.
", "smithy.api#http": { "code": 200, "method": "PATCH", @@ -4683,7 +4693,7 @@ "Identifier": { "target": "com.amazonaws.gameliftstreams#Identifier", "traits": { - "smithy.api#documentation": "An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/1AB2C3De4 or ID-1AB2C3De4.\n
An \n Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.\n
An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies\n the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS\n account]:streamgroup/[resource ID].
An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is\n arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].
The Amazon GameLift Streams application that is associated with this stream group.
" + "smithy.api#documentation": "The default Amazon GameLift Streams application that is associated with this stream group.
" } }, "LocationStates": { @@ -4742,13 +4752,13 @@ "target": "com.amazonaws.gameliftstreams#StreamClass", "traits": { "aws.cloudformation#cfnMutability": "create-and-read", - "smithy.api#documentation": "The target stream quality for the stream group.
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.x builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with most Unreal Engine 5.2 and 5.3 builds, 32-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nThe target stream quality for the stream group.
\nA stream class can be one of the following:
\n\n \n gen5n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen5n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 12 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen5n_ultra (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity.\n Uses dedicated NVIDIA A10G Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 24 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_win2022 (NVIDIA, ultra) Supports applications with extremely high 3D scene complexity. Runs applications on Microsoft Windows Server 2022 Base and supports DirectX 12. Compatible with Unreal Engine versions up through 5.4, 32 and 64-bit applications, and anti-cheat technology. Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\n\n \n gen4n_high (NVIDIA, high) Supports applications with moderate to high 3D scene complexity.\n Uses NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 4 vCPUs, 16 GB RAM, 8 GB VRAM
\nTenancy: Supports up to 2 concurrent stream sessions
\n\n \n gen4n_ultra (NVIDIA, ultra) Supports applications with high 3D scene complexity.\n Uses dedicated NVIDIA T4 Tensor GPU.
Reference resolution: 1080p
\nReference frame rate: 60 fps
\nWorkload specifications: 8 vCPUs, 32 GB RAM, 16 GB VRAM
\nTenancy: Supports 1 concurrent stream session
\nA unique ID value that is assigned to the resource when it's created. Format example:\n 1AB2C3De4.
A unique ID value that is assigned to the resource when it's created. Format example: sg-1AB2C3De4.
\n A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:\n
\n internalError: The request can't be processed right now because of an issue with the server. Try again later. Reach out to the Amazon GameLift Streams team for more help.\n
\n noAvailableInstances: Amazon GameLift Streams does not currently have enough available On-Demand capacity to fulfill your request. Wait a few minutes and retry the request as capacity can shift frequently. You can also try to make the request using a different stream class or in another region.\n
A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the\n following:
\n internalError: The request can't be processed right now because of an issue with the server. Try again later. Reach out to\n the Amazon GameLift Streams team for more help.
\n noAvailableInstances: Amazon GameLift Streams does not currently have enough available On-Demand capacity to fulfill your request.\n Wait a few minutes and retry the request as capacity can shift frequently. You can also try to make the request using a different\n stream class or in another region.
\n A set of applications that this stream group is associated with. You can stream any of these applications with the stream group.\n
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/9ZY8X7Wv6.\n
A set of applications that this stream group is associated with. You can stream any of these applications with the stream group.
\nThis value is a \n set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.\n
One or more parameter values in the request fail to satisfy the specified constraints. \n Correct the invalid parameter values before retrying the request.
", + "smithy.api#documentation": "One or more parameter values in the request fail to satisfy the specified constraints. Correct the invalid parameter values before\n retrying the request.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } diff --git a/codegen/sdk/aws-models/glue.json b/codegen/sdk/aws-models/glue.json index 4958fff0105..f8d2ca2d879 100644 --- a/codegen/sdk/aws-models/glue.json +++ b/codegen/sdk/aws-models/glue.json @@ -7449,6 +7449,12 @@ "type": "list", "member": { "target": "com.amazonaws.glue#Condition" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 500 + } } }, "com.amazonaws.glue#ConfigValueString": { @@ -8141,6 +8147,27 @@ } } }, + "com.amazonaws.glue#ConnectionString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 255 + } + } + }, + "com.amazonaws.glue#ConnectionStringList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#ConnectionString" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1000 + } + } + }, "com.amazonaws.glue#ConnectionType": { "type": "enum", "members": { @@ -8356,7 +8383,7 @@ "type": "structure", "members": { "Connections": { - "target": "com.amazonaws.glue#OrchestrationStringList", + "target": "com.amazonaws.glue#ConnectionStringList", "traits": { "smithy.api#documentation": "A list of connections used by the job.
" } @@ -12205,7 +12232,7 @@ } }, "Description": { - "target": "com.amazonaws.glue#GenericString", + "target": "com.amazonaws.glue#WorkflowDescriptionString", "traits": { "smithy.api#documentation": "A description of the workflow.
" } @@ -25639,11 +25666,18 @@ "smithy.api#documentation": "The number of files removed by the compaction job run.
" } }, + "DpuHours": { + "target": "com.amazonaws.glue#dpuHours", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The number of DPU hours consumed by the job.
" + } + }, "NumberOfDpus": { "target": "com.amazonaws.glue#dpuCounts", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The number of DPU hours consumed by the job.
" + "smithy.api#documentation": "The number of DPUs consumed by the job, rounded up to the nearest whole number.
" } }, "JobDurationInHour": { @@ -25709,11 +25743,18 @@ "smithy.api#documentation": "The number of orphan files deleted by the orphan file deletion job run.
" } }, + "DpuHours": { + "target": "com.amazonaws.glue#dpuHours", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The number of DPU hours consumed by the job.
" + } + }, "NumberOfDpus": { "target": "com.amazonaws.glue#dpuCounts", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The number of DPU hours consumed by the job.
" + "smithy.api#documentation": "The number of DPUs consumed by the job, rounded up to the nearest whole number.
" } }, "JobDurationInHour": { @@ -25778,11 +25819,18 @@ "smithy.api#documentation": "The number of manifest lists deleted by the retention job run.
" } }, + "DpuHours": { + "target": "com.amazonaws.glue#dpuHours", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The number of DPU hours consumed by the job.
" + } + }, "NumberOfDpus": { "target": "com.amazonaws.glue#dpuCounts", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The number of DPU hours consumed by the job.
" + "smithy.api#documentation": "The number of DPUs consumed by the job, rounded up to the nearest whole number.
" } }, "JobDurationInHour": { @@ -34975,7 +35023,7 @@ "NumberOfDpus": { "target": "com.amazonaws.glue#MessageString", "traits": { - "smithy.api#documentation": "The number of DPU hours consumed by the job.
" + "smithy.api#documentation": "The number of DPUs consumed by the job, rounded up to the nearest whole number.
" } }, "JobDurationInHour": { @@ -44398,7 +44446,7 @@ } }, "Description": { - "target": "com.amazonaws.glue#GenericString", + "target": "com.amazonaws.glue#WorkflowDescriptionString", "traits": { "smithy.api#documentation": "The description of the workflow.
" } @@ -45153,6 +45201,15 @@ "smithy.api#documentation": "A workflow is a collection of multiple dependent Glue \n jobs and crawlers that are run to complete a complex ETL task. A\n workflow manages the execution and monitoring of all its jobs and crawlers.
" } }, + "com.amazonaws.glue#WorkflowDescriptionString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 120000 + } + } + }, "com.amazonaws.glue#WorkflowGraph": { "type": "structure", "members": { @@ -45458,6 +45515,12 @@ "smithy.api#default": 0 } }, + "com.amazonaws.glue#dpuHours": { + "type": "double", + "traits": { + "smithy.api#default": 0 + } + }, "com.amazonaws.glue#glueConnectionNameString": { "type": "string", "traits": { diff --git a/codegen/sdk/aws-models/groundstation.json b/codegen/sdk/aws-models/groundstation.json index fe43bbc8705..a629252c52f 100644 --- a/codegen/sdk/aws-models/groundstation.json +++ b/codegen/sdk/aws-models/groundstation.json @@ -94,26 +94,32 @@ } }, "com.amazonaws.groundstation#AgentStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "SUCCESS", - "name": "SUCCESS" - }, - { - "value": "FAILED", - "name": "FAILED" - }, - { - "value": "ACTIVE", - "name": "ACTIVE" - }, - { - "value": "INACTIVE", - "name": "INACTIVE" + "type": "enum", + "members": { + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCESS" } - ] + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "INACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INACTIVE" + } + } } }, "com.amazonaws.groundstation#AggregateStatus": { @@ -138,18 +144,20 @@ } }, "com.amazonaws.groundstation#AngleUnits": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "DEGREE_ANGLE", - "name": "DEGREE_ANGLE" - }, - { - "value": "RADIAN", - "name": "RADIAN" + "type": "enum", + "members": { + "DEGREE_ANGLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEGREE_ANGLE" } - ] + }, + "RADIAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": 
"RADIAN" + } + } } }, "com.amazonaws.groundstation#AntennaDemodDecodeDetails": { @@ -249,18 +257,20 @@ } }, "com.amazonaws.groundstation#AuditResults": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "HEALTHY", - "name": "HEALTHY" - }, - { - "value": "UNHEALTHY", - "name": "UNHEALTHY" + "type": "enum", + "members": { + "HEALTHY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HEALTHY" } - ] + }, + "UNHEALTHY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNHEALTHY" + } + } } }, "com.amazonaws.groundstation#AwsGroundStationAgentEndpoint": { @@ -305,22 +315,26 @@ } }, "com.amazonaws.groundstation#BandwidthUnits": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "GHz", - "name": "GHZ" - }, - { - "value": "MHz", - "name": "MHZ" - }, - { - "value": "kHz", - "name": "KHZ" + "type": "enum", + "members": { + "GHZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GHz" } - ] + }, + "MHZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MHz" + } + }, + "KHZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "kHz" + } + } } }, "com.amazonaws.groundstation#BucketArn": { @@ -398,53 +412,67 @@ } }, "com.amazonaws.groundstation#CapabilityHealth": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "UNHEALTHY", - "name": "UNHEALTHY" - }, - { - "value": "HEALTHY", - "name": "HEALTHY" + "type": "enum", + "members": { + "HEALTHY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HEALTHY" } - ] + }, + "UNHEALTHY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNHEALTHY" + } + } } }, "com.amazonaws.groundstation#CapabilityHealthReason": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "NO_REGISTERED_AGENT", - "name": "NO_REGISTERED_AGENT" - }, - { - "value": "INVALID_IP_OWNERSHIP", - "name": 
"INVALID_IP_OWNERSHIP" - }, - { - "value": "NOT_AUTHORIZED_TO_CREATE_SLR", - "name": "NOT_AUTHORIZED_TO_CREATE_SLR" - }, - { - "value": "UNVERIFIED_IP_OWNERSHIP", - "name": "UNVERIFIED_IP_OWNERSHIP" - }, - { - "value": "INITIALIZING_DATAPLANE", - "name": "INITIALIZING_DATAPLANE" - }, - { - "value": "DATAPLANE_FAILURE", - "name": "DATAPLANE_FAILURE" - }, - { - "value": "HEALTHY", - "name": "HEALTHY" + "type": "enum", + "members": { + "NO_REGISTERED_AGENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_REGISTERED_AGENT" } - ] + }, + "INVALID_IP_OWNERSHIP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_IP_OWNERSHIP" + } + }, + "NOT_AUTHORIZED_TO_CREATE_SLR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_AUTHORIZED_TO_CREATE_SLR" + } + }, + "UNVERIFIED_IP_OWNERSHIP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNVERIFIED_IP_OWNERSHIP" + } + }, + "INITIALIZING_DATAPLANE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INITIALIZING_DATAPLANE" + } + }, + "DATAPLANE_FAILURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATAPLANE_FAILURE" + } + }, + "HEALTHY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HEALTHY" + } + } } }, "com.amazonaws.groundstation#CapabilityHealthReasonList": { @@ -486,21 +514,18 @@ "bytesSent": { "target": "smithy.api#Long", "traits": { - "smithy.api#default": null, "smithy.api#documentation": "Bytes sent by the component.
" } }, "bytesReceived": { "target": "smithy.api#Long", "traits": { - "smithy.api#default": null, "smithy.api#documentation": "Bytes received by the component.
" } }, "packetsDropped": { "target": "smithy.api#Long", "traits": { - "smithy.api#default": null, "smithy.api#documentation": "Packets dropped by component.
" } }, @@ -606,42 +631,59 @@ "type": "AWS::GroundStation::Config", "service": "GroundStation", "resource": "Config" - } + }, + "smithy.api#length": { + "min": 82, + "max": 424 + }, + "smithy.api#pattern": "^arn:aws:groundstation:[-a-z0-9]{1,50}:[0-9]{12}:config/[a-z0-9]+(-[a-z0-9]+){0,4}/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(/.{1,256})?$" } }, "com.amazonaws.groundstation#ConfigCapabilityType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "antenna-downlink", - "name": "ANTENNA_DOWNLINK" - }, - { - "value": "antenna-downlink-demod-decode", - "name": "ANTENNA_DOWNLINK_DEMOD_DECODE" - }, - { - "value": "antenna-uplink", - "name": "ANTENNA_UPLINK" - }, - { - "value": "dataflow-endpoint", - "name": "DATAFLOW_ENDPOINT" - }, - { - "value": "tracking", - "name": "TRACKING" - }, - { - "value": "uplink-echo", - "name": "UPLINK_ECHO" - }, - { - "value": "s3-recording", - "name": "S3_RECORDING" + "type": "enum", + "members": { + "ANTENNA_DOWNLINK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "antenna-downlink" } - ] + }, + "ANTENNA_DOWNLINK_DEMOD_DECODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "antenna-downlink-demod-decode" + } + }, + "TRACKING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "tracking" + } + }, + "DATAFLOW_ENDPOINT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "dataflow-endpoint" + } + }, + "ANTENNA_UPLINK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "antenna-uplink" + } + }, + "UPLINK_ECHO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "uplink-echo" + } + }, + "S3_RECORDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "s3-recording" + } + } } }, "com.amazonaws.groundstation#ConfigDetails": { @@ -951,62 +993,86 @@ } }, "com.amazonaws.groundstation#ContactStatus": { - "type": "string", - "traits": { - 
"smithy.api#enum": [ - { - "value": "AVAILABLE", - "name": "AVAILABLE" - }, - { - "value": "AWS_CANCELLED", - "name": "AWS_CANCELLED" - }, - { - "value": "AWS_FAILED", - "name": "AWS_FAILED" - }, - { - "value": "CANCELLED", - "name": "CANCELLED" - }, - { - "value": "CANCELLING", - "name": "CANCELLING" - }, - { - "value": "COMPLETED", - "name": "COMPLETED" - }, - { - "value": "FAILED", - "name": "FAILED" - }, - { - "value": "FAILED_TO_SCHEDULE", - "name": "FAILED_TO_SCHEDULE" - }, - { - "value": "PASS", - "name": "PASS" - }, - { - "value": "POSTPASS", - "name": "POSTPASS" - }, - { - "value": "PREPASS", - "name": "PREPASS" - }, - { - "value": "SCHEDULED", - "name": "SCHEDULED" - }, - { - "value": "SCHEDULING", - "name": "SCHEDULING" + "type": "enum", + "members": { + "SCHEDULING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SCHEDULING" } - ] + }, + "FAILED_TO_SCHEDULE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED_TO_SCHEDULE" + } + }, + "SCHEDULED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SCHEDULED" + } + }, + "CANCELLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANCELLED" + } + }, + "AWS_CANCELLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_CANCELLED" + } + }, + "PREPASS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PREPASS" + } + }, + "PASS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PASS" + } + }, + "POSTPASS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "POSTPASS" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "AVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AVAILABLE" + } + }, + "CANCELLING": { 
+ "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANCELLING" + } + }, + "AWS_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_FAILED" + } + } } }, "com.amazonaws.groundstation#CreateConfig": { @@ -1100,7 +1166,7 @@ "endpointDetails": { "target": "com.amazonaws.groundstation#EndpointDetailsList", "traits": { - "smithy.api#documentation": "Endpoint details of each endpoint in the dataflow endpoint group.
", + "smithy.api#documentation": "Endpoint details of each endpoint in the dataflow endpoint group.\n\n All dataflow endpoints within a single dataflow endpoint group must be of the same type.\n You cannot mix \n AWS Ground Station Agent endpoints with\n Dataflow endpoints in the same group.\n If your use case requires both types of endpoints, you must create separate dataflow endpoint\n groups for each type.\n
", "smithy.api#required": {} } }, @@ -1167,7 +1233,6 @@ "enabled": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#default": null, "smithy.api#documentation": "Whether to set the ephemeris status to ENABLED after validation.
Setting this to false will set the ephemeris status to DISABLED after validation.
Whether or not the ephemeris is enabled.
" } }, @@ -2141,14 +2214,14 @@ } }, "com.amazonaws.groundstation#EirpUnits": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "dBW", - "name": "DBW" + "type": "enum", + "members": { + "DBW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "dBW" } - ] + } } }, "com.amazonaws.groundstation#Elevation": { @@ -2224,30 +2297,38 @@ } }, "com.amazonaws.groundstation#EndpointStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "created", - "name": "created" - }, - { - "value": "creating", - "name": "creating" - }, - { - "value": "deleted", - "name": "deleted" - }, - { - "value": "deleting", - "name": "deleting" - }, - { - "value": "failed", - "name": "failed" + "type": "enum", + "members": { + "created": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "created" } - ] + }, + "creating": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "creating" + } + }, + "deleted": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "deleted" + } + }, + "deleting": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "deleting" + } + }, + "failed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "failed" + } + } } }, "com.amazonaws.groundstation#EphemeridesList": { @@ -2336,35 +2417,43 @@ } }, "com.amazonaws.groundstation#EphemerisInvalidReason": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "METADATA_INVALID", - "name": "METADATA_INVALID", - "documentation": "Provided spacecraft identifiers such as spacecraft NORAD Id are invalid" - }, - { - "value": "TIME_RANGE_INVALID", - "name": "TIME_RANGE_INVALID", - "documentation": "Start, end, or expiration time(s) are invalid for the provided ephemeris" - }, - { - "value": "TRAJECTORY_INVALID", - "name": "TRAJECTORY_INVALID", - "documentation": "Provided ephemeris defines invalid spacecraft trajectory" - }, - { - "value": 
"KMS_KEY_INVALID", - "name": "KMS_KEY_INVALID", - "documentation": "Provided KMS key is invalid" - }, - { - "value": "VALIDATION_ERROR", - "name": "VALIDATION_ERROR", - "documentation": "Internal Service Error occurred while processing ephemeris" + "type": "enum", + "members": { + "METADATA_INVALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#documentation": "Provided spacecraft identifiers such as spacecraft NORAD Id are invalid", + "smithy.api#enumValue": "METADATA_INVALID" } - ] + }, + "TIME_RANGE_INVALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#documentation": "Start, end, or expiration time(s) are invalid for the provided ephemeris", + "smithy.api#enumValue": "TIME_RANGE_INVALID" + } + }, + "TRAJECTORY_INVALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#documentation": "Provided ephemeris defines invalid spacecraft trajectory", + "smithy.api#enumValue": "TRAJECTORY_INVALID" + } + }, + "KMS_KEY_INVALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#documentation": "Provided KMS key is invalid", + "smithy.api#enumValue": "KMS_KEY_INVALID" + } + }, + "VALIDATION_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#documentation": "Internal Service Error occurred while processing ephemeris", + "smithy.api#enumValue": "VALIDATION_ERROR" + } + } } }, "com.amazonaws.groundstation#EphemerisItem": { @@ -2391,7 +2480,6 @@ "enabled": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#default": null, "smithy.api#documentation": "Whether or not the ephemeris is enabled.
" } }, @@ -2461,49 +2549,61 @@ } }, "com.amazonaws.groundstation#EphemerisSource": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "CUSTOMER_PROVIDED", - "name": "CUSTOMER_PROVIDED" - }, - { - "value": "SPACE_TRACK", - "name": "SPACE_TRACK" + "type": "enum", + "members": { + "CUSTOMER_PROVIDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOMER_PROVIDED" } - ] + }, + "SPACE_TRACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SPACE_TRACK" + } + } } }, "com.amazonaws.groundstation#EphemerisStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "VALIDATING", - "name": "VALIDATING" - }, - { - "value": "INVALID", - "name": "INVALID" - }, - { - "value": "ERROR", - "name": "ERROR" - }, - { - "value": "ENABLED", - "name": "ENABLED" - }, - { - "value": "DISABLED", - "name": "DISABLED" - }, - { - "value": "EXPIRED", - "name": "EXPIRED" + "type": "enum", + "members": { + "VALIDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VALIDATING" } - ] + }, + "INVALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID" + } + }, + "ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "EXPIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPIRED" + } + } } }, "com.amazonaws.groundstation#EphemerisStatusList": { @@ -2577,22 +2677,26 @@ } }, "com.amazonaws.groundstation#FrequencyUnits": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "GHz", - "name": "GHZ" - }, - { - "value": "MHz", - "name": "MHZ" - }, - { - "value": "kHz", - "name": "KHZ" + "type": "enum", + "members": { + "GHZ": { + "target": 
"smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GHz" } - ] + }, + "MHZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MHz" + } + }, + "KHZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "kHz" + } + } } }, "com.amazonaws.groundstation#GetAgentConfiguration": { @@ -4148,7 +4252,7 @@ "min": 2, "max": 8192 }, - "smithy.api#pattern": "^[{}\\[\\]:.,\"0-9A-z\\-_\\s]{2,8192}$" + "smithy.api#pattern": "^[{}\\[\\]:.,\"0-9A-Za-z\\-_\\s]{2,8192}$" } }, "com.amazonaws.groundstation#KeyAliasArn": { @@ -4161,7 +4265,7 @@ "min": 1, "max": 512 }, - "smithy.api#pattern": "^arn:aws[a-zA-Z-]{0,16}:kms:[a-z]{2}(-[a-z]{1,16}){1,3}-\\d{1}:\\d{12}:((alias/[a-zA-Z0-9:/_-]{1,256}))$" + "smithy.api#pattern": "^arn:aws[a-zA-Z-]{0,16}:kms:[-a-z0-9]{1,50}:[0-9]{12}:((alias/[a-zA-Z0-9:/_-]{1,256}))$" } }, "com.amazonaws.groundstation#KeyAliasName": { @@ -4205,7 +4309,7 @@ } }, "traits": { - "smithy.api#documentation": "AWS Key Management Service (KMS) Key.
" + "smithy.api#documentation": "KMS key info.
" } }, "com.amazonaws.groundstation#ListConfigs": { @@ -4898,7 +5002,12 @@ "type": "AWS::GroundStation::MissionProfile", "service": "GroundStation", "resource": "MissionProfile" - } + }, + "smithy.api#length": { + "min": 89, + "max": 138 + }, + "smithy.api#pattern": "^arn:aws:groundstation:[-a-z0-9]{1,50}:[0-9]{12}:mission-profile/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" } }, "com.amazonaws.groundstation#MissionProfileIdResponse": { @@ -4986,7 +5095,7 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 0, + "min": 1, "max": 100 } } @@ -5002,22 +5111,26 @@ } }, "com.amazonaws.groundstation#Polarization": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "LEFT_HAND", - "name": "LEFT_HAND" - }, - { - "value": "NONE", - "name": "NONE" - }, - { - "value": "RIGHT_HAND", - "name": "RIGHT_HAND" + "type": "enum", + "members": { + "RIGHT_HAND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RIGHT_HAND" } - ] + }, + "LEFT_HAND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LEFT_HAND" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } } }, "com.amazonaws.groundstation#PositiveDurationInSeconds": { @@ -5119,6 +5232,12 @@ "smithy.api#documentation": "Detailed information about the agent being registered.
", "smithy.api#required": {} } + }, + "tags": { + "target": "com.amazonaws.groundstation#TagsMap", + "traits": { + "smithy.api#documentation": "Tags assigned to an Agent.
Polarization of a spectral Config. Capturing both \"RIGHT_HAND\" and \"LEFT_HAND\" polarization requires two separate configs.
Each resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#idempotencyToken": {} } }, @@ -1231,7 +1231,7 @@ "com.amazonaws.iotwireless#ClientRequestToken": { "type": "string", "traits": { - "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#length": { "min": 1, "max": 64 @@ -1410,7 +1410,7 @@ "ClientRequestToken": { "target": "com.amazonaws.iotwireless#ClientRequestToken", "traits": { - "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#idempotencyToken": {} } } @@ -1497,7 +1497,7 @@ "ClientRequestToken": { "target": "com.amazonaws.iotwireless#ClientRequestToken", "traits": { - "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#idempotencyToken": {} } }, @@ -1685,7 +1685,7 @@ "ClientRequestToken": { "target": "com.amazonaws.iotwireless#ClientRequestToken", "traits": { - "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#idempotencyToken": {} } }, @@ -1876,7 +1876,7 @@ "ClientRequestToken": { "target": "com.amazonaws.iotwireless#ClientRequestToken", "traits": { - "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#idempotencyToken": {} } } @@ -1974,7 +1974,7 @@ "ClientRequestToken": { "target": "com.amazonaws.iotwireless#ClientRequestToken", "traits": { - "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#idempotencyToken": {} } }, @@ -2053,7 +2053,7 @@ } ], "traits": { - "smithy.api#documentation": "Provisions a wireless gateway.
\nWhen provisioning a wireless gateway, you might run into duplication errors\n for the following reasons.
\nIf you specify a GatewayEui value that already exists.
If you used a ClientRequestToken with the same parameters \n within the last 10 minutes.
To avoid this error, make sure that you use unique identifiers and parameters\n for each request within the specified time period.
\nProvisions a wireless gateway.
\nWhen provisioning a wireless gateway, you might run into duplication errors for\n the following reasons.
\nIf you specify a GatewayEui value that already exists.
If you used a ClientRequestToken with the same parameters\n within the last 10 minutes.
To avoid this error, make sure that you use unique identifiers and parameters for\n each request within the specified time period.
\nEach resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#idempotencyToken": {} } } @@ -2221,7 +2221,7 @@ "ClientRequestToken": { "target": "com.amazonaws.iotwireless#ClientRequestToken", "traits": { - "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to implement\n idempotency. It ensures that the request completes no more than one time. If you retry a request\n with the same token and the same parameters, the request will complete successfully. However, if\n you try to create a new resource using the same token but different parameters, an HTTP 409\n conflict occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.\n For more information about idempotency, see Ensuring idempotency in Amazon EC2 API requests.
", + "smithy.api#documentation": "Each resource must have a unique client request token. The client token is used to\n implement idempotency. It ensures that the request completes no more than one time. If\n you retry a request with the same token and the same parameters, the request will\n complete successfully. However, if you try to create a new resource using the same token\n but different parameters, an HTTP 409 conflict occurs. If you omit this value, AWS SDKs\n will automatically generate a unique client request. For more information about\n idempotency, see Ensuring idempotency in Amazon\n EC2 API requests.
", "smithy.api#idempotencyToken": {} } }, @@ -2573,7 +2573,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a multicast group if it is not in use by a fuota task.
", + "smithy.api#documentation": "Deletes a multicast group if it is not in use by a FUOTA task.
", "smithy.api#http": { "method": "DELETE", "uri": "/multicast-groups/{Id}", @@ -2937,7 +2937,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a wireless gateway.
\nWhen deleting a wireless gateway, you might run into duplication errors\n for the following reasons.
\nIf you specify a GatewayEui value that already exists.
If you used a ClientRequestToken with the same parameters \n within the last 10 minutes.
To avoid this error, make sure that you use unique identifiers and parameters\n for each request within the specified time period.
\nDeletes a wireless gateway.
\nWhen deleting a wireless gateway, you might run into duplication errors for the\n following reasons.
\nIf you specify a GatewayEui value that already exists.
If you used a ClientRequestToken with the same parameters\n within the last 10 minutes.
To avoid this error, make sure that you use unique identifiers and parameters for\n each request within the specified time period.
\nDisassociates a multicast group from a fuota task.
", + "smithy.api#documentation": "Disassociates a multicast group from a FUOTA task.
", "smithy.api#http": { "method": "DELETE", "uri": "/fuota-tasks/{Id}/multicast-groups/{MulticastGroupId}", @@ -4374,7 +4374,7 @@ "com.amazonaws.iotwireless#FileDescriptor": { "type": "string", "traits": { - "smithy.api#documentation": "The Descriptor specifies some metadata about the File being transferred using FUOTA e.g. the software version.\n It is sent transparently to the device. It is a binary field encoded in base64
", + "smithy.api#documentation": "The descriptor is the metadata about the file that is transferred to the device using\n FUOTA, such as the software version. It is a binary field encoded in base64.
", "smithy.api#length": { "min": 0, "max": 332 @@ -4549,7 +4549,7 @@ } }, "traits": { - "smithy.api#documentation": "The event for a log message, if the log message is tied to a fuota task.
" + "smithy.api#documentation": "The event for a log message, if the log message is tied to a FUOTA task.
" } }, "com.amazonaws.iotwireless#FuotaTaskEventLogOption": { @@ -4569,7 +4569,7 @@ } }, "traits": { - "smithy.api#documentation": "The log options for a FUOTA task event and can be used to set log levels for a\n specific fuota task event.
\nFor a LoRaWAN FuotaTask type, possible event for a log message is Fuota.
The log options for a FUOTA task event and can be used to set log levels for a\n specific FUOTA task event.
\nFor a LoRaWAN FUOTA task, the only possible event for a log message is\n Fuota.
The fuota task type.
", + "smithy.api#documentation": "The FUOTA task type.
", "smithy.api#required": {} } }, @@ -4621,7 +4621,7 @@ } }, "traits": { - "smithy.api#documentation": "The log options for fuota tasks and can be used to set log levels for a specific\n type of fuota task.
" + "smithy.api#documentation": "The log options for FUOTA tasks and can be used to set log levels for a specific type\n of FUOTA task.
" } }, "com.amazonaws.iotwireless#FuotaTaskLogOptionList": { @@ -4630,7 +4630,7 @@ "target": "com.amazonaws.iotwireless#FuotaTaskLogOption" }, "traits": { - "smithy.api#documentation": "The list of fuota task log options.
" + "smithy.api#documentation": "The list of FUOTA task log options.
" } }, "com.amazonaws.iotwireless#FuotaTaskName": { @@ -4692,7 +4692,7 @@ } }, "traits": { - "smithy.api#documentation": "The fuota task type.
" + "smithy.api#documentation": "The FUOTA task type.
" } }, "com.amazonaws.iotwireless#GPST": { @@ -5148,7 +5148,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns current default log levels or log levels by resource types. Based on resource\n types, log levels can be for wireless device log options or wireless gateway log\n options.
", + "smithy.api#documentation": "Returns current default log levels or log levels by resource types. Based on the\n resource type, log levels can be returned for wireless device, wireless gateway, or\n FUOTA task log options.
", "smithy.api#http": { "method": "GET", "uri": "/log-levels", @@ -6024,7 +6024,7 @@ } ], "traits": { - "smithy.api#documentation": "Fetches the log-level override, if any, for a given resource-ID and resource-type. It\n can be used for a wireless device, wireless gateway or fuota task.
", + "smithy.api#documentation": "Fetches the log-level override, if any, for a given resource ID and resource\n type..
", "smithy.api#http": { "method": "GET", "uri": "/log-levels/{ResourceIdentifier}", @@ -6045,7 +6045,7 @@ "ResourceType": { "target": "com.amazonaws.iotwireless#ResourceType", "traits": { - "smithy.api#documentation": "The type of the resource, which can be WirelessDevice,\n WirelessGateway or FuotaTask.
The type of resource, which can be WirelessDevice,\n WirelessGateway, or FuotaTask.
The service type for which to get endpoint information about. Can be CUPS\n for the Configuration and Update Server endpoint, or LNS for the LoRaWAN\n Network Server endpoint or CLAIM for the global endpoint.
The service type for which to get endpoint information about. Can be CUPS\n for the Configuration and Update Server endpoint, or LNS for the LoRaWAN\n Network Server endpoint.
List all multicast groups associated with a fuota task.
", + "smithy.api#documentation": "List all multicast groups associated with a FUOTA task.
", "smithy.api#http": { "method": "GET", "uri": "/fuota-tasks/{Id}/multicast-groups", @@ -10034,8 +10034,9 @@ "EutranCid": { "target": "com.amazonaws.iotwireless#EutranCid", "traits": { - "smithy.api#documentation": "E-UTRAN (Evolved Universal Terrestrial Radio Access Network) cell global identifier\n (EUTRANCID).
", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": 0, + "smithy.api#documentation": "E-UTRAN (Evolved Universal Terrestrial Radio Access Network) cell global identifier\n (EUTRANCID).
" } }, "Rsrp": { @@ -11049,18 +11050,18 @@ "GatewayList": { "target": "com.amazonaws.iotwireless#GatewayListMulticast", "traits": { - "smithy.api#documentation": "The list of gateways that you want to use for sending the multicast downlink. Each downlink will be\n sent to all the gateways in the list with transmission interval between them. If list is empty the gateway\n list will be dynamically selected similar to the case of no ParticipatingGateways\n
" + "smithy.api#documentation": "The list of gateways that you want to use for sending the multicast downlink message.\n Each downlink message will be sent to all the gateways in the list in the order that you\n provided. If the gateway list is empty, then AWS IoT Core for LoRaWAN chooses the\n gateways that were most recently used by the devices to send an uplink message.
" } }, "TransmissionInterval": { "target": "com.amazonaws.iotwireless#TransmissionIntervalMulticast", "traits": { - "smithy.api#documentation": "The duration of time for which AWS IoT Core for LoRaWAN will wait before transmitting\n the multicast payload to the next gateway in the list.
" + "smithy.api#documentation": "The duration of time in milliseconds for which AWS IoT Core for LoRaWAN will wait\n before transmitting the multicast payload to the next gateway in the list.
" } } }, "traits": { - "smithy.api#documentation": "Specify the list of gateways to which you want to send the multicast downlink messages.\n The multicast message will be sent to each gateway in the sequence provided in the list.
" + "smithy.api#documentation": "Specify the list of gateways to which you want to send the multicast downlink\n messages. The multicast message will be sent to each gateway in the list, with the\n transmission interval as the time interval between each message.
" } }, "com.amazonaws.iotwireless#PartnerAccountArn": { @@ -11524,7 +11525,7 @@ } ], "traits": { - "smithy.api#documentation": "Sets the log-level override for a resource-ID and resource-type. This option can be\n specified for a wireless gateway or a wireless device. A limit of 200 log level override\n can be set per account.
", + "smithy.api#documentation": "Sets the log-level override for a resource ID and resource type. A limit of 200 log\n level override can be set per account.
", "smithy.api#http": { "method": "PUT", "uri": "/log-levels/{ResourceIdentifier}", @@ -11545,7 +11546,7 @@ "ResourceType": { "target": "com.amazonaws.iotwireless#ResourceType", "traits": { - "smithy.api#documentation": "The type of the resource, which can be WirelessDevice,\n WirelessGateway, or FuotaTask.
The type of resource, which can be WirelessDevice,\n WirelessGateway, or FuotaTask.
Removes the log-level overrides for all resources; wireless devices, wireless\n gateways, and fuota tasks.
", + "smithy.api#documentation": "Removes the log-level overrides for all resources; wireless devices, wireless\n gateways, and FUOTA tasks.
", "smithy.api#http": { "method": "DELETE", "uri": "/log-levels", @@ -11737,7 +11738,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes the log-level override, if any, for a specific resource-ID and resource-type.\n It can be used for a wireless device, a wireless gateway, or a fuota task.
", + "smithy.api#documentation": "Removes the log-level override, if any, for a specific resource ID and resource type.\n It can be used for a wireless device, a wireless gateway, or a FUOTA task.
", "smithy.api#http": { "method": "DELETE", "uri": "/log-levels/{ResourceIdentifier}", @@ -11758,7 +11759,7 @@ "ResourceType": { "target": "com.amazonaws.iotwireless#ResourceType", "traits": { - "smithy.api#documentation": "The type of the resource, which can be WirelessDevice,\n WirelessGateway, or FuotaTask.
The type of resource, which can be WirelessDevice,\n WirelessGateway, or FuotaTask.
The identifier of the resource. For a Wireless Device, it is the wireless device ID.\n For a wireless gateway, it is the wireless gateway ID.
", + "smithy.api#documentation": "The unique identifier of the resource, which can be the wireless gateway ID, the\n wireless device ID, or the FUOTA task ID.
", "smithy.api#length": { "min": 0, "max": 256 @@ -14295,7 +14296,7 @@ } ], "traits": { - "smithy.api#documentation": "Set default log level, or log levels by resource types. This can be for wireless\n device log options or wireless gateways log options and is used to control the log\n messages that'll be displayed in CloudWatch.
", + "smithy.api#documentation": "Set default log level, or log levels by resource types. This can be for wireless\n device, wireless gateway, or FUOTA task log options, and is used to control the log\n messages that'll be displayed in CloudWatch.
", "smithy.api#http": { "method": "POST", "uri": "/log-levels", @@ -16437,7 +16438,7 @@ "name": "iotwireless" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "AWS IoT Wireless provides bi-directional communication between internet-connected\n wireless devices and the AWS Cloud. To onboard both LoRaWAN and Sidewalk devices to AWS\n IoT, use the IoT Wireless API. These wireless devices use the Low Power Wide Area\n Networking (LPWAN) communication protocol to communicate with AWS IoT.
\nUsing the API, you can perform create, read, update, and delete operations for your\n wireless devices, gateways, destinations, and profiles. After onboarding your devices,\n you can use the API operations to set log levels and monitor your devices with\n CloudWatch.
\nYou can also use the API operations to create multicast groups and schedule a\n multicast session for sending a downlink message to devices in the group. By using\n Firmware Updates Over-The-Air (FUOTA) API operations, you can create a FUOTA task and\n schedule a session to update the firmware of individual devices or an entire group of\n devices in a multicast group.
\nTo connect to the AWS IoT Wireless Service, use the Service endpoints as described in\n IoT Wireless Service\n endpoints in the AWS General Reference.
", + "smithy.api#documentation": "AWS IoT Wireless provides bi-directional communication between internet-connected\n wireless devices and the AWS Cloud. To onboard both LoRaWAN and Sidewalk devices to AWS\n IoT, use the IoT Wireless API. These wireless devices use the Low Power Wide Area\n Networking (LPWAN) communication protocol to communicate with AWS IoT.
\nUsing the API, you can perform create, read, update, and delete operations for your\n wireless devices, gateways, destinations, and profiles. After onboarding your devices,\n you can use the API operations to set log levels and monitor your devices with\n CloudWatch.
\nYou can also use the API operations to create multicast groups and schedule a\n multicast session for sending a downlink message to devices in the group. By using\n Firmware Updates Over-The-Air (FUOTA) API operations, you can create a FUOTA task and\n schedule a session to update the firmware of individual devices or an entire group of\n devices in a multicast group.
\nTo connect to the AWS IoT Wireless Service, use the Service endpoints as described in\n IoT \n Wireless Service endpoints. You can use both IPv4 and IPv6 protocols to connect\n to the endpoints and send requests to the AWS IoT Wireless service. For more information,\n see Using\n\t\t\tIPv6 with AWS IoT Wireless.
", "smithy.api#title": "AWS IoT Wireless", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/codegen/sdk/aws-models/iotfleetwise.json b/codegen/sdk/aws-models/iotfleetwise.json index daaaed721b0..dbe9815bb39 100644 --- a/codegen/sdk/aws-models/iotfleetwise.json +++ b/codegen/sdk/aws-models/iotfleetwise.json @@ -9861,7 +9861,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a vehicle.
", + "smithy.api#documentation": "Updates a vehicle.
\nAccess to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
\nRemove state templates from the vehicle.
" } + }, + "stateTemplatesToUpdate": { + "target": "com.amazonaws.iotfleetwise#StateTemplateAssociations", + "traits": { + "smithy.api#documentation": "Change the stateTemplateUpdateStrategy of state templates already associated with the vehicle.
Remove existing state template associations from the vehicle.
" } + }, + "stateTemplatesToUpdate": { + "target": "com.amazonaws.iotfleetwise#StateTemplateAssociations", + "traits": { + "smithy.api#documentation": "Change the stateTemplateUpdateStrategy of state templates already associated with the vehicle.
Information about the vehicle to update.
" + "smithy.api#documentation": "Information about the vehicle to update.
\nAccess to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
\n\n The replication specification of the keyspace includes:
\n\n replicationStrategy - the required value is SINGLE_REGION or \n MULTI_REGION.
\n regionList - if the replicationStrategy is MULTI_REGION, the\n regionList requires the current Region and at least one additional Amazon Web Services Region where \n the keyspace is going to be replicated in. The maximum number of supported replication Regions including the current\n Region is six.
\n The replication specification of the keyspace includes:
\n\n replicationStrategy - the required value is SINGLE_REGION or \n MULTI_REGION.
\n regionList - if the replicationStrategy is MULTI_REGION, the\n regionList requires the current Region and at least one additional Amazon Web Services Region where \n the keyspace is going to be replicated in.
\n The regionList can contain up to six Amazon Web Services Regions where the keyspace is replicated in.\n
\n The regionList contains the Amazon Web Services Regions where the keyspace is replicated in.\n
\n The replication specification of the keyspace includes:
\n\n regionList - up to six Amazon Web Services Regions where the keyspace is replicated in.
\n replicationStrategy - the required value is SINGLE_REGION or \n MULTI_REGION.
\n The replication specification of the keyspace includes:
\n\n regionList - the Amazon Web Services Regions where the keyspace is replicated in.
\n replicationStrategy - the required value is SINGLE_REGION or \n MULTI_REGION.
Allows you to configure destinations where error logs will be published during the bot import process.
" + } + }, "idleSessionTTLInSeconds": { "target": "com.amazonaws.lexmodelsv2#SessionTTL", "traits": { @@ -5576,6 +5582,12 @@ "traits": { "smithy.api#documentation": "The list of bot members in a network to be created.
" } + }, + "errorLogSettings": { + "target": "com.amazonaws.lexmodelsv2#ErrorLogSettings", + "traits": { + "smithy.api#documentation": "Specifies the configuration for error logging during bot creation.
" + } } }, "traits": { @@ -5656,6 +5668,12 @@ "traits": { "smithy.api#documentation": "The list of bots in a network that was created.
" } + }, + "errorLogSettings": { + "target": "com.amazonaws.lexmodelsv2#ErrorLogSettings", + "traits": { + "smithy.api#documentation": "Specifies configuration settings for delivering error logs to Cloudwatch Logs in an Amazon Lex bot response.
" + } } }, "traits": { @@ -6029,6 +6047,12 @@ "traits": { "smithy.api#documentation": "Specifies the configuration of the built-in Amazon.QnAIntent. The AMAZON.QnAIntent intent is called when\n Amazon Lex can't determine another intent to invoke. If you specify this field, you can't specify the kendraConfiguration field.
Qinconnect intent configuration details for the create intent request.
" + } } }, "traits": { @@ -6145,6 +6169,12 @@ "traits": { "smithy.api#documentation": "Details about the the configuration of the built-in Amazon.QnAIntent.
Qinconnect intent configuration details for the create intent response.
" + } } }, "traits": { @@ -9508,6 +9538,12 @@ "traits": { "smithy.api#documentation": "If the botStatus is Failed, this contains\n a list of reasons that the bot couldn't be built.
Contains the configuration for error logging that specifies where and how bot errors are recorded, including destinations like CloudWatch Logs.
" + } } }, "traits": { @@ -10296,6 +10332,12 @@ "traits": { "smithy.api#documentation": "Details about the configuration of the built-in Amazon.QnAIntent.
Qinconnect intent configuration details for the describe intent response.
" + } } }, "traits": { @@ -11478,6 +11520,21 @@ } } }, + "com.amazonaws.lexmodelsv2#ErrorLogSettings": { + "type": "structure", + "members": { + "enabled": { + "target": "com.amazonaws.lexmodelsv2#BoxedBoolean", + "traits": { + "smithy.api#documentation": "Settings parameters for the error logs, when it is enabled.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Settings parameters for the error logs, whether it is enabled or disabled.
" + } + }, "com.amazonaws.lexmodelsv2#ErrorMessage": { "type": "string" }, @@ -18779,6 +18836,41 @@ "smithy.api#documentation": "Specifies a list of message groups that Amazon Lex sends to a user to\n elicit a response.
" } }, + "com.amazonaws.lexmodelsv2#QInConnectAssistantARN": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" + } + }, + "com.amazonaws.lexmodelsv2#QInConnectAssistantConfiguration": { + "type": "structure", + "members": { + "assistantArn": { + "target": "com.amazonaws.lexmodelsv2#QInConnectAssistantARN", + "traits": { + "smithy.api#documentation": "The assistant Arn details of the Qinconnect assistant configuration.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration details of the Qinconnect assistant.
" + } + }, + "com.amazonaws.lexmodelsv2#QInConnectIntentConfiguration": { + "type": "structure", + "members": { + "qInConnectAssistantConfiguration": { + "target": "com.amazonaws.lexmodelsv2#QInConnectAssistantConfiguration", + "traits": { + "smithy.api#documentation": "The Qinconnect assistant configuration details of the Qinconnect intent.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration details of the Qinconnect intent.
" + } + }, "com.amazonaws.lexmodelsv2#QnAIntentConfiguration": { "type": "structure", "members": { @@ -23325,6 +23417,12 @@ "traits": { "smithy.api#documentation": "The list of bot members in the network associated \n with the update action.
" } + }, + "errorLogSettings": { + "target": "com.amazonaws.lexmodelsv2#ErrorLogSettings", + "traits": { + "smithy.api#documentation": "Allows you to modify how Amazon Lex logs errors during bot interactions, including destinations for error logs and the types of errors to be captured.
" + } } }, "traits": { @@ -23399,6 +23497,12 @@ "traits": { "smithy.api#documentation": "The list of bot members in the network that was updated.
" } + }, + "errorLogSettings": { + "target": "com.amazonaws.lexmodelsv2#ErrorLogSettings", + "traits": { + "smithy.api#documentation": "Settings for managing error logs within the response of an update bot operation.
" + } } }, "traits": { @@ -23676,6 +23780,12 @@ "traits": { "smithy.api#documentation": "Specifies the configuration of the built-in Amazon.QnAIntent. The AMAZON.QnAIntent intent is called when\n Amazon Lex can't determine another intent to invoke. If you specify this field, you can't specify the kendraConfiguration field.
Qinconnect intent configuration details for the update intent request.
" + } } }, "traits": { @@ -23804,6 +23914,12 @@ "traits": { "smithy.api#documentation": "Details about the configuration of the built-in Amazon.QnAIntent.
Qinconnect intent configuration details for the update intent response.
" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/m2.json b/codegen/sdk/aws-models/m2.json index f3088202959..6ef81399d86 100644 --- a/codegen/sdk/aws-models/m2.json +++ b/codegen/sdk/aws-models/m2.json @@ -86,6 +86,9 @@ { "target": "com.amazonaws.m2#CancelBatchJobExecution" }, + { + "target": "com.amazonaws.m2#CreateDataSetExportTask" + }, { "target": "com.amazonaws.m2#CreateDataSetImportTask" }, @@ -104,6 +107,9 @@ { "target": "com.amazonaws.m2#GetDataSetDetails" }, + { + "target": "com.amazonaws.m2#GetDataSetExportTask" + }, { "target": "com.amazonaws.m2#GetDataSetImportTask" }, @@ -122,6 +128,9 @@ { "target": "com.amazonaws.m2#ListBatchJobRestartPoints" }, + { + "target": "com.amazonaws.m2#ListDataSetExportHistory" + }, { "target": "com.amazonaws.m2#ListDataSetImportHistory" }, @@ -1664,6 +1673,105 @@ } } }, + "com.amazonaws.m2#CreateDataSetExportTask": { + "type": "operation", + "input": { + "target": "com.amazonaws.m2#CreateDataSetExportTaskRequest" + }, + "output": { + "target": "com.amazonaws.m2#CreateDataSetExportTaskResponse" + }, + "errors": [ + { + "target": "com.amazonaws.m2#AccessDeniedException" + }, + { + "target": "com.amazonaws.m2#ConflictException" + }, + { + "target": "com.amazonaws.m2#InternalServerException" + }, + { + "target": "com.amazonaws.m2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.m2#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.m2#ThrottlingException" + }, + { + "target": "com.amazonaws.m2#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "requiredActions": [ + "s3:GetObject" + ], + "documentation": "Grants permission to create a data set export task" + }, + "smithy.api#documentation": "Starts a data set export task for a specific application.
", + "smithy.api#http": { + "method": "POST", + "uri": "/applications/{applicationId}/dataset-export-task", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.m2#CreateDataSetExportTaskRequest": { + "type": "structure", + "members": { + "applicationId": { + "target": "com.amazonaws.m2#Identifier", + "traits": { + "smithy.api#documentation": "The unique identifier of the application for which you want to export data sets.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "exportConfig": { + "target": "com.amazonaws.m2#DataSetExportConfig", + "traits": { + "smithy.api#documentation": "The data set export task configuration.
", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.m2#ClientToken", + "traits": { + "smithy.api#documentation": "Unique, case-sensitive identifier you provide to ensure the idempotency of the request to create a data set export. The service generates the clientToken when the API call is triggered. The token expires after one hour, so if you retry the API within this timeframe with the same clientToken, you will get the same response. The service also handles deleting the clientToken after it expires.
", + "smithy.api#idempotencyToken": {} + } + }, + "kmsKeyId": { + "target": "com.amazonaws.m2#KMSKeyId", + "traits": { + "smithy.api#documentation": "The identifier of a customer managed key.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.m2#CreateDataSetExportTaskResponse": { + "type": "structure", + "members": { + "taskId": { + "target": "com.amazonaws.m2#Identifier", + "traits": { + "smithy.api#documentation": "The task identifier. This operation is asynchronous. Use this identifier with the GetDataSetExportTask operation to obtain the status of this task.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.m2#CreateDataSetImportTask": { "type": "operation", "input": { @@ -2048,6 +2156,149 @@ "smithy.api#documentation": "Defines a data set.
" } }, + "com.amazonaws.m2#DataSetExportConfig": { + "type": "union", + "members": { + "s3Location": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The Amazon S3 location of the data sets.
" + } + }, + "dataSets": { + "target": "com.amazonaws.m2#DataSetExportList", + "traits": { + "smithy.api#documentation": "The data sets.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Identifies one or more data sets you want to import with the CreateDataSetExportTask operation.
" + } + }, + "com.amazonaws.m2#DataSetExportItem": { + "type": "structure", + "members": { + "datasetName": { + "target": "com.amazonaws.m2#String200", + "traits": { + "smithy.api#documentation": "The data set.
", + "smithy.api#required": {} + } + }, + "externalLocation": { + "target": "com.amazonaws.m2#ExternalLocation", + "traits": { + "smithy.api#documentation": "The location of the data set.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Identifies a specific data set to export from an external location.
" + } + }, + "com.amazonaws.m2#DataSetExportList": { + "type": "list", + "member": { + "target": "com.amazonaws.m2#DataSetExportItem" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.m2#DataSetExportSummary": { + "type": "structure", + "members": { + "total": { + "target": "com.amazonaws.m2#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The total number of data set exports.
", + "smithy.api#required": {} + } + }, + "succeeded": { + "target": "com.amazonaws.m2#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The number of data set exports that have succeeded.
", + "smithy.api#required": {} + } + }, + "failed": { + "target": "com.amazonaws.m2#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The number of data set exports that have failed.
", + "smithy.api#required": {} + } + }, + "pending": { + "target": "com.amazonaws.m2#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The number of data set exports that are pending.
", + "smithy.api#required": {} + } + }, + "inProgress": { + "target": "com.amazonaws.m2#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The number of data set exports that are in progress.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Represents a summary of data set exports.
" + } + }, + "com.amazonaws.m2#DataSetExportTask": { + "type": "structure", + "members": { + "taskId": { + "target": "com.amazonaws.m2#Identifier", + "traits": { + "smithy.api#documentation": "The identifier of the data set export task.
", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.m2#DataSetTaskLifecycle", + "traits": { + "smithy.api#documentation": "The status of the data set export task.
", + "smithy.api#required": {} + } + }, + "summary": { + "target": "com.amazonaws.m2#DataSetExportSummary", + "traits": { + "smithy.api#documentation": "A summary of the data set export task.
", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "If dataset exports failed, the failure reason will show here.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains information about a data set export task.
" + } + }, + "com.amazonaws.m2#DataSetExportTaskList": { + "type": "list", + "member": { + "target": "com.amazonaws.m2#DataSetExportTask" + } + }, "com.amazonaws.m2#DataSetImportConfig": { "type": "union", "members": { @@ -3585,6 +3836,109 @@ "smithy.api#output": {} } }, + "com.amazonaws.m2#GetDataSetExportTask": { + "type": "operation", + "input": { + "target": "com.amazonaws.m2#GetDataSetExportTaskRequest" + }, + "output": { + "target": "com.amazonaws.m2#GetDataSetExportTaskResponse" + }, + "errors": [ + { + "target": "com.amazonaws.m2#AccessDeniedException" + }, + { + "target": "com.amazonaws.m2#InternalServerException" + }, + { + "target": "com.amazonaws.m2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.m2#ThrottlingException" + }, + { + "target": "com.amazonaws.m2#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to export a data set at the specified S3 location" + }, + "smithy.api#documentation": "Gets the status of a data set import task initiated with the CreateDataSetExportTask operation.
", + "smithy.api#http": { + "method": "GET", + "uri": "/applications/{applicationId}/dataset-export-tasks/{taskId}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.m2#GetDataSetExportTaskRequest": { + "type": "structure", + "members": { + "applicationId": { + "target": "com.amazonaws.m2#Identifier", + "traits": { + "smithy.api#documentation": "The application identifier.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "taskId": { + "target": "com.amazonaws.m2#Identifier", + "traits": { + "smithy.api#documentation": "The task identifier returned by the CreateDataSetExportTask operation.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.m2#GetDataSetExportTaskResponse": { + "type": "structure", + "members": { + "taskId": { + "target": "com.amazonaws.m2#Identifier", + "traits": { + "smithy.api#documentation": "The task identifier.
", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.m2#DataSetTaskLifecycle", + "traits": { + "smithy.api#documentation": "The status of the task.
", + "smithy.api#required": {} + } + }, + "summary": { + "target": "com.amazonaws.m2#DataSetExportSummary", + "traits": { + "smithy.api#documentation": "A summary of the status of the task.
" + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "If dataset export failed, the failure reason will show here.
" + } + }, + "kmsKeyArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The identifier of a customer managed key used for exported data set encryption.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.m2#GetDataSetImportTask": { "type": "operation", "input": { @@ -4152,6 +4506,25 @@ "smithy.api#default": false, "smithy.api#documentation": "Specifies if a step can be restarted or not.
" } + }, + "stepCheckpoint": { + "target": "com.amazonaws.m2#Integer", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "A registered step-level checkpoint identifier that can be used for restarting an Amazon Web Services Blu Age application batch job.
" + } + }, + "stepCheckpointStatus": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The step-level checkpoint status for an Amazon Web Services Blu Age application batch job.
" + } + }, + "stepCheckpointTime": { + "target": "com.amazonaws.m2#Timestamp", + "traits": { + "smithy.api#documentation": "The step-level checkpoint status for an Amazon Web Services Blu Age application batch job.
" + } } }, "traits": { @@ -4185,12 +4558,36 @@ "traits": { "smithy.api#documentation": "The procedure step name that a batch job was restarted to.
" } + }, + "stepCheckpoint": { + "target": "com.amazonaws.m2#Integer", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "Skip selected step and issue a restart from immediate successor step for an Amazon Web Services Blu Age application batch job.
" + } + }, + "skip": { + "target": "com.amazonaws.m2#Boolean", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "The step-level checkpoint timestamp (creation or last modification) for an Amazon Web Services Blu Age application batch job.
" + } } }, "traits": { "smithy.api#documentation": "Provides step/procedure step information for a restart batch job operation.
" } }, + "com.amazonaws.m2#KMSKeyId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:/_-]+$" + } + }, "com.amazonaws.m2#ListApplicationVersions": { "type": "operation", "input": { @@ -4695,6 +5092,103 @@ "smithy.api#output": {} } }, + "com.amazonaws.m2#ListDataSetExportHistory": { + "type": "operation", + "input": { + "target": "com.amazonaws.m2#ListDataSetExportHistoryRequest" + }, + "output": { + "target": "com.amazonaws.m2#ListDataSetExportHistoryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.m2#AccessDeniedException" + }, + { + "target": "com.amazonaws.m2#InternalServerException" + }, + { + "target": "com.amazonaws.m2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.m2#ThrottlingException" + }, + { + "target": "com.amazonaws.m2#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to list data set export history" + }, + "smithy.api#documentation": "Lists the data set exports for the specified application.
", + "smithy.api#http": { + "method": "GET", + "uri": "/applications/{applicationId}/dataset-export-tasks", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "dataSetExportTasks" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.m2#ListDataSetExportHistoryRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.m2#NextToken", + "traits": { + "smithy.api#documentation": "A pagination token returned from a previous call to\n this operation. This specifies the next item to return. To return to the beginning of the \n list, exclude this parameter.
", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.m2#MaxResults", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "The maximum number of objects to return.
", + "smithy.api#httpQuery": "maxResults" + } + }, + "applicationId": { + "target": "com.amazonaws.m2#Identifier", + "traits": { + "smithy.api#documentation": "The unique identifier of the application.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.m2#ListDataSetExportHistoryResponse": { + "type": "structure", + "members": { + "dataSetExportTasks": { + "target": "com.amazonaws.m2#DataSetExportTaskList", + "traits": { + "smithy.api#documentation": "The data set export tasks.
", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.m2#NextToken", + "traits": { + "smithy.api#documentation": "If there are more items to return, this contains a token \n that is passed to a subsequent call to this operation to retrieve the next set of items.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.m2#ListDataSetImportHistory": { "type": "operation", "input": { diff --git a/codegen/sdk/aws-models/mailmanager.json b/codegen/sdk/aws-models/mailmanager.json index 18bf9afd09c..652be7194b9 100644 --- a/codegen/sdk/aws-models/mailmanager.json +++ b/codegen/sdk/aws-models/mailmanager.json @@ -1374,6 +1374,12 @@ "smithy.api#documentation": "If you choose an Authenticated ingress endpoint, you must configure either an SMTP password or a secret\n ARN.
" } }, + "NetworkConfiguration": { + "target": "com.amazonaws.mailmanager#NetworkConfiguration", + "traits": { + "smithy.api#documentation": "Specifies the network configuration for the ingress point.\n This allows you to create an IPv4-only, Dual-Stack, or PrivateLink type of ingress point. If not specified, the default network type is IPv4-only.\n
" + } + }, "Tags": { "target": "com.amazonaws.mailmanager#TagList", "traits": { @@ -3289,6 +3295,12 @@ "smithy.api#documentation": "The authentication configuration of the ingress endpoint resource.
" } }, + "NetworkConfiguration": { + "target": "com.amazonaws.mailmanager#NetworkConfiguration", + "traits": { + "smithy.api#documentation": "The network configuration for the ingress point.
" + } + }, "CreatedTimestamp": { "target": "smithy.api#Timestamp", "traits": { @@ -4071,6 +4083,60 @@ "smithy.api#documentation": "The union type representing the allowed types for the left hand side of an IP\n condition.
" } }, + "com.amazonaws.mailmanager#IngressIpv6Attribute": { + "type": "enum", + "members": { + "SENDER_IPV6": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SENDER_IPV6" + } + } + } + }, + "com.amazonaws.mailmanager#IngressIpv6Expression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#IngressIpv6ToEvaluate", + "traits": { + "smithy.api#documentation": "The left hand side argument of an IPv6 condition expression.
", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#IngressIpOperator", + "traits": { + "smithy.api#documentation": "The matching operator for an IPv6 condition expression.
", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.mailmanager#Ipv6Cidrs", + "traits": { + "smithy.api#documentation": "The right hand side argument of an IPv6 condition expression.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The union type representing the allowed types for the left hand side of an IPv6 condition.
" + } + }, + "com.amazonaws.mailmanager#IngressIpv6ToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#IngressIpv6Attribute", + "traits": { + "smithy.api#documentation": "An enum type representing the allowed attribute types for an IPv6 condition.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The structure for an IPv6 based condition matching on the incoming mail.
" + } + }, "com.amazonaws.mailmanager#IngressIsInAddressList": { "type": "structure", "members": { @@ -4521,6 +4587,23 @@ "smithy.api#documentation": "The union type representing the allowed types for the left hand side of a TLS\n condition.
" } }, + "com.amazonaws.mailmanager#IpType": { + "type": "enum", + "members": { + "IPV4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IPV4" + } + }, + "DUAL_STACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DUAL_STACK" + } + } + } + }, "com.amazonaws.mailmanager#Ipv4Cidr": { "type": "string", "traits": { @@ -4533,6 +4616,21 @@ "target": "com.amazonaws.mailmanager#Ipv4Cidr" } }, + "com.amazonaws.mailmanager#Ipv6Cidr": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 49 + }, + "smithy.api#pattern": "^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))\\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9])$" + } + }, + "com.amazonaws.mailmanager#Ipv6Cidrs": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#Ipv6Cidr" + } + }, "com.amazonaws.mailmanager#JobId": { "type": "string", "traits": { @@ -6517,6 +6615,26 @@ "smithy.api#pattern": "^[a-zA-Z0-9:_/+=,@.#-]+$" } }, + "com.amazonaws.mailmanager#NetworkConfiguration": { + "type": "union", + "members": { + "PublicNetworkConfiguration": { + "target": "com.amazonaws.mailmanager#PublicNetworkConfiguration", + "traits": { + "smithy.api#documentation": "Specifies the network configuration for the public ingress point.
" + } + }, + "PrivateNetworkConfiguration": { + "target": "com.amazonaws.mailmanager#PrivateNetworkConfiguration", + "traits": { + "smithy.api#documentation": "Specifies the network configuration for the private ingress point.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The network type (IPv4-only, Dual-Stack, PrivateLink) of the ingress endpoint resource.
" + } + }, "com.amazonaws.mailmanager#NoAuthentication": { "type": "structure", "members": {}, @@ -6557,6 +6675,12 @@ "smithy.api#documentation": "This represents an IP based condition matching on the incoming mail. It performs the\n operation configured in 'Operator' and evaluates the 'Protocol' object against the\n 'Value'.
" } }, + "Ipv6Expression": { + "target": "com.amazonaws.mailmanager#IngressIpv6Expression", + "traits": { + "smithy.api#documentation": "This represents an IPv6 based condition matching on the incoming mail. It performs the\n operation configured in 'Operator' and evaluates the 'Protocol' object against the\n 'Value'.
" + } + }, "TlsExpression": { "target": "com.amazonaws.mailmanager#IngressTlsProtocolExpression", "traits": { @@ -6619,6 +6743,37 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.mailmanager#PrivateNetworkConfiguration": { + "type": "structure", + "members": { + "VpcEndpointId": { + "target": "com.amazonaws.mailmanager#VpcEndpointId", + "traits": { + "smithy.api#documentation": "The identifier of the VPC endpoint to associate with this private ingress point.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies the network configuration for the private ingress point.
" + } + }, + "com.amazonaws.mailmanager#PublicNetworkConfiguration": { + "type": "structure", + "members": { + "IpType": { + "target": "com.amazonaws.mailmanager#IpType", + "traits": { + "smithy.api#default": "IPV4", + "smithy.api#documentation": "The IP address type for the public ingress point. Valid values are IPV4 and DUAL_STACK.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies the network configuration for the public ingress point.
" + } + }, "com.amazonaws.mailmanager#QBusinessApplicationId": { "type": "string", "traits": { @@ -7596,9 +7751,9 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 18 + "max": 43 }, - "smithy.api#pattern": "^(([0-9]|.|/)*)$" + "smithy.api#pattern": "^(([0-9]|.|:|/)*)$" } }, "com.amazonaws.mailmanager#RuleIpToEvaluate": { @@ -9583,6 +9738,12 @@ "smithy.api#error": "client", "smithy.api#httpError": 400 } + }, + "com.amazonaws.mailmanager#VpcEndpointId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^vpce-[a-zA-Z0-9]{17}$" + } } } } \ No newline at end of file diff --git a/codegen/sdk/aws-models/marketplace-entitlement-service.json b/codegen/sdk/aws-models/marketplace-entitlement-service.json index 216f9a9406c..d9c050c96cf 100644 --- a/codegen/sdk/aws-models/marketplace-entitlement-service.json +++ b/codegen/sdk/aws-models/marketplace-entitlement-service.json @@ -55,12 +55,6 @@ "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -80,6 +74,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -111,152 +111,158 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + 
"type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://entitlement-marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-cn" ] - } - ], - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://entitlement.marketplace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } ], - "type": "tree" 
+ "endpoint": { + "url": "https://entitlement-marketplace.{Region}.amazonaws.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ { "fn": "getAttr", @@ -264,165 +270,294 @@ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://entitlement-marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entitlement.marketplace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://entitlement.marketplace-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack 
are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsDualStack" + true ] } - ] - } - ], - "rules": [ + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entitlement.marketplace-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://entitlement.marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ + "conditions": [ { - "ref": "Region" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - "cn-northwest-1" - ] - } - ], - "endpoint": { - "url": "https://entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws", { - "fn": 
"getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + true ] } - ] + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entitlement.marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": "https://entitlement.marketplace.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "endpoint": { - "url": "https://entitlement.marketplace.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://entitlement.marketplace.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] }, "smithy.rules#endpointTests": { "testCases": [ { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "us-east-1", + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with 
fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { @@ -455,7 +590,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace.us-east-1.api.aws" + "url": "https://entitlement-marketplace.us-east-1.api.aws" } }, "params": { @@ -465,118 +600,118 @@ } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://entitlement-marketplace.cn-northwest-1.amazonaws.com.cn" + "url": "https://entitlement.marketplace.us-east-1.amazonaws.com" } }, "params": { - "Region": "cn-northwest-1", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://entitlement.marketplace-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://entitlement.marketplace-fips.cn-north-1.amazonaws.com.cn" + "url": "https://entitlement.marketplace-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://entitlement-marketplace.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace.cn-north-1.amazonaws.com.cn" + "url": "https://entitlement-marketplace.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace-fips.us-gov-east-1.api.aws" + "url": "https://entitlement.marketplace-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace-fips.us-gov-east-1.amazonaws.com" + "url": 
"https://entitlement.marketplace-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace.us-gov-east-1.api.aws" + "url": "https://entitlement.marketplace.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://entitlement.marketplace.us-gov-east-1.amazonaws.com" + "url": "https://entitlement.marketplace.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -678,54 +813,99 @@ } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "url": "https://entitlement.marketplace-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-east-1", + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": 
"DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "url": "https://entitlement.marketplace.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "url": "https://entitlement.marketplace-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + 
"UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entitlement.marketplace.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { @@ -766,6 +946,12 @@ "smithy.api#documentation": "The customer identifier is a handle to each unique customer in an application. Customer\n identifiers are obtained through the ResolveCustomer operation in AWS Marketplace Metering\n Service.
" } }, + "CustomerAWSAccountId": { + "target": "com.amazonaws.marketplaceentitlementservice#NonEmptyString", + "traits": { + "smithy.api#documentation": "\n The CustomerAWSAccountID parameter specifies the AWS account ID of the buyer.\n
GetEntitlements retrieves entitlement values for a given product. The results can be\n filtered based on customer identifier or product dimensions.
", + "smithy.api#documentation": "GetEntitlements retrieves entitlement values for a given product. The results can be\n filtered based on customer identifier, AWS account ID, or product dimensions.
\n\n The CustomerIdentifier parameter is scheduled for deprecation. Use CustomerAWSAccountID instead.
These parameters are mutually exclusive. You can't specify both CustomerIdentifier and CustomerAWSAccountID in the same request.\n
Filter is used to return entitlements for a specific customer or for a specific\n dimension. Filters are described as keys mapped to a lists of values. Filtered requests are\n unioned for each value in the value list, and then\n intersected for each filter key.
" + "smithy.api#documentation": "Filter is used to return entitlements for a specific customer or for a specific\n dimension. Filters are described as keys mapped to a lists of values. Filtered requests are\n unioned for each value in the value list, and then\n intersected for each filter key.
\n\n CustomerIdentifier and CustomerAWSAccountID are mutually exclusive. You can't specify both in the same request.\n
This reference provides descriptions of the low-level AWS Marketplace Metering Service\n API.
\nAWS Marketplace sellers can use this API to submit usage data for custom usage\n dimensions.
\nFor information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the\n AWS Marketplace Seller Guide.\n
\n\n Submitting Metering Records\n
\n\n MeterUsage - Submits the metering record for an AWS\n Marketplace product. MeterUsage is called from an EC2 instance or a\n container running on EKS or ECS.
\n BatchMeterUsage - Submits the metering record for a set of\n customers. BatchMeterUsage is called from a software-as-a-service\n (SaaS) application.
\n Accepting New Customers\n
\n\n ResolveCustomer - Called by a SaaS application during the\n registration process. When a buyer visits your website during the registration\n process, the buyer submits a Registration Token through the browser. The\n Registration Token is resolved through this API to obtain a\n CustomerIdentifier\n \n along with the CustomerAWSAccountId and\n ProductCode.
\n Entitlement and Metering for Paid Container Products\n
\nPaid container software products sold through AWS Marketplace must integrate\n with the AWS Marketplace Metering Service and call the\n RegisterUsage operation for software entitlement and metering.\n Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call\n RegisterUsage, but you can do so if you want to receive usage\n data in your seller reports. For more information on using the\n RegisterUsage operation, see Container-Based Products.
\n BatchMeterUsage API calls are captured by AWS CloudTrail. You can use\n Cloudtrail to verify that the SaaS metering records that you sent are accurate by\n searching for records with the eventName of BatchMeterUsage.\n You can also use CloudTrail to audit records over time. For more information, see the\n \n AWS CloudTrail User Guide.\n
This reference provides descriptions of the low-level Marketplace Metering Service API.
\nAmazon Web Services Marketplace sellers can use this API to submit usage data for custom usage\n dimensions.
\nFor information about the permissions that you need to use this API, see Amazon Web Services Marketplace metering and entitlement API permissions in the Amazon Web Services Marketplace\n Seller Guide.\n
\n\n Submitting metering records\n
\n\n MeterUsage\n
\nSubmits the metering record for an Amazon Web Services Marketplace product.
\nCalled from: Amazon Elastic Compute Cloud (Amazon EC2) instance or a container running on either\n Amazon Elastic Kubernetes Service (Amazon EKS) or Amazon Elastic Container Service (Amazon ECS)
\nSupported product types: Amazon Machine Images (AMIs) and containers
\nVendor-metered tagging: Supported allocation tagging
\n\n BatchMeterUsage\n
\nSubmits the metering record for a set of customers.\n BatchMeterUsage API calls are captured by CloudTrail. You can use\n CloudTrail to verify that the software as a service (SaaS) metering records that\n you sent are accurate by searching for records with the eventName\n of BatchMeterUsage. You can also use CloudTrail to audit records over\n time. For more information, see the CloudTrail User\n Guide.
Called from: SaaS applications
\nSupported product type: SaaS
\nVendor-metered tagging: Supports allocation tagging
\n\n Accepting new customers\n
\n\n ResolveCustomer\n
\nResolves the registration token that the buyer submits through the browser\n during the registration process. Obtains a CustomerIdentifier along\n with the CustomerAWSAccountId and ProductCode.
Called from: SaaS application during the registration process
\nSupported product type: SaaS
\nVendor-metered tagging: Not applicable
\n\n Entitlement and metering for paid container\n products\n
\n\n RegisteredUsage\n
\nProvides software entitlement and metering. Paid container software products\n sold through Amazon Web Services Marketplace must integrate with the Marketplace Metering Service and call the\n RegisterUsage operation. Free and Bring Your Own License model\n (BYOL) products for Amazon ECS or Amazon EKS aren't required to call\n RegisterUsage. However, you can do so if you want to receive\n usage data in your seller reports. For more information about using the\n RegisterUsage operation, see Container-based products.
Called from: Paid container software products
\nSupported product type: Containers
\nVendor-metered tagging: Not applicable
\n\n Entitlement custom metering for container\n products\n
\nMeterUsage API is available in GovCloud Regions but only supports AMI\n FCP products in GovCloud Regions. Flexible Consumption Pricing (FCP) Container\n products aren’t supported in GovCloud Regions: us-gov-west-1 and us-gov-east-1.\n For more information, see Container-based products.
\nCustom metering for container products are called using the\n MeterUsage API. The API is used for FCP AMI and FCP Container product\n metering.
\n\n Custom metering for Amazon EKS is available in 17\n Amazon Web Services Regions\n
\nThe metering service supports Amazon ECS and EKS for Flexible Consumption Pricing\n (FCP) products using MeterUsage API. Amazon ECS is supported in all\n Amazon Web Services Regions that MeterUsage API is available except for\n GovCloud.
\nAmazon EKS is supported in the following: us-east-1, us-east-2, us-west-1,\n us-west-2, eu-west-1, eu-central-1, eu-west-2, eu-west-3, eu-north-1, ap-east-1,\n ap-southeast-1, ap-northeast-1, ap-southeast-2, ap-northeast-2, ap-south-1,\n ca-central-1, sa-east-1.
\nFor questions about adding Amazon Web Services Regions for metering, contact Amazon Web Services\n Marketplace Seller Operations.
\n\n BatchMeterUsage is called from a SaaS application listed on AWS\n Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with the same\n records or a subset of the input records.
\nEvery request to BatchMeterUsage is for one product. If you need to meter\n usage for multiple products, you must make multiple calls to\n BatchMeterUsage.
Usage records are expected to be submitted as quickly as possible after the event that\n is being recorded, and are not accepted more than 6 hours after the event.
\n\n BatchMeterUsage can process up to 25 UsageRecords at a\n time.
A UsageRecord can optionally include multiple usage allocations, to\n provide customers with usage data split into buckets by tags that you define (or allow\n the customer to define).
\n BatchMeterUsage returns a list of UsageRecordResult objects,\n showing the result for each UsageRecord, as well as a list of\n UnprocessedRecords, indicating errors in the service side that you\n should retry.
\n BatchMeterUsage requests must be less than 1MB in size.
For an example of using BatchMeterUsage, see BatchMeterUsage code example in the AWS Marketplace Seller\n Guide.
\n The CustomerIdentifier parameter is scheduled for deprecation. Use CustomerAWSAccountID instead.
These parameters are mutually exclusive. You can't specify both CustomerIdentifier and CustomerAWSAccountID in the same request.\n
To post metering records for customers, SaaS applications call\n BatchMeterUsage, which is used for metering SaaS flexible\n consumption pricing (FCP). Identical requests are idempotent and can be\n retried with the same records or a subset of records. Each\n BatchMeterUsage request is for only one product. If you\n want to meter usage for multiple products, you must make multiple\n BatchMeterUsage calls.
Usage records should be submitted in quick succession following a\n recorded event. Usage records aren't accepted 6 hours or more after an\n event.
\n\n BatchMeterUsage can process up to 25\n UsageRecords at a time, and each request must be less than\n 1 MB in size. Optionally, you can have multiple usage allocations for\n usage data that's split into buckets according to predefined tags.
\n BatchMeterUsage returns a list of\n UsageRecordResult objects, which have each\n UsageRecord. It also returns a list of\n UnprocessedRecords, which indicate errors on the service\n side that should be retried.
For Amazon Web Services Regions that support BatchMeterUsage, see BatchMeterUsage Region support.\n
For an example of BatchMeterUsage, see BatchMeterUsage code example in the Amazon Web Services Marketplace Seller\n Guide.
Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new product.
", + "smithy.api#documentation": "Product code is used to uniquely identify a product in Amazon Web Services Marketplace. The product code should\n be the same as the one used during the publishing of a new product.
", "smithy.api#required": {} } } @@ -1106,7 +1007,7 @@ "Results": { "target": "com.amazonaws.marketplacemetering#UsageRecordResultList", "traits": { - "smithy.api#documentation": "Contains all UsageRecords processed by BatchMeterUsage.\n These records were either honored by AWS Marketplace Metering Service or were invalid.\n Invalid records should be fixed before being resubmitted.
Contains all UsageRecords processed by BatchMeterUsage.\n These records were either honored by Amazon Web Services Marketplace Metering Service or were invalid. Invalid\n records should be fixed before being resubmitted.
An internal error has occurred. Retry your request. If the problem persists, post a\n message with details on the AWS forums.
", + "smithy.api#documentation": "An internal error has occurred. Retry your request. If the problem persists, post a\n message with details on the Amazon Web Services forums.
", "smithy.api#error": "server" } }, @@ -1224,7 +1125,7 @@ } }, "traits": { - "smithy.api#documentation": "The endpoint being called is in a AWS Region different from your EC2 instance, ECS\n task, or EKS pod. The Region of the Metering Service endpoint and the AWS Region of the\n resource must match.
", + "smithy.api#documentation": "The endpoint being called is in a Amazon Web Services Region different from your EC2 instance, ECS\n task, or EKS pod. The Region of the Metering Service endpoint and the Amazon Web Services Region of\n the resource must match.
", "smithy.api#error": "client" } }, @@ -1260,7 +1161,7 @@ } }, "traits": { - "smithy.api#documentation": "\n RegisterUsage must be called in the same AWS Region the ECS task was\n launched in. This prevents a container from hardcoding a Region (e.g.\n withRegion(“us-east-1”) when calling RegisterUsage.
\n RegisterUsage must be called in the same Amazon Web Services Region the ECS task was\n launched in. This prevents a container from hardcoding a Region (e.g.\n withRegion(“us-east-1”) when calling RegisterUsage.
The usage allocation objects are invalid, or the number of allocations is greater than\n 500 for a single usage record.
", + "smithy.api#documentation": "Sum of allocated usage quantities is not equal to the usage quantity.
", "smithy.api#error": "client" } }, @@ -1353,7 +1254,7 @@ } ], "traits": { - "smithy.api#documentation": "API to emit metering records. For identical requests, the API is idempotent. It simply\n returns the metering record ID.
\n\n MeterUsage is authenticated on the buyer's AWS account using credentials\n from the EC2 instance, ECS task, or EKS pod.
\n MeterUsage can optionally include multiple usage allocations, to provide\n customers with usage data split into buckets by tags that you define (or allow the\n customer to define).
Usage records are expected to be submitted as quickly as possible after the event that\n is being recorded, and are not accepted more than 6 hours after the event.
" + "smithy.api#documentation": "API to emit metering records. For identical requests, the API is\n idempotent and returns the metering record ID. This is used for metering\n flexible consumption pricing (FCP) Amazon Machine Images (AMI) and\n container products.
\n\n MeterUsage is authenticated on the buyer's Amazon Web Services account using\n credentials from the Amazon EC2 instance, Amazon ECS task, or Amazon EKS pod.
\n MeterUsage can optionally include multiple usage allocations, to provide\n customers with usage data split into buckets by tags that you define (or allow the\n customer to define).
Usage records are expected to be submitted as quickly as possible after the event that\n is being recorded, and are not accepted more than 6 hours after the event.
\nFor Amazon Web Services Regions that support MeterUsage, see MeterUsage Region support for Amazon EC2 and MeterUsage Region support for Amazon ECS and Amazon EKS.
Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new product.
", + "smithy.api#documentation": "Product code is used to uniquely identify a product in Amazon Web Services Marketplace. The product code\n should be the same as the one used during the publishing of a new product.
", "smithy.api#required": {} } }, "Timestamp": { "target": "com.amazonaws.marketplacemetering#Timestamp", "traits": { - "smithy.api#documentation": "Timestamp, in UTC, for which the usage is being reported. Your application can meter\n usage for up to one hour in the past. Make sure the timestamp value is not\n before the start of the software usage.
Timestamp, in UTC, for which the usage is being reported. Your application can meter\n usage for up to six hours in the past. Make sure the timestamp value is not\n before the start of the software usage.
AWS Marketplace does not support metering usage from the underlying platform.\n Currently, Amazon ECS, Amazon EKS, and AWS Fargate are supported.
", + "smithy.api#documentation": "Amazon Web Services Marketplace does not support metering usage from the underlying platform. Currently, Amazon ECS, Amazon EKS, and Fargate are supported.
", "smithy.api#error": "client" } }, @@ -1490,7 +1391,7 @@ } ], "traits": { - "smithy.api#documentation": "Paid container software products sold through AWS Marketplace must integrate with the\n AWS Marketplace Metering Service and call the RegisterUsage operation for\n software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS\n aren't required to call RegisterUsage, but you may choose to do so if you\n would like to receive usage data in your seller reports. The sections below explain the\n behavior of RegisterUsage. RegisterUsage performs two primary\n functions: metering and entitlement.
\n Entitlement: RegisterUsage allows you to\n verify that the customer running your paid software is subscribed to your\n product on AWS Marketplace, enabling you to guard against unauthorized use. Your\n container image that integrates with RegisterUsage is only required\n to guard against unauthorized use at container startup, as such a\n CustomerNotSubscribedException or\n PlatformNotSupportedException will only be thrown on the\n initial call to RegisterUsage. Subsequent calls from the same\n Amazon ECS task instance (e.g. task-id) or Amazon EKS pod will not throw a\n CustomerNotSubscribedException, even if the customer\n unsubscribes while the Amazon ECS task or Amazon EKS pod is still\n running.
\n Metering: RegisterUsage meters software use\n per ECS task, per hour, or per pod for Amazon EKS with usage prorated to the\n second. A minimum of 1 minute of usage applies to tasks that are short lived.\n For example, if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a\n service configured as a Daemon Set, then Amazon ECS or Amazon EKS will launch a\n task on all 10 cluster nodes and the customer will be charged: (10 *\n hourly_rate). Metering for software use is automatically handled by the AWS\n Marketplace Metering Control Plane -- your software is not required to perform\n any metering specific actions, other than call RegisterUsage once\n for metering of software use to commence. The AWS Marketplace Metering Control\n Plane will also continue to bill customers for running ECS tasks and Amazon EKS\n pods, regardless of the customers subscription state, removing the need for your\n software to perform entitlement checks at runtime.
Paid container software products sold through Amazon Web Services Marketplace must integrate with the Amazon Web Services Marketplace\n Metering Service and call the RegisterUsage operation for software\n entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you may choose to\n do so if you would like to receive usage data in your seller reports. The sections below\n explain the behavior of RegisterUsage. RegisterUsage performs\n two primary functions: metering and entitlement.
\n Entitlement: RegisterUsage allows you to\n verify that the customer running your paid software is subscribed to your\n product on Amazon Web Services Marketplace, enabling you to guard against unauthorized use. Your container\n image that integrates with RegisterUsage is only required to guard\n against unauthorized use at container startup, as such a\n CustomerNotSubscribedException or\n PlatformNotSupportedException will only be thrown on the\n initial call to RegisterUsage. Subsequent calls from the same\n Amazon ECS task instance (e.g. task-id) or Amazon EKS pod\n will not throw a CustomerNotSubscribedException, even if the\n customer unsubscribes while the Amazon ECS task or Amazon EKS\n pod is still running.
\n Metering: RegisterUsage meters software use\n per ECS task, per hour, or per pod for Amazon EKS with usage prorated to\n the second. A minimum of 1 minute of usage applies to tasks that are short\n lived. For example, if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a service configured as a Daemon Set, then Amazon ECS or Amazon EKS will launch a task on all 10 cluster nodes\n and the customer will be charged for 10 tasks. Software metering\n is handled by the Amazon Web Services Marketplace metering control plane—your software is\n not required to perform metering-specific actions other than to call\n RegisterUsage to commence metering.\n The Amazon Web Services Marketplace metering control plane will also bill customers for\n running ECS tasks and Amazon EKS pods, regardless of the customer's\n subscription state, which removes the need for your software to run entitlement\n checks at runtime. For containers, RegisterUsage should be called\n immediately at launch. If you don’t register the container within the first 6 hours\n of the launch, Amazon Web Services Marketplace Metering Service doesn’t provide any metering\n guarantees for previous months. Metering will continue, however, for the\n current month forward until the container ends. RegisterUsage is\n for metering paid hourly container products.
For Amazon Web Services Regions that support RegisterUsage, see RegisterUsage Region support.\n
Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new product.
", + "smithy.api#documentation": "Product code is used to uniquely identify a product in Amazon Web Services Marketplace. The product code should\n be the same as the one used during the publishing of a new product.
", "smithy.api#required": {} } }, "PublicKeyVersion": { "target": "com.amazonaws.marketplacemetering#VersionInteger", "traits": { - "smithy.api#documentation": "Public Key Version provided by AWS Marketplace
", + "smithy.api#documentation": "Public Key Version provided by Amazon Web Services Marketplace
", "smithy.api#required": {} } }, @@ -1567,7 +1468,7 @@ } ], "traits": { - "smithy.api#documentation": "\n ResolveCustomer is called by a SaaS application during the registration\n process. When a buyer visits your website during the registration process, the buyer\n submits a registration token through their browser. The registration token is resolved\n through this API to obtain a CustomerIdentifier\n along with the\n CustomerAWSAccountId and\n ProductCode.
The API needs to called from the seller account id used to publish the SaaS\n application to successfully resolve the token.
\nFor an example of using ResolveCustomer, see ResolveCustomer code example in the AWS Marketplace Seller\n Guide.
\n ResolveCustomer is called by a SaaS application during the registration\n process. When a buyer visits your website during the registration process, the buyer\n submits a registration token through their browser. The registration token is resolved\n through this API to obtain a CustomerIdentifier along with the\n CustomerAWSAccountId and ProductCode.
To successfully resolve the token, the API must be called from the account that was used to publish the SaaS\n application. For an example of using ResolveCustomer, see ResolveCustomer code example in the Amazon Web Services Marketplace Seller\n Guide.
Permission is required for this operation. Your IAM role or user performing this\n operation requires a policy to allow the aws-marketplace:ResolveCustomer\n action. For more information, see Actions, resources, and condition keys for Amazon Web Services Marketplace Metering Service in\n the Service Authorization Reference.
For Amazon Web Services Regions that support ResolveCustomer, see ResolveCustomer Region support.\n
When a buyer visits your website during the registration process, the buyer submits a\n registration token through the browser. The registration token is resolved to obtain a\n CustomerIdentifier\n along with the\n CustomerAWSAccountId\n and\n ProductCode.
When a buyer visits your website during the registration process, the buyer submits a\n registration token through the browser. The registration token is resolved to obtain a\n CustomerIdentifier along with the CustomerAWSAccountId and\n ProductCode.
The CustomerAWSAccountId provides the AWS account ID associated with the\n CustomerIdentifier for the individual customer.
The CustomerAWSAccountId provides the Amazon Web Services account ID associated with\n the CustomerIdentifier for the individual customer.
The result of the ResolveCustomer operation. Contains the\n CustomerIdentifier\n \n along with the CustomerAWSAccountId and\n ProductCode.
The result of the ResolveCustomer operation. Contains the\n CustomerIdentifier along with the CustomerAWSAccountId and\n ProductCode.
The CustomerIdentifier is obtained through the\n ResolveCustomer operation and represents an individual buyer in your\n application.
The CustomerIdentifier is obtained through the\n ResolveCustomer operation and represents an individual buyer in your\n application.
During the process of registering a product on AWS Marketplace, dimensions are\n specified. These represent different units of value in your application.
", + "smithy.api#documentation": "During the process of registering a product on Amazon Web Services Marketplace, dimensions are specified.\n These represent different units of value in your application.
", "smithy.api#required": {} } }, @@ -1784,6 +1686,12 @@ "traits": { "smithy.api#documentation": "The set of UsageAllocations to submit. The sum of all\n UsageAllocation quantities must equal the Quantity of the\n UsageRecord.
\n The CustomerAWSAccountID parameter specifies the AWS account ID of the buyer.\n
The UsageRecordResult\n Status indicates the status of an individual UsageRecord\n processed by BatchMeterUsage.
\n Success- The UsageRecord was accepted and\n honored by BatchMeterUsage.
\n CustomerNotSubscribed- The CustomerIdentifier\n specified is not able to use your product. The UsageRecord was not\n honored. There are three causes for this result:
The customer identifier is invalid.
\nThe customer identifier provided in the metering record does not have\n an active agreement or subscription with this product. Future\n UsageRecords for this customer will fail until the\n customer subscribes to your product.
The customer's AWS account was suspended.
\n\n DuplicateRecord- Indicates that the\n UsageRecord was invalid and not honored. A previously metered\n UsageRecord had the same customer, dimension, and time, but a\n different quantity.
The UsageRecordResult\n Status indicates the status of an individual UsageRecord\n processed by BatchMeterUsage.
\n Success- The UsageRecord was accepted and\n honored by BatchMeterUsage.
\n CustomerNotSubscribed- The CustomerIdentifier\n specified is not able to use your product. The UsageRecord was not\n honored. There are three causes for this result:
The customer identifier is invalid.
\nThe customer identifier provided in the metering record does not have\n an active agreement or subscription with this product. Future\n UsageRecords for this customer will fail until the\n customer subscribes to your product.
The customer's Amazon Web Services account was suspended.
\n\n DuplicateRecord- Indicates that the\n UsageRecord was invalid and not honored. A previously metered\n UsageRecord had the same customer, dimension, and time, but a\n different quantity.
For ads that have media files with streaming delivery and supported file extensions, indicates what transcoding action MediaTailor takes when it first receives these ads from the ADS. \n TRANSCODE indicates that MediaTailor must transcode the ads. \n NONE indicates that you have already transcoded the ads outside of MediaTailor and don't need them transcoded as part of the ad insertion workflow. \n For more information about ad conditioning see https://docs.aws.amazon.com/precondition-ads.html.
For ads that have media files with streaming delivery and supported file extensions, indicates what transcoding action MediaTailor takes when it first receives these ads from the ADS. \n TRANSCODE indicates that MediaTailor must transcode the ads. \n NONE indicates that you have already transcoded the ads outside of MediaTailor and don't need them transcoded as part of the ad insertion workflow. \n For more information about ad conditioning see Using preconditioned ads in the Elemental MediaTailor user guide.
Indicates that MediaTailor emits RAW_ADS_RESPONSE logs for playback sessions that are initialized with this configuration.
Indicates that MediaTailor won't emit the selected events in the logs for playback sessions that are initialized with this configuration.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Settings for customizing what events are included in logs for interactions with the ad decision server (ADS).
\nFor more information about ADS logs, including descriptions of the event types, see MediaTailor ADS logs description and event types\n in Elemental MediaTailor User Guide.
" + } + }, + "com.amazonaws.mediatailor#AdsInteractionPublishOptInEventType": { + "type": "enum", + "members": { + "RAW_ADS_RESPONSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RAW_ADS_RESPONSE" + } + } + } + }, "com.amazonaws.mediatailor#Alert": { "type": "structure", "members": { @@ -760,6 +1042,18 @@ "traits": { "smithy.api#documentation": "The method used for collecting logs from AWS Elemental MediaTailor. To configure MediaTailor to send logs directly to Amazon CloudWatch Logs, choose LEGACY_CLOUDWATCH. To configure MediaTailor to \n send logs to CloudWatch, which then vends the logs to your destination of choice, choose VENDED_LOGS. Supported destinations are CloudWatch Logs log group, Amazon S3 bucket, and Amazon Data Firehose stream.
To use vended logs, you must configure the delivery destination in Amazon CloudWatch, as described in Enable logging from AWS services, Logging that requires additional permissions [V2].
" } + }, + "AdsInteractionLog": { + "target": "com.amazonaws.mediatailor#AdsInteractionLog", + "traits": { + "smithy.api#documentation": "The event types that MediaTailor emits in logs for interactions with the ADS.
" + } + }, + "ManifestServiceInteractionLog": { + "target": "com.amazonaws.mediatailor#ManifestServiceInteractionLog", + "traits": { + "smithy.api#documentation": "The event types that MediaTailor emits in logs for interactions with the origin server.
" + } } }, "traits": { @@ -788,6 +1082,18 @@ "traits": { "smithy.api#documentation": "The method used for collecting logs from AWS Elemental MediaTailor. LEGACY_CLOUDWATCH indicates that MediaTailor is sending logs directly to Amazon CloudWatch Logs. VENDED_LOGS indicates that MediaTailor is sending logs to CloudWatch, which then vends the logs to your destination of choice. Supported destinations are CloudWatch Logs log group, Amazon S3 bucket, and Amazon Data Firehose stream.
The event types that MediaTailor emits in logs for interactions with the ADS.
" + } + }, + "ManifestServiceInteractionLog": { + "target": "com.amazonaws.mediatailor#ManifestServiceInteractionLog", + "traits": { + "smithy.api#documentation": "The event types that MediaTailor emits in logs for interactions with the origin server.
" + } } } }, @@ -3595,6 +3901,18 @@ "traits": { "smithy.api#documentation": "The method used for collecting logs from AWS Elemental MediaTailor. LEGACY_CLOUDWATCH indicates that MediaTailor is sending logs directly to Amazon CloudWatch Logs. VENDED_LOGS indicates that MediaTailor is sending logs to CloudWatch, which then vends the logs to your destination of choice. Supported destinations are CloudWatch Logs log group, Amazon S3 bucket, and Amazon Data Firehose stream.
Settings for customizing what events are included in logs for interactions with the ad decision server (ADS).
" + } + }, + "ManifestServiceInteractionLog": { + "target": "com.amazonaws.mediatailor#ManifestServiceInteractionLog", + "traits": { + "smithy.api#documentation": "Settings for customizing what events are included in logs for interactions with the origin server.
" + } } }, "traits": { @@ -3663,6 +3981,217 @@ "smithy.api#documentation": "The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.
" } }, + "com.amazonaws.mediatailor#ManifestServiceExcludeEventType": { + "type": "enum", + "members": { + "GENERATED_MANIFEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GENERATED_MANIFEST" + } + }, + "ORIGIN_MANIFEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ORIGIN_MANIFEST" + } + }, + "SESSION_INITIALIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SESSION_INITIALIZED" + } + }, + "TRACKING_RESPONSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TRACKING_RESPONSE" + } + }, + "CONFIG_SYNTAX_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONFIG_SYNTAX_ERROR" + } + }, + "CONFIG_SECURITY_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONFIG_SECURITY_ERROR" + } + }, + "UNKNOWN_HOST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNKNOWN_HOST" + } + }, + "TIMEOUT_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TIMEOUT_ERROR" + } + }, + "CONNECTION_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONNECTION_ERROR" + } + }, + "IO_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IO_ERROR" + } + }, + "UNKNOWN_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNKNOWN_ERROR" + } + }, + "HOST_DISALLOWED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HOST_DISALLOWED" + } + }, + "PARSING_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PARSING_ERROR" + } + }, + "MANIFEST_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MANIFEST_ERROR" + } + }, + "NO_MASTER_OR_MEDIA_PLAYLIST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_MASTER_OR_MEDIA_PLAYLIST" + } + }, + "NO_MASTER_PLAYLIST": { + "target": 
"smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_MASTER_PLAYLIST" + } + }, + "NO_MEDIA_PLAYLIST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_MEDIA_PLAYLIST" + } + }, + "INCOMPATIBLE_HLS_VERSION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCOMPATIBLE_HLS_VERSION" + } + }, + "SCTE35_PARSING_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SCTE35_PARSING_ERROR" + } + }, + "INVALID_SINGLE_PERIOD_DASH_MANIFEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_SINGLE_PERIOD_DASH_MANIFEST" + } + }, + "UNSUPPORTED_SINGLE_PERIOD_DASH_MANIFEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNSUPPORTED_SINGLE_PERIOD_DASH_MANIFEST" + } + }, + "LAST_PERIOD_MISSING_AUDIO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LAST_PERIOD_MISSING_AUDIO" + } + }, + "LAST_PERIOD_MISSING_AUDIO_WARNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LAST_PERIOD_MISSING_AUDIO_WARNING" + } + }, + "ERROR_ORIGIN_PREFIX_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR_ORIGIN_PREFIX_INTERPOLATION" + } + }, + "ERROR_ADS_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR_ADS_INTERPOLATION" + } + }, + "ERROR_LIVE_PRE_ROLL_ADS_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR_LIVE_PRE_ROLL_ADS_INTERPOLATION" + } + }, + "ERROR_CDN_AD_SEGMENT_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR_CDN_AD_SEGMENT_INTERPOLATION" + } + }, + "ERROR_CDN_CONTENT_SEGMENT_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR_CDN_CONTENT_SEGMENT_INTERPOLATION" + } + }, + "ERROR_SLATE_AD_URL_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "ERROR_SLATE_AD_URL_INTERPOLATION" + } + }, + "ERROR_PROFILE_NAME_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR_PROFILE_NAME_INTERPOLATION" + } + }, + "ERROR_BUMPER_START_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR_BUMPER_START_INTERPOLATION" + } + }, + "ERROR_BUMPER_END_INTERPOLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR_BUMPER_END_INTERPOLATION" + } + } + } + }, + "com.amazonaws.mediatailor#ManifestServiceInteractionLog": { + "type": "structure", + "members": { + "ExcludeEventTypes": { + "target": "com.amazonaws.mediatailor#__manifestServiceExcludeEventTypesList", + "traits": { + "smithy.api#documentation": "Indicates that MediaTailor won't emit the selected events in the logs for playback sessions that are initialized with this configuration.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Settings for customizing what events are included in logs for interactions with the origin server.
\nFor more information about manifest service logs, including descriptions of the event types, see MediaTailor manifest logs description and event types\n in Elemental MediaTailor User Guide.
" + } + }, "com.amazonaws.mediatailor#MaxResults": { "type": "integer", "traits": { @@ -6689,6 +7218,18 @@ "smithy.api#unstable": {} } }, + "com.amazonaws.mediatailor#__adsInteractionExcludeEventTypesList": { + "type": "list", + "member": { + "target": "com.amazonaws.mediatailor#AdsInteractionExcludeEventType" + } + }, + "com.amazonaws.mediatailor#__adsInteractionPublishOptInEventTypesList": { + "type": "list", + "member": { + "target": "com.amazonaws.mediatailor#AdsInteractionPublishOptInEventType" + } + }, "com.amazonaws.mediatailor#__boolean": { "type": "boolean" }, @@ -6811,6 +7352,12 @@ "com.amazonaws.mediatailor#__long": { "type": "long" }, + "com.amazonaws.mediatailor#__manifestServiceExcludeEventTypesList": { + "type": "list", + "member": { + "target": "com.amazonaws.mediatailor#ManifestServiceExcludeEventType" + } + }, "com.amazonaws.mediatailor#__mapOf__string": { "type": "map", "key": { diff --git a/codegen/sdk/aws-models/networkmanager.json b/codegen/sdk/aws-models/networkmanager.json index 2019fcd2fbe..bec36df6925 100644 --- a/codegen/sdk/aws-models/networkmanager.json +++ b/codegen/sdk/aws-models/networkmanager.json @@ -9185,12 +9185,6 @@ "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -9210,6 +9204,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -9241,392 +9241,667 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": 
"UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "aws.partition", "argv": [ { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] }, - "name" + "aws" ] }, - "aws" - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://networkmanager.us-west-2.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws" + ] }, - false - ] - } - ], - "endpoint": { - "url": 
"https://networkmanager.us-west-2.amazonaws.com", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "networkmanager", - "signingRegion": "us-west-2" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + true + ] + } + ], + "endpoint": { + "url": "https://networkmanager.us-west-2.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } ] }, - "aws-us-gov" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://networkmanager.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "networkmanager", - "signingRegion": "us-gov-west-1" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } - ] + ], + "endpoint": { + "url": "https://networkmanager-fips.us-west-2.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws" + ] }, - true - ] - }, - { - "fn": 
"booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "rules": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://networkmanager-fips.us-west-2.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws-us-gov" ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://networkmanager.us-gov-west-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] + }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://networkmanager.us-gov-west-1.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" } ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + 
"argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://networkmanager-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "UseFIPS" }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + 
}, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://networkmanager-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ], "type": "tree" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://networkmanager.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://networkmanager.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": 
"https://networkmanager.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://networkmanager.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] }, "smithy.rules#endpointTests": { "testCases": [ { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "networkmanager", - "signingRegion": "us-west-2" - } - ] - }, - "url": "https://networkmanager.us-west-2.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "aws-global", + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://networkmanager-fips.us-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "url": "https://networkmanager-fips.us-west-2.api.aws" } }, "params": { @@ -9639,7 +9914,15 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://networkmanager-fips.us-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "url": "https://networkmanager-fips.us-west-2.amazonaws.com" } }, "params": { @@ -9652,7 +9935,15 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://networkmanager.us-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "url": "https://networkmanager.us-west-2.api.aws" } }, "params": { @@ -9669,7 +9960,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "networkmanager", "signingRegion": "us-west-2" } ] @@ -9684,127 +9974,160 @@ } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://networkmanager-fips.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://networkmanager-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://networkmanager-fips.cn-north-1.amazonaws.com.cn" + "properties": { + 
"authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://networkmanager-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://networkmanager.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://networkmanager.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://networkmanager.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://networkmanager.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region aws-us-gov-global with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "networkmanager", "signingRegion": "us-gov-west-1" } ] }, - "url": "https://networkmanager.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "Region": "aws-us-gov-global", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://networkmanager-fips.us-gov-east-1.api.aws" + "url": "https://networkmanager.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://networkmanager-fips.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://networkmanager.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://networkmanager.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://networkmanager.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "networkmanager", "signingRegion": "us-gov-west-1" } ] @@ -9813,7 +10136,7 @@ } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -9833,6 +10156,14 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + 
"properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://networkmanager-fips.us-iso-east-1.c2s.ic.gov" } }, @@ -9857,6 +10188,14 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://networkmanager.us-iso-east-1.c2s.ic.gov" } }, @@ -9881,6 +10220,14 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://networkmanager-fips.us-isob-east-1.sc2s.sgov.gov" } }, @@ -9905,6 +10252,14 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://networkmanager.us-isob-east-1.sc2s.sgov.gov" } }, @@ -9915,54 +10270,131 @@ } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://networkmanager-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-east-1", + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + 
"documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://networkmanager.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://networkmanager-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", + 
"UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://networkmanager.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/codegen/sdk/aws-models/opensearch.json b/codegen/sdk/aws-models/opensearch.json index a6bf9f26500..40f9b679e06 100644 --- a/codegen/sdk/aws-models/opensearch.json +++ b/codegen/sdk/aws-models/opensearch.json @@ -2044,18 +2044,18 @@ "key": { "target": "com.amazonaws.opensearch#AppConfigType", "traits": { - "smithy.api#documentation": "Specify the item to configure, such as admin role for the OpenSearch Application.
" + "smithy.api#documentation": "The configuration item to set, such as the admin role for the OpenSearch application.
" } }, "value": { "target": "com.amazonaws.opensearch#AppConfigValue", "traits": { - "smithy.api#documentation": "Specifies the value to configure for the key, such as an IAM user ARN.
" + "smithy.api#documentation": "The value assigned to the configuration key, such as an IAM user ARN.
" } } }, "traits": { - "smithy.api#documentation": "Configurations of the OpenSearch Application.
" + "smithy.api#documentation": "Configuration settings for an OpenSearch application. For more information, see \n see Using the OpenSearch user interface in Amazon OpenSearch Service.
" } }, "com.amazonaws.opensearch#AppConfigType": { @@ -2159,7 +2159,7 @@ "id": { "target": "com.amazonaws.opensearch#Id", "traits": { - "smithy.api#documentation": "Unique identifier for an OpenSearch application.
" + "smithy.api#documentation": "The unique identifier of an OpenSearch application.
" } }, "arn": { @@ -2168,36 +2168,36 @@ "name": { "target": "com.amazonaws.opensearch#ApplicationName", "traits": { - "smithy.api#documentation": "Name of an OpenSearch Application.
" + "smithy.api#documentation": "The name of an OpenSearch application.
" } }, "endpoint": { "target": "com.amazonaws.opensearch#String", "traits": { - "smithy.api#documentation": "Endpoint URL of an OpenSearch Application.
" + "smithy.api#documentation": "The endpoint URL of an OpenSearch application.
" } }, "status": { "target": "com.amazonaws.opensearch#ApplicationStatus", "traits": { - "smithy.api#documentation": "Status of an OpenSearch Application. Possible values are CREATING, UPDATING, DELETING, FAILED, ACTIVE, and DELETED.
The current status of an OpenSearch application. Possible values: CREATING, UPDATING, DELETING, FAILED, ACTIVE, and DELETED.
Timestamp at which an OpenSearch Application was created.
" + "smithy.api#documentation": "The timestamp when an OpenSearch application was created.
" } }, "lastUpdatedAt": { "target": "com.amazonaws.opensearch#Timestamp", "traits": { - "smithy.api#documentation": "Timestamp at which an OpenSearch Application was last updated.
" + "smithy.api#documentation": "The timestamp of the last update to an OpenSearch application.
" } } }, "traits": { - "smithy.api#documentation": "Basic information of the OpenSearch Application.
" + "smithy.api#documentation": "Basic details of an OpenSearch application.
" } }, "com.amazonaws.opensearch#AssociatePackage": { @@ -3646,7 +3646,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an OpenSearch Application.
", + "smithy.api#documentation": "Creates an OpenSearch UI application. For more information, see Using the OpenSearch user interface in Amazon OpenSearch Service.
", "smithy.api#http": { "method": "POST", "uri": "/2021-01-01/opensearch/application", @@ -3660,33 +3660,33 @@ "clientToken": { "target": "com.amazonaws.opensearch#ClientToken", "traits": { - "smithy.api#documentation": "A unique client idempotency token. It will be auto generated if not provided.
", + "smithy.api#documentation": "Unique, case-sensitive identifier to ensure idempotency of the request.
", "smithy.api#idempotencyToken": {} } }, "name": { "target": "com.amazonaws.opensearch#ApplicationName", "traits": { - "smithy.api#documentation": "Name of the OpenSearch Appication to create. Application names are unique across the applications\n owned by an account within an Amazon Web Services Region.
", + "smithy.api#documentation": "The unique name of the OpenSearch application. Names must be unique within an Amazon Web Services Region for each account.
", "smithy.api#required": {} } }, "dataSources": { "target": "com.amazonaws.opensearch#DataSources", "traits": { - "smithy.api#documentation": "Data sources to be associated with the OpenSearch Application.
" + "smithy.api#documentation": "The data sources to link to the OpenSearch application.
" } }, "iamIdentityCenterOptions": { "target": "com.amazonaws.opensearch#IamIdentityCenterOptionsInput", "traits": { - "smithy.api#documentation": "Settings of IAM Identity Center for the OpenSearch Application.
" + "smithy.api#documentation": "Configuration settings for integrating Amazon Web Services IAM Identity Center with the OpenSearch application.
" } }, "appConfigs": { "target": "com.amazonaws.opensearch#AppConfigs", "traits": { - "smithy.api#documentation": "Configurations of the OpenSearch Application, inlcuding admin configuration.
" + "smithy.api#documentation": "Configuration settings for the OpenSearch application, including administrative options.
" } }, "tagList": { @@ -3703,13 +3703,13 @@ "id": { "target": "com.amazonaws.opensearch#Id", "traits": { - "smithy.api#documentation": "Unique identifier for the created OpenSearch Application.
" + "smithy.api#documentation": "The unique identifier assigned to the OpenSearch application.
" } }, "name": { "target": "com.amazonaws.opensearch#ApplicationName", "traits": { - "smithy.api#documentation": "Name of the created OpenSearch Application.
" + "smithy.api#documentation": "The name of the OpenSearch application.
" } }, "arn": { @@ -3718,19 +3718,19 @@ "dataSources": { "target": "com.amazonaws.opensearch#DataSources", "traits": { - "smithy.api#documentation": "Data sources associated with the created OpenSearch Application.
" + "smithy.api#documentation": "The data sources linked to the OpenSearch application.
" } }, "iamIdentityCenterOptions": { "target": "com.amazonaws.opensearch#IamIdentityCenterOptions", "traits": { - "smithy.api#documentation": "Settings of IAM Identity Center for the created OpenSearch Application.
" + "smithy.api#documentation": "The IAM Identity Center settings configured for the OpenSearch application.
" } }, "appConfigs": { "target": "com.amazonaws.opensearch#AppConfigs", "traits": { - "smithy.api#documentation": "Configurations of the OpenSearch Application, inlcuding admin configuration.
" + "smithy.api#documentation": "Configuration settings for the OpenSearch application, including administrative options.
" } }, "tagList": { @@ -3739,7 +3739,7 @@ "createdAt": { "target": "com.amazonaws.opensearch#Timestamp", "traits": { - "smithy.api#documentation": "Timestamp when the OpenSearch Application was created.
" + "smithy.api#documentation": "The timestamp indicating when the OpenSearch application was created.
" } } }, @@ -3884,7 +3884,7 @@ "IdentityCenterOptions": { "target": "com.amazonaws.opensearch#IdentityCenterOptionsInput", "traits": { - "smithy.api#documentation": "Options for IAM Identity Center Option control for the domain.
" + "smithy.api#documentation": "Configuration options for enabling and managing IAM Identity Center integration within a domain.
" } }, "TagList": { @@ -4417,7 +4417,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an existing OpenSearch Application.
", + "smithy.api#documentation": "Deletes a specified OpenSearch application.
", "smithy.api#http": { "method": "DELETE", "uri": "/2021-01-01/opensearch/application/{id}", @@ -4431,7 +4431,7 @@ "id": { "target": "com.amazonaws.opensearch#Id", "traits": { - "smithy.api#documentation": "Unique identifier for the OpenSearch Application that you want to delete.
", + "smithy.api#documentation": "The unique identifier of the OpenSearch application to delete.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -6626,7 +6626,7 @@ "IdentityCenterOptions": { "target": "com.amazonaws.opensearch#IdentityCenterOptionsStatus", "traits": { - "smithy.api#documentation": "Container for IAM Identity Center Option control for the domain.
" + "smithy.api#documentation": "Configuration options for enabling and managing IAM Identity Center integration within a domain.
" } }, "AutoTuneOptions": { @@ -6916,7 +6916,7 @@ "NodeType": { "target": "com.amazonaws.opensearch#NodeType", "traits": { - "smithy.api#documentation": "Indicates whether the nodes is a data, master, or ultrawarm node.
" + "smithy.api#documentation": "Indicates whether the nodes is a data, master, or UltraWarm node.
" } }, "AvailabilityZone": { @@ -6946,7 +6946,7 @@ "StorageVolumeType": { "target": "com.amazonaws.opensearch#VolumeType", "traits": { - "smithy.api#documentation": "If the nodes has EBS storage, indicates if the volume type is GP2 or GP3. Only applicable\n for data nodes.
" + "smithy.api#documentation": "If the nodes has EBS storage, indicates if the volume type is gp2 or gp3. Only applicable\n for data nodes.
" } }, "StorageSize": { @@ -7320,7 +7320,7 @@ "IdentityCenterOptions": { "target": "com.amazonaws.opensearch#IdentityCenterOptions", "traits": { - "smithy.api#documentation": "Container for IAM Identity Center Option control for the domain.
" + "smithy.api#documentation": "Configuration options for controlling IAM Identity Center integration within a domain.
" } }, "AutoTuneOptions": { @@ -7751,7 +7751,7 @@ } ], "traits": { - "smithy.api#documentation": "Check the configuration and status of an existing OpenSearch Application.
", + "smithy.api#documentation": "Retrieves the configuration and status of an existing OpenSearch application.
", "smithy.api#http": { "method": "GET", "uri": "/2021-01-01/opensearch/application/{id}", @@ -7765,7 +7765,7 @@ "id": { "target": "com.amazonaws.opensearch#Id", "traits": { - "smithy.api#documentation": "Unique identifier of the checked OpenSearch Application.
", + "smithy.api#documentation": "The unique identifier of the OpenSearch application to retrieve.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7781,7 +7781,7 @@ "id": { "target": "com.amazonaws.opensearch#Id", "traits": { - "smithy.api#documentation": "Unique identifier of the checked OpenSearch Application.
" + "smithy.api#documentation": "The unique identifier of the OpenSearch application.
" } }, "arn": { @@ -7790,49 +7790,49 @@ "name": { "target": "com.amazonaws.opensearch#ApplicationName", "traits": { - "smithy.api#documentation": "Name of the checked OpenSearch Application.
" + "smithy.api#documentation": "The name of the OpenSearch application.
" } }, "endpoint": { "target": "com.amazonaws.opensearch#String", "traits": { - "smithy.api#documentation": "Endpoint URL of the checked OpenSearch Application.
" + "smithy.api#documentation": "The endpoint URL of the OpenSearch application.
" } }, "status": { "target": "com.amazonaws.opensearch#ApplicationStatus", "traits": { - "smithy.api#documentation": "Current status of the checked OpenSearch Application. Possible values are CREATING, UPDATING, DELETING, FAILED, ACTIVE, and DELETED.
The current status of the OpenSearch application. Possible values: CREATING, UPDATING, DELETING, FAILED, ACTIVE, and DELETED.
IAM Identity Center settings for the checked OpenSearch Application.
" + "smithy.api#documentation": "The IAM Identity Center settings configured for the OpenSearch application.
" } }, "dataSources": { "target": "com.amazonaws.opensearch#DataSources", "traits": { - "smithy.api#documentation": "Associated data sources to the checked OpenSearch Application.
" + "smithy.api#documentation": "The data sources associated with the OpenSearch application.
" } }, "appConfigs": { "target": "com.amazonaws.opensearch#AppConfigs", "traits": { - "smithy.api#documentation": "App configurations of the checked OpenSearch Application.
" + "smithy.api#documentation": "The configuration settings of the OpenSearch application.
" } }, "createdAt": { "target": "com.amazonaws.opensearch#Timestamp", "traits": { - "smithy.api#documentation": "Timestamp at which the checked OpenSearch Application was created.
" + "smithy.api#documentation": "The timestamp when the OpenSearch application was created.
" } }, "lastUpdatedAt": { "target": "com.amazonaws.opensearch#Timestamp", "traits": { - "smithy.api#documentation": "Timestamp at which the checked OpenSearch Application was last updated.
" + "smithy.api#documentation": "The timestamp of the last update to the OpenSearch application.
" } } }, @@ -8507,7 +8507,7 @@ "enabled": { "target": "com.amazonaws.opensearch#Boolean", "traits": { - "smithy.api#documentation": "IAM Identity Center is enabled for the OpenSearch Application.
" + "smithy.api#documentation": "Indicates whether IAM Identity Center is enabled for the OpenSearch Application.
" } }, "iamIdentityCenterInstanceArn": { @@ -8516,7 +8516,7 @@ "iamRoleForIdentityCenterApplicationArn": { "target": "com.amazonaws.opensearch#RoleArn", "traits": { - "smithy.api#documentation": "Amazon Resource Name of the IAM Identity Center's Application created for the OpenSearch Application after enabling IAM Identity Center.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role assigned to the IAM Identity Center application for the OpenSearch Application.
" } }, "iamIdentityCenterApplicationArn": { @@ -8524,7 +8524,7 @@ } }, "traits": { - "smithy.api#documentation": "Settings for IAM Identity Center for an OpenSearch Application.
" + "smithy.api#documentation": "Configuration settings for IAM Identity Center in an OpenSearch Application.
" } }, "com.amazonaws.opensearch#IamIdentityCenterOptionsInput": { @@ -8533,7 +8533,7 @@ "enabled": { "target": "com.amazonaws.opensearch#Boolean", "traits": { - "smithy.api#documentation": "Enable/disable settings for IAM Identity Center.
" + "smithy.api#documentation": "Specifies whether IAM Identity Center is enabled or disabled.
" } }, "iamIdentityCenterInstanceArn": { @@ -8542,12 +8542,12 @@ "iamRoleForIdentityCenterApplicationArn": { "target": "com.amazonaws.opensearch#RoleArn", "traits": { - "smithy.api#documentation": "Amazon Resource Name of IAM Identity Center's application.
" + "smithy.api#documentation": "The ARN of the IAM role associated with the IAM Identity Center application.
" } } }, "traits": { - "smithy.api#documentation": "Settings for IAM Identity Center.
" + "smithy.api#documentation": "Configuration settings for enabling and managing IAM Identity Center.
" } }, "com.amazonaws.opensearch#Id": { @@ -8582,42 +8582,42 @@ "EnabledAPIAccess": { "target": "com.amazonaws.opensearch#Boolean", "traits": { - "smithy.api#documentation": "True to enable IAM Identity Center for API access in Amazon OpenSearch Service.
" + "smithy.api#documentation": "Indicates whether IAM Identity Center is enabled for the application.
" } }, "IdentityCenterInstanceARN": { "target": "com.amazonaws.opensearch#IdentityCenterInstanceARN", "traits": { - "smithy.api#documentation": "The ARN for IAM Identity Center Instance.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM Identity Center instance.
" } }, "SubjectKey": { "target": "com.amazonaws.opensearch#SubjectKeyIdCOption", "traits": { - "smithy.api#documentation": "Specify the attribute that contains the subject (username, userID, email) of IAM Identity Center.
" + "smithy.api#documentation": "Specifies the attribute that contains the subject identifier (such as username, user ID, or email) in IAM Identity Center.
" } }, "RolesKey": { "target": "com.amazonaws.opensearch#RolesKeyIdCOption", "traits": { - "smithy.api#documentation": "Specify the attribute that contains the backend role (groupName, groupID) of IAM Identity Center
" + "smithy.api#documentation": "Specifies the attribute that contains the backend role identifier (such as group name or group ID) in IAM Identity Center.
" } }, "IdentityCenterApplicationARN": { "target": "com.amazonaws.opensearch#IdentityCenterApplicationARN", "traits": { - "smithy.api#documentation": "The ARN for IAM Identity Center Application which will integrate with Amazon OpenSearch Service.
" + "smithy.api#documentation": "The ARN of the IAM Identity Center application that integrates with Amazon OpenSearch Service.
" } }, "IdentityStoreId": { "target": "com.amazonaws.opensearch#IdentityStoreId", "traits": { - "smithy.api#documentation": "The ID of IAM Identity Store.
" + "smithy.api#documentation": "The identifier of the IAM Identity Store.
" } } }, "traits": { - "smithy.api#documentation": "Container for IAM Identity Center Options settings.
" + "smithy.api#documentation": "Settings container for integrating IAM Identity Center with OpenSearch UI applications, \n which enables enabling secure user authentication and access control across multiple data sources.\n This setup supports single sign-on (SSO) through IAM Identity Center, allowing centralized user management.
" } }, "com.amazonaws.opensearch#IdentityCenterOptionsInput": { @@ -8626,30 +8626,30 @@ "EnabledAPIAccess": { "target": "com.amazonaws.opensearch#Boolean", "traits": { - "smithy.api#documentation": "True to enable IAM Identity Center for API access in Amazon OpenSearch Service.
" + "smithy.api#documentation": "Indicates whether IAM Identity Center is enabled for API access in Amazon OpenSearch Service.
" } }, "IdentityCenterInstanceARN": { "target": "com.amazonaws.opensearch#IdentityCenterInstanceARN", "traits": { - "smithy.api#documentation": "The ARN for IAM Identity Center Instance which will be used for IAM Identity Center Application creation.
" + "smithy.api#documentation": "The ARN of the IAM Identity Center instance used to create an OpenSearch UI application that uses IAM Identity Center for authentication.
" } }, "SubjectKey": { "target": "com.amazonaws.opensearch#SubjectKeyIdCOption", "traits": { - "smithy.api#documentation": "Specify the attribute that contains the subject (username, userID, email) of IAM Identity Center.
" + "smithy.api#documentation": "Specifies the attribute that contains the subject identifier (such as username, user ID, or email) in IAM Identity Center.
" } }, "RolesKey": { "target": "com.amazonaws.opensearch#RolesKeyIdCOption", "traits": { - "smithy.api#documentation": "Specify the attribute that contains the backend role (groupName, groupID) of IAM Identity Center
" + "smithy.api#documentation": "Specifies the attribute that contains the backend role identifier (such as group name or group ID) in IAM Identity Center.
" } } }, "traits": { - "smithy.api#documentation": "Container for IAM Identity Center Options settings.
" + "smithy.api#documentation": "Configuration settings for enabling and managing IAM Identity Center.
" } }, "com.amazonaws.opensearch#IdentityCenterOptionsStatus": { @@ -8658,20 +8658,20 @@ "Options": { "target": "com.amazonaws.opensearch#IdentityCenterOptions", "traits": { - "smithy.api#documentation": "Container for IAM Identity Center Options settings.
", + "smithy.api#documentation": "Configuration settings for IAM Identity Center integration.
", "smithy.api#required": {} } }, "Status": { "target": "com.amazonaws.opensearch#OptionStatus", "traits": { - "smithy.api#documentation": "The status of IAM Identity Center Options settings for a domain.
", + "smithy.api#documentation": "The status of IAM Identity Center configuration settings for a domain.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The status of IAM Identity Center Options settings for a domain.
" + "smithy.api#documentation": "The status of IAM Identity Center configuration settings for a domain.
" } }, "com.amazonaws.opensearch#IdentityPoolId": { @@ -9223,7 +9223,7 @@ } ], "traits": { - "smithy.api#documentation": "List all OpenSearch Applications under your account.
", + "smithy.api#documentation": "Lists all OpenSearch applications under your account.
", "smithy.api#http": { "method": "GET", "uri": "/2021-01-01/opensearch/list-applications", @@ -9249,7 +9249,7 @@ "statuses": { "target": "com.amazonaws.opensearch#ApplicationStatuses", "traits": { - "smithy.api#documentation": "OpenSearch Application Status can be used as filters for the listing request. Possible values are CREATING, UPDATING, DELETING, FAILED, ACTIVE, and DELETED.
Filters the list of OpenSearch applications by status. Possible values: CREATING, UPDATING, DELETING, FAILED, ACTIVE, and DELETED.
Summary of the OpenSearch Applications, including ID, ARN, name, endpoint, status, create time and last update time.
" + "smithy.api#documentation": "Summarizes OpenSearch applications, including ID, ARN, name, endpoint, status, creation time, and last update time.
" } }, "nextToken": { @@ -10697,24 +10697,24 @@ "Enabled": { "target": "com.amazonaws.opensearch#Boolean", "traits": { - "smithy.api#documentation": "A boolean that indicates whether a particular node type is enabled or not.
" + "smithy.api#documentation": "A boolean value indicating whether a specific node type is active or inactive.
" } }, "Type": { "target": "com.amazonaws.opensearch#OpenSearchPartitionInstanceType", "traits": { - "smithy.api#documentation": "The instance type of a particular node type in the cluster.
" + "smithy.api#documentation": "The instance type of a particular node within the cluster.
" } }, "Count": { "target": "com.amazonaws.opensearch#IntegerClass", "traits": { - "smithy.api#documentation": "The number of nodes of a particular node type in the cluster.
" + "smithy.api#documentation": "The number of nodes of a specific type within the cluster.
" } } }, "traits": { - "smithy.api#documentation": "Container for specifying configuration of any node type.
" + "smithy.api#documentation": "Configuration options for defining the setup of any node type within the cluster.
" } }, "com.amazonaws.opensearch#NodeId": { @@ -10732,18 +10732,18 @@ "NodeType": { "target": "com.amazonaws.opensearch#NodeOptionsNodeType", "traits": { - "smithy.api#documentation": "Container for node type like coordinating.
" + "smithy.api#documentation": "Defines the type of node, such as coordinating nodes.
" } }, "NodeConfig": { "target": "com.amazonaws.opensearch#NodeConfig", "traits": { - "smithy.api#documentation": "Container for specifying configuration of any node type.
" + "smithy.api#documentation": "Configuration options for defining the setup of any node type.
" } } }, "traits": { - "smithy.api#documentation": "Container for specifying node type.
" + "smithy.api#documentation": "Configuration settings for defining the node type within a cluster.
" } }, "com.amazonaws.opensearch#NodeOptionsList": { @@ -11965,7 +11965,7 @@ "PackageOwner": { "target": "com.amazonaws.opensearch#PackageOwner", "traits": { - "smithy.api#documentation": "The owner of the package who is allowed to create/update a package and add users to the package scope.
" + "smithy.api#documentation": "The owner of the package who is allowed to create and update a package and add users to the package scope.
" } }, "PackageVendingOptions": { @@ -11977,7 +11977,7 @@ "PackageEncryptionOptions": { "target": "com.amazonaws.opensearch#PackageEncryptionOptions", "traits": { - "smithy.api#documentation": "Package Encryption Options for a package.
" + "smithy.api#documentation": "Encryption options for a package.
" } } }, @@ -11998,7 +11998,7 @@ "PrerequisitePackageIDList": { "target": "com.amazonaws.opensearch#PackageIDList", "traits": { - "smithy.api#documentation": "List of package IDs that must be associated with the domain with or before the package can be associated.
" + "smithy.api#documentation": "List of package IDs that must be linked to the domain before or simultaneously with the package association.
" } }, "AssociationConfiguration": { @@ -12030,13 +12030,13 @@ "KmsKeyIdentifier": { "target": "com.amazonaws.opensearch#KmsKeyId", "traits": { - "smithy.api#documentation": "KMS key ID for encrypting the package.
" + "smithy.api#documentation": "KMS key ID for encrypting the package.
" } }, "EncryptionEnabled": { "target": "com.amazonaws.opensearch#Boolean", "traits": { - "smithy.api#documentation": "This indicates whether encryption is enabled for the package.
", + "smithy.api#documentation": "Whether encryption is enabled for the package.
", "smithy.api#required": {} } } @@ -12223,13 +12223,13 @@ "VendingEnabled": { "target": "com.amazonaws.opensearch#Boolean", "traits": { - "smithy.api#documentation": "This indicates whether vending is enabled for the package to determine if package can be used by other users.\n
", + "smithy.api#documentation": "Indicates whether the package vending feature is enabled, allowing the package to be used by other users.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The vending options for a package to determine if the package can be used by other users.\n
" + "smithy.api#documentation": "Configuration options for determining whether a package can be made available for use by other users.
" } }, "com.amazonaws.opensearch#PackageVersion": { @@ -14019,7 +14019,7 @@ } ], "traits": { - "smithy.api#documentation": "Update the OpenSearch Application.
", + "smithy.api#documentation": "Updates the configuration and settings of an existing OpenSearch application.
", "smithy.api#http": { "method": "PUT", "uri": "/2021-01-01/opensearch/application/{id}", @@ -14033,7 +14033,7 @@ "id": { "target": "com.amazonaws.opensearch#Id", "traits": { - "smithy.api#documentation": "Unique identifier of the OpenSearch Application to be updated.
", + "smithy.api#documentation": "The unique identifier for the OpenSearch application to be updated.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -14041,13 +14041,13 @@ "dataSources": { "target": "com.amazonaws.opensearch#DataSources", "traits": { - "smithy.api#documentation": "Data sources to be associated with the OpenSearch Application.
" + "smithy.api#documentation": "The data sources to associate with the OpenSearch application.
" } }, "appConfigs": { "target": "com.amazonaws.opensearch#AppConfigs", "traits": { - "smithy.api#documentation": "Configurations to be changed for the OpenSearch Application.
" + "smithy.api#documentation": "The configuration settings to modify for the OpenSearch application.
" } } }, @@ -14061,13 +14061,13 @@ "id": { "target": "com.amazonaws.opensearch#Id", "traits": { - "smithy.api#documentation": "Unique identifier of the updated OpenSearch Application.
" + "smithy.api#documentation": "The unique identifier of the updated OpenSearch application.
" } }, "name": { "target": "com.amazonaws.opensearch#ApplicationName", "traits": { - "smithy.api#documentation": "Name of the updated OpenSearch Application.
" + "smithy.api#documentation": "The name of the updated OpenSearch application.
" } }, "arn": { @@ -14076,31 +14076,31 @@ "dataSources": { "target": "com.amazonaws.opensearch#DataSources", "traits": { - "smithy.api#documentation": "Data sources associated with the updated OpenSearch Application.
" + "smithy.api#documentation": "The data sources associated with the updated OpenSearch application.
" } }, "iamIdentityCenterOptions": { "target": "com.amazonaws.opensearch#IamIdentityCenterOptions", "traits": { - "smithy.api#documentation": "IAM Identity Center settings for the updated OpenSearch Application.
" + "smithy.api#documentation": "The IAM Identity Center configuration for the updated OpenSearch application.
" } }, "appConfigs": { "target": "com.amazonaws.opensearch#AppConfigs", "traits": { - "smithy.api#documentation": "Configurations for the updated OpenSearch Application.
" + "smithy.api#documentation": "The configuration settings for the updated OpenSearch application.
" } }, "createdAt": { "target": "com.amazonaws.opensearch#Timestamp", "traits": { - "smithy.api#documentation": "Timestamp at which the OpenSearch Application was created.
" + "smithy.api#documentation": "The timestamp when the OpenSearch application was originally created.
" } }, "lastUpdatedAt": { "target": "com.amazonaws.opensearch#Timestamp", "traits": { - "smithy.api#documentation": "Timestamp at which the OpenSearch Application was last updated.
" + "smithy.api#documentation": "The timestamp when the OpenSearch application was last updated.
" } } }, diff --git a/codegen/sdk/aws-models/outposts.json b/codegen/sdk/aws-models/outposts.json index c4d64247e03..f5357c36113 100644 --- a/codegen/sdk/aws-models/outposts.json +++ b/codegen/sdk/aws-models/outposts.json @@ -252,6 +252,16 @@ "smithy.api#pattern": "^(\\w+)$" } }, + "com.amazonaws.outposts#AssetIdInput": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 10, + "max": 10 + }, + "smithy.api#pattern": "^\\d{10}$" + } + }, "com.amazonaws.outposts#AssetIdList": { "type": "list", "member": { @@ -264,7 +274,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "The ID of the asset.
" + "smithy.api#documentation": "The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.
" } }, "RackId": { @@ -314,7 +324,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "The ID of the asset.
" + "smithy.api#documentation": "The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.
" } }, "AccountId": { @@ -788,6 +798,12 @@ "smithy.api#documentation": "The ID of the Amazon Web Services Outposts order of the host associated with the capacity task.
" } }, + "AssetId": { + "target": "com.amazonaws.outposts#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.
" + } + }, "CapacityTaskStatus": { "target": "com.amazonaws.outposts#CapacityTaskStatus", "traits": { @@ -1670,6 +1686,12 @@ "smithy.api#documentation": "ID of the Amazon Web Services Outposts order associated with the specified capacity task.
" } }, + "AssetId": { + "target": "com.amazonaws.outposts#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.
" + } + }, "RequestedInstancePools": { "target": "com.amazonaws.outposts#RequestedInstancePools", "traits": { @@ -1692,7 +1714,7 @@ "CapacityTaskStatus": { "target": "com.amazonaws.outposts#CapacityTaskStatus", "traits": { - "smithy.api#documentation": "Status of the capacity task.
\nA capacity task can have one of the following statuses:
\n\n REQUESTED - The capacity task was created and is awaiting the next step\n by Amazon Web Services Outposts.
\n IN_PROGRESS - The capacity task is running and cannot be\n cancelled.
\n WAITING_FOR_EVACUATION - The capacity task requires capacity to run. You\n must stop the recommended EC2 running instances to free up capacity for the task to\n run.
Status of the capacity task.
\nA capacity task can have one of the following statuses:
\n\n REQUESTED - The capacity task was created and is awaiting the next step\n by Amazon Web Services Outposts.
\n IN_PROGRESS - The capacity task is running and cannot be cancelled.
\n FAILED - The capacity task could not be completed.
\n COMPLETED - The capacity task has completed successfully.
\n WAITING_FOR_EVACUATION - The capacity task requires capacity to run. You must stop the recommended EC2 running instances to free up capacity for the task to run.
\n CANCELLATION_IN_PROGRESS - The capacity task has been cancelled and is in the process of cleaning up resources.
\n CANCELLED - The capacity task is cancelled.
The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.
", + "smithy.api#httpQuery": "AssetId" + } + }, "MaxResults": { "target": "com.amazonaws.outposts#MaxResults1000", "traits": { @@ -2528,7 +2557,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "The ID of the asset.
" + "smithy.api#documentation": "The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.
" } }, "MacAddressList": { @@ -5624,6 +5653,12 @@ "smithy.api#documentation": "The ID of the Amazon Web Services Outposts order associated with the specified capacity task.
" } }, + "AssetId": { + "target": "com.amazonaws.outposts#AssetIdInput", + "traits": { + "smithy.api#documentation": "The ID of the Outpost asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.
" + } + }, "InstancePools": { "target": "com.amazonaws.outposts#RequestedInstancePools", "traits": { @@ -5676,6 +5711,12 @@ "smithy.api#documentation": "ID of the Amazon Web Services Outposts order of the host associated with the capacity task.
" } }, + "AssetId": { + "target": "com.amazonaws.outposts#AssetId", + "traits": { + "smithy.api#documentation": "The ID of the asset. An Outpost asset can be a single server within an Outposts rack or an Outposts server configuration.
" + } + }, "RequestedInstancePools": { "target": "com.amazonaws.outposts#RequestedInstancePools", "traits": { @@ -5779,7 +5820,7 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "The ID of the Outpost server.
", + "smithy.api#documentation": "The ID of the Outpost server.
", "smithy.api#required": {} } }, @@ -6415,7 +6456,7 @@ "PowerConnector": { "target": "com.amazonaws.outposts#PowerConnector", "traits": { - "smithy.api#documentation": "The power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase and PowerConnector.
Single-phase AC feed
\n\n L6-30P – (common in US); 30A; single phase
\n\n IEC309 (blue) – P+N+E, 6hr; 32 A; single\n phase
\nThree-phase AC feed
\n\n AH530P7W (red) – 3P+N+E, 7hr; 30A; three\n phase
\n\n AH532P6W (red) – 3P+N+E, 6hr; 32A; three\n phase
\n\n CS8365C – (common in US); 3P+E, 50A; three phase
\nThe power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase and PowerConnector.
Single-phase AC feed
\n\n L6-30P – (common in US); 30A; single phase
\n\n IEC309 (blue) – P+N+E, 6hr; 32 A; single\n phase
\nThree-phase AC feed
\n\n AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase
\n\n AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase
\n\n CS8365C – (common in US); 3P+E, 50A; three phase
\nThe cryptographic usage of an ECDH derived key as defined in section A.5.2 of the TR-31 spec.
", + "smithy.api#suppress": [ + "UnstableTrait" + ] + } } }, "traits": { @@ -456,6 +466,103 @@ "smithy.api#output": {} } }, + "com.amazonaws.paymentcryptography#DeriveKeyUsage": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "TR31_B0_BASE_DERIVATION_KEY", + "name": "TR31_B0_BASE_DERIVATION_KEY" + }, + { + "value": "TR31_C0_CARD_VERIFICATION_KEY", + "name": "TR31_C0_CARD_VERIFICATION_KEY" + }, + { + "value": "TR31_D0_SYMMETRIC_DATA_ENCRYPTION_KEY", + "name": "TR31_D0_SYMMETRIC_DATA_ENCRYPTION_KEY" + }, + { + "value": "TR31_E0_EMV_MKEY_APP_CRYPTOGRAMS", + "name": "TR31_E0_EMV_MKEY_APP_CRYPTOGRAMS" + }, + { + "value": "TR31_E1_EMV_MKEY_CONFIDENTIALITY", + "name": "TR31_E1_EMV_MKEY_CONFIDENTIALITY" + }, + { + "value": "TR31_E2_EMV_MKEY_INTEGRITY", + "name": "TR31_E2_EMV_MKEY_INTEGRITY" + }, + { + "value": "TR31_E4_EMV_MKEY_DYNAMIC_NUMBERS", + "name": "TR31_E4_EMV_MKEY_DYNAMIC_NUMBERS" + }, + { + "value": "TR31_E5_EMV_MKEY_CARD_PERSONALIZATION", + "name": "TR31_E5_EMV_MKEY_CARD_PERSONALIZATION" + }, + { + "value": "TR31_E6_EMV_MKEY_OTHER", + "name": "TR31_E6_EMV_MKEY_OTHER" + }, + { + "value": "TR31_K0_KEY_ENCRYPTION_KEY", + "name": "TR31_K0_KEY_ENCRYPTION_KEY" + }, + { + "value": "TR31_K1_KEY_BLOCK_PROTECTION_KEY", + "name": "TR31_K1_KEY_BLOCK_PROTECTION_KEY" + }, + { + "value": "TR31_M3_ISO_9797_3_MAC_KEY", + "name": "TR31_M3_ISO_9797_3_MAC_KEY" + }, + { + "value": "TR31_M1_ISO_9797_1_MAC_KEY", + "name": "TR31_M1_ISO_9797_1_MAC_KEY" + }, + { + "value": "TR31_M6_ISO_9797_5_CMAC_KEY", + "name": "TR31_M6_ISO_9797_5_CMAC_KEY" + }, + { + "value": "TR31_M7_HMAC_KEY", + "name": "TR31_M7_HMAC_KEY" + }, + { + "value": "TR31_P0_PIN_ENCRYPTION_KEY", + "name": "TR31_P0_PIN_ENCRYPTION_KEY" + }, + { + "value": "TR31_P1_PIN_GENERATION_KEY", + "name": "TR31_P1_PIN_GENERATION_KEY" + }, + { + "value": "TR31_V1_IBM3624_PIN_VERIFICATION_KEY", + "name": "TR31_V1_IBM3624_PIN_VERIFICATION_KEY" + }, + { + "value": "TR31_V2_VISA_PIN_VERIFICATION_KEY", + "name": 
"TR31_V2_VISA_PIN_VERIFICATION_KEY" + } + ] + } + }, + "com.amazonaws.paymentcryptography#DiffieHellmanDerivationData": { + "type": "union", + "members": { + "SharedInformation": { + "target": "com.amazonaws.paymentcryptography#SharedInformation", + "traits": { + "smithy.api#documentation": "A byte string containing information that binds the ECDH derived key to the two parties involved or to the context of the key.
\nIt may include details like identities of the two parties deriving the key, context of the operation, session IDs, and optionally a nonce. It must not contain zero bytes, and re-using shared information for multiple ECDH key derivations is not recommended.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Derivation data used to derive an ECDH key.
" + } + }, "com.amazonaws.paymentcryptography#EvenHexLengthBetween16And32": { "type": "string", "traits": { @@ -486,6 +593,66 @@ "smithy.api#documentation": "The attributes for IPEK generation during export.
" } }, + "com.amazonaws.paymentcryptography#ExportDiffieHellmanTr31KeyBlock": { + "type": "structure", + "members": { + "PrivateKeyIdentifier": { + "target": "com.amazonaws.paymentcryptography#KeyArnOrKeyAliasType", + "traits": { + "smithy.api#documentation": "The keyARN of the asymmetric ECC key.
The keyARN of the certificate that signed the client's PublicKeyCertificate.
The client's public key certificate in PEM format (base64 encoded) to use for ECDH key derivation.
", + "smithy.api#required": {} + } + }, + "DeriveKeyAlgorithm": { + "target": "com.amazonaws.paymentcryptography#SymmetricKeyAlgorithm", + "traits": { + "smithy.api#documentation": "The key algorithm of the derived ECDH key.
", + "smithy.api#required": {} + } + }, + "KeyDerivationFunction": { + "target": "com.amazonaws.paymentcryptography#KeyDerivationFunction", + "traits": { + "smithy.api#documentation": "The key derivation function to use for deriving a key using ECDH.
", + "smithy.api#required": {} + } + }, + "KeyDerivationHashAlgorithm": { + "target": "com.amazonaws.paymentcryptography#KeyDerivationHashAlgorithm", + "traits": { + "smithy.api#documentation": "The hash type to use for deriving a key using ECDH.
", + "smithy.api#required": {} + } + }, + "DerivationData": { + "target": "com.amazonaws.paymentcryptography#DiffieHellmanDerivationData", + "traits": { + "smithy.api#documentation": "Derivation data used to derive an ECDH key.
", + "smithy.api#required": {} + } + }, + "KeyBlockHeaders": { + "target": "com.amazonaws.paymentcryptography#KeyBlockHeaders" + } + }, + "traits": { + "smithy.api#documentation": "Parameter information for key material export using the asymmetric ECDH key exchange method.
" + } + }, "com.amazonaws.paymentcryptography#ExportDukptInitialKey": { "type": "structure", "members": { @@ -613,6 +780,12 @@ "traits": { "smithy.api#documentation": "Parameter information for key material export using asymmetric RSA wrap and unwrap key exchange method
" } + }, + "DiffieHellmanTr31KeyBlock": { + "target": "com.amazonaws.paymentcryptography#ExportDiffieHellmanTr31KeyBlock", + "traits": { + "smithy.api#documentation": "Parameter information for key material export using the asymmetric ECDH key exchange method.
" + } } }, "traits": { @@ -1133,6 +1306,70 @@ "smithy.api#pattern": "^[0-9A-F]{20}$|^[0-9A-F]{24}$" } }, + "com.amazonaws.paymentcryptography#ImportDiffieHellmanTr31KeyBlock": { + "type": "structure", + "members": { + "PrivateKeyIdentifier": { + "target": "com.amazonaws.paymentcryptography#KeyArnOrKeyAliasType", + "traits": { + "smithy.api#documentation": "The keyARN of the asymmetric ECC key.
The keyARN of the certificate that signed the client's PublicKeyCertificate.
The client's public key certificate in PEM format (base64 encoded) to use for ECDH key derivation.
", + "smithy.api#required": {} + } + }, + "DeriveKeyAlgorithm": { + "target": "com.amazonaws.paymentcryptography#SymmetricKeyAlgorithm", + "traits": { + "smithy.api#documentation": "The key algorithm of the derived ECDH key.
", + "smithy.api#required": {} + } + }, + "KeyDerivationFunction": { + "target": "com.amazonaws.paymentcryptography#KeyDerivationFunction", + "traits": { + "smithy.api#documentation": "The key derivation function to use for deriving a key using ECDH.
", + "smithy.api#required": {} + } + }, + "KeyDerivationHashAlgorithm": { + "target": "com.amazonaws.paymentcryptography#KeyDerivationHashAlgorithm", + "traits": { + "smithy.api#documentation": "The hash type to use for deriving a key using ECDH.
", + "smithy.api#required": {} + } + }, + "DerivationData": { + "target": "com.amazonaws.paymentcryptography#DiffieHellmanDerivationData", + "traits": { + "smithy.api#documentation": "Derivation data used to derive an ECDH key.
", + "smithy.api#required": {} + } + }, + "WrappedKeyBlock": { + "target": "com.amazonaws.paymentcryptography#Tr31WrappedKeyBlock", + "traits": { + "smithy.api#documentation": "The ECDH wrapped key block to import.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Parameter information for key material import using the asymmetric ECDH key exchange method.
" + } + }, "com.amazonaws.paymentcryptography#ImportKey": { "type": "operation", "input": { @@ -1169,7 +1406,7 @@ ], "traits": { "aws.api#controlPlane": {}, - "smithy.api#documentation": "Imports symmetric keys and public key certificates in PEM format (base64 encoded) into Amazon Web Services Payment Cryptography.
\nAmazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ImportKey you can import symmetric keys using either symmetric and asymmetric key exchange mechanisms.
For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm and RSA wrap and unwrap key exchange mechanisms. Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exhanging keys and are used for initial key exchange such as Key Encryption Key (KEK) or Zone Master Key (ZMK). After which you can import working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.
\nThe TR-34 norm is intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained within the key block. With RSA wrap and unwrap, you can exchange both 3DES and AES-128 keys. The keys are imported in a WrappedKeyCryptogram format and you will need to specify the key attributes during import.
\nYou can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.
\n\n To import a public root key certificate\n
\nYou can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.
\n\n To import a public root key certificate\n
\nUsing this operation, you can import the public component (in PEM cerificate format) of your private root key. You can use the imported public root key certificate for digital signatures, for example signing wrapping key or signing key in TR-34, within your Amazon Web Services Payment Cryptography account.
\nSet the following parameters:
\n\n KeyMaterial: RootCertificatePublicKey\n
\n KeyClass: PUBLIC_KEY\n
\n KeyModesOfUse: Verify\n
\n KeyUsage: TR31_S0_ASYMMETRIC_KEY_FOR_DIGITAL_SIGNATURE\n
\n PublicKeyCertificate: The public key certificate in PEM format (base64 encoded) of the private root key under import.
\n To import a trusted public key certificate\n
\nThe root public key certificate must be in place and operational before you import a trusted public key certificate. Set the following parameters:
\n\n KeyMaterial: TrustedCertificatePublicKey\n
\n CertificateAuthorityPublicKeyIdentifier: KeyArn of the RootCertificatePublicKey.
\n KeyModesOfUse and KeyUsage: Corresponding to the cryptographic operations such as wrap, sign, or encrypt that you will allow the trusted public key certificate to perform.
\n PublicKeyCertificate: The trusted public key certificate in PEM format (base64 encoded) under import.
\n To import initial keys (KEK or ZMK or similar) using TR-34\n
\nUsing this operation, you can import initial key using TR-34 asymmetric key exchange. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During the key import process, KDH is the user who initiates the key import and KRD is Amazon Web Services Payment Cryptography who receives the key.
\nTo initiate TR-34 key import, the KDH must obtain an import token by calling GetParametersForImport. This operation generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate (also known as KRD wrapping certificate) and the root certificate chain. The KDH must trust and install the KRD wrapping certificate on its HSM and use it to encrypt (wrap) the KDH key during TR-34 WrappedKeyBlock generation. The import token and associated KRD wrapping certificate expires after 7 days.
\nNext the KDH generates a key pair for the purpose of signing the encrypted KDH key and provides the public certificate of the signing key to Amazon Web Services Payment Cryptography. The KDH will also need to import the root certificate chain of the KDH signing certificate by calling ImportKey for RootCertificatePublicKey. For more information on TR-34 key import, see section Importing symmetric keys in the Amazon Web Services Payment Cryptography User Guide.
Set the following parameters:
\n\n KeyMaterial: Use Tr34KeyBlock parameters.
\n CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed the KDH signing key certificate.
\n ImportToken: Obtained from KRD by calling GetParametersForImport.
\n WrappedKeyBlock: The TR-34 wrapped key material from KDH. It contains the KDH key under import, wrapped with KRD wrapping certificate and signed by KDH signing private key. This TR-34 key block is typically generated by the KDH Hardware Security Module (HSM) outside of Amazon Web Services Payment Cryptography.
\n SigningKeyCertificate: The public key certificate in PEM format (base64 encoded) of the KDH signing key generated under the root certificate (CertificateAuthorityPublicKeyIdentifier) imported in Amazon Web Services Payment Cryptography.
\n To import initial keys (KEK or ZMK or similar) using RSA Wrap and Unwrap\n
\nUsing this operation, you can import initial key using asymmetric RSA wrap and unwrap key exchange method. To initiate import, call GetParametersForImport with KeyMaterial set to KEY_CRYPTOGRAM to generate an import token. This operation also generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate in PEM format (base64 encoded) and its root certificate chain. The import token and associated KRD wrapping certificate expires after 7 days.
You must trust and install the wrapping certificate and its certificate chain on the sending HSM and use it to wrap the key under export for WrappedKeyCryptogram generation. Next call ImportKey with KeyMaterial set to KEY_CRYPTOGRAM and provide the ImportToken and KeyAttributes for the key under import.
\n To import working keys using TR-31\n
\nAmazon Web Services Payment Cryptography uses TR-31 symmetric key exchange norm to import working keys. A KEK must be established within Amazon Web Services Payment Cryptography by using TR-34 key import or by using CreateKey. To initiate a TR-31 key import, set the following parameters:
\n\n KeyMaterial: Use Tr31KeyBlock parameters.
\n WrappedKeyBlock: The TR-31 wrapped key material. It contains the key under import, encrypted using KEK. The TR-31 key block is typically generated by a HSM outside of Amazon Web Services Payment Cryptography.
\n WrappingKeyIdentifier: The KeyArn of the KEK that Amazon Web Services Payment Cryptography uses to decrypt or unwrap the key under import.
\n Cross-account use: This operation can't be used across different Amazon Web Services accounts.
\n\n Related operations:\n
\n\n ExportKey\n
\nImports symmetric keys and public key certificates in PEM format (base64 encoded) into Amazon Web Services Payment Cryptography.
\nAmazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ImportKey you can import symmetric keys using either symmetric and asymmetric key exchange mechanisms.
For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm and RSA wrap and unwrap key exchange mechanisms. Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exhanging keys and are used for initial key exchange such as Key Encryption Key (KEK) or Zone Master Key (ZMK). After which you can import working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.
\nThe TR-34 norm is intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained within the key block. With RSA wrap and unwrap, you can exchange both 3DES and AES-128 keys. The keys are imported in a WrappedKeyCryptogram format and you will need to specify the key attributes during import.
\nYou can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.
\n\n To import a public root key certificate\n
\nUsing this operation, you can import the public component (in PEM cerificate format) of your private root key. You can use the imported public root key certificate for digital signatures, for example signing wrapping key or signing key in TR-34, within your Amazon Web Services Payment Cryptography account.
\nSet the following parameters:
\n\n KeyMaterial: RootCertificatePublicKey\n
\n KeyClass: PUBLIC_KEY\n
\n KeyModesOfUse: Verify\n
\n KeyUsage: TR31_S0_ASYMMETRIC_KEY_FOR_DIGITAL_SIGNATURE\n
\n PublicKeyCertificate: The public key certificate in PEM format (base64 encoded) of the private root key under import.
\n To import a trusted public key certificate\n
\nThe root public key certificate must be in place and operational before you import a trusted public key certificate. Set the following parameters:
\n\n KeyMaterial: TrustedCertificatePublicKey\n
\n CertificateAuthorityPublicKeyIdentifier: KeyArn of the RootCertificatePublicKey.
\n KeyModesOfUse and KeyUsage: Corresponding to the cryptographic operations such as wrap, sign, or encrypt that you will allow the trusted public key certificate to perform.
\n PublicKeyCertificate: The trusted public key certificate in PEM format (base64 encoded) under import.
\n To import initial keys (KEK or ZMK or similar) using TR-34\n
\nUsing this operation, you can import initial key using TR-34 asymmetric key exchange. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During the key import process, KDH is the user who initiates the key import and KRD is Amazon Web Services Payment Cryptography who receives the key.
\nTo initiate TR-34 key import, the KDH must obtain an import token by calling GetParametersForImport. This operation generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate (also known as KRD wrapping certificate) and the root certificate chain. The KDH must trust and install the KRD wrapping certificate on its HSM and use it to encrypt (wrap) the KDH key during TR-34 WrappedKeyBlock generation. The import token and associated KRD wrapping certificate expires after 7 days.
\nNext the KDH generates a key pair for the purpose of signing the encrypted KDH key and provides the public certificate of the signing key to Amazon Web Services Payment Cryptography. The KDH will also need to import the root certificate chain of the KDH signing certificate by calling ImportKey for RootCertificatePublicKey. For more information on TR-34 key import, see section Importing symmetric keys in the Amazon Web Services Payment Cryptography User Guide.
Set the following parameters:
\n\n KeyMaterial: Use Tr34KeyBlock parameters.
\n CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed the KDH signing key certificate.
\n ImportToken: Obtained from KRD by calling GetParametersForImport.
\n WrappedKeyBlock: The TR-34 wrapped key material from KDH. It contains the KDH key under import, wrapped with KRD wrapping certificate and signed by KDH signing private key. This TR-34 key block is typically generated by the KDH Hardware Security Module (HSM) outside of Amazon Web Services Payment Cryptography.
\n SigningKeyCertificate: The public key certificate in PEM format (base64 encoded) of the KDH signing key generated under the root certificate (CertificateAuthorityPublicKeyIdentifier) imported in Amazon Web Services Payment Cryptography.
\n To import initial keys (KEK or ZMK or similar) using RSA Wrap and Unwrap\n
\nUsing this operation, you can import initial key using asymmetric RSA wrap and unwrap key exchange method. To initiate import, call GetParametersForImport with KeyMaterial set to KEY_CRYPTOGRAM to generate an import token. This operation also generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate in PEM format (base64 encoded) and its root certificate chain. The import token and associated KRD wrapping certificate expires after 7 days.
You must trust and install the wrapping certificate and its certificate chain on the sending HSM and use it to wrap the key under export for WrappedKeyCryptogram generation. Next call ImportKey with KeyMaterial set to KEY_CRYPTOGRAM and provide the ImportToken and KeyAttributes for the key under import.
\n To import working keys using TR-31\n
\nAmazon Web Services Payment Cryptography uses TR-31 symmetric key exchange norm to import working keys. A KEK must be established within Amazon Web Services Payment Cryptography by using TR-34 key import or by using CreateKey. To initiate a TR-31 key import, set the following parameters:
\n\n KeyMaterial: Use Tr31KeyBlock parameters.
\n WrappedKeyBlock: The TR-31 wrapped key material. It contains the key under import, encrypted using KEK. The TR-31 key block is typically generated by a HSM outside of Amazon Web Services Payment Cryptography.
\n WrappingKeyIdentifier: The KeyArn of the KEK that Amazon Web Services Payment Cryptography uses to decrypt or unwrap the key under import.
\n Cross-account use: This operation can't be used across different Amazon Web Services accounts.
\n\n Related operations:\n
\n\n ExportKey\n
\nParameter information for key material import using asymmetric RSA wrap and unwrap key exchange method.
" } + }, + "DiffieHellmanTr31KeyBlock": { + "target": "com.amazonaws.paymentcryptography#ImportDiffieHellmanTr31KeyBlock", + "traits": { + "smithy.api#documentation": "Parameter information for key material import using the asymmetric ECDH key exchange method.
" + } } }, "traits": { @@ -1514,6 +1757,16 @@ "UnstableTrait" ] } + }, + "DeriveKeyUsage": { + "target": "com.amazonaws.paymentcryptography#DeriveKeyUsage", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "The cryptographic usage of an ECDH derived key as defined in section A.5.2 of the TR-31 spec.
", + "smithy.api#suppress": [ + "UnstableTrait" + ] + } } }, "traits": { @@ -1572,6 +1825,10 @@ { "value": "ECC_NIST_P384", "name": "ECC_NIST_P384" + }, + { + "value": "ECC_NIST_P521", + "name": "ECC_NIST_P521" } ] } @@ -1709,6 +1966,46 @@ ] } }, + "com.amazonaws.paymentcryptography#KeyDerivationFunction": { + "type": "enum", + "members": { + "NIST_SP800": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NIST_SP800" + } + }, + "ANSI_X963": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ANSI_X963" + } + } + } + }, + "com.amazonaws.paymentcryptography#KeyDerivationHashAlgorithm": { + "type": "enum", + "members": { + "SHA_256": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHA_256" + } + }, + "SHA_384": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHA_384" + } + }, + "SHA_512": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHA_512" + } + } + } + }, "com.amazonaws.paymentcryptography#KeyExportability": { "type": "string", "traits": { @@ -3266,6 +3563,16 @@ "smithy.api#httpError": 503 } }, + "com.amazonaws.paymentcryptography#SharedInformation": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 2048 + }, + "smithy.api#pattern": "^(?:[0-9a-fA-F][0-9a-fA-F])+$" + } + }, "com.amazonaws.paymentcryptography#StartKeyUsage": { "type": "operation", "input": { @@ -3412,6 +3719,41 @@ "smithy.api#output": {} } }, + "com.amazonaws.paymentcryptography#SymmetricKeyAlgorithm": { + "type": "enum", + "members": { + "TDES_2KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TDES_2KEY" + } + }, + "TDES_3KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TDES_3KEY" + } + }, + "AES_128": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AES_128" + } + }, + "AES_192": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": 
"AES_192" + } + }, + "AES_256": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AES_256" + } + } + } + }, "com.amazonaws.paymentcryptography#Tag": { "type": "structure", "members": { diff --git a/codegen/sdk/aws-models/pcs.json b/codegen/sdk/aws-models/pcs.json index 72cc849b29d..c791e9d5a67 100644 --- a/codegen/sdk/aws-models/pcs.json +++ b/codegen/sdk/aws-models/pcs.json @@ -858,7 +858,7 @@ "com.amazonaws.pcs#ClusterIdentifier": { "type": "string", "traits": { - "smithy.api#pattern": "^(pcs_[a-zA-Z0-9]+|[A-Za-z][A-Za-z0-9-]{1,40})$" + "smithy.api#pattern": "^(pcs_[a-zA-Z0-9]+|[A-Za-z][A-Za-z0-9-]{2,40})$" } }, "com.amazonaws.pcs#ClusterList": { @@ -871,10 +871,10 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 1, + "min": 3, "max": 40 }, - "smithy.api#pattern": "^(?!pcs_)^(?![A-Za-z0-9]{10}$)[A-Za-z][A-Za-z0-9-]+$" + "smithy.api#pattern": "^(?!pcs_)^[A-Za-z][A-Za-z0-9-]+$" } }, "com.amazonaws.pcs#ClusterResource": { @@ -1213,7 +1213,7 @@ "com.amazonaws.pcs#ComputeNodeGroupIdentifier": { "type": "string", "traits": { - "smithy.api#pattern": "^(pcs_[a-zA-Z0-9]+|[A-Za-z][A-Za-z0-9-]{1,25})$" + "smithy.api#pattern": "^(pcs_[a-zA-Z0-9]+|[A-Za-z][A-Za-z0-9-]{2,25})$" } }, "com.amazonaws.pcs#ComputeNodeGroupList": { @@ -1226,10 +1226,10 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 1, + "min": 3, "max": 25 }, - "smithy.api#pattern": "^(?!pcs_)^(?![A-Za-z0-9]{10}$)[A-Za-z][A-Za-z0-9-]+$" + "smithy.api#pattern": "^(?!pcs_)^[A-Za-z][A-Za-z0-9-]+$" } }, "com.amazonaws.pcs#ComputeNodeGroupResource": { @@ -2142,6 +2142,23 @@ "smithy.api#readonly": {}, "smithy.api#tags": [ "Cluster" + ], + "smithy.test#smokeTests": [ + { + "id": "GetCluster_AccessDeniedException", + "params": { + "clusterIdentifier": "pcs_donotexist" + }, + "expect": { + "failure": { + "errorId": "com.amazonaws.pcs#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-west-2" + } + } ] 
} }, @@ -2207,6 +2224,24 @@ "smithy.api#readonly": {}, "smithy.api#tags": [ "ComputeNodeGroup" + ], + "smithy.test#smokeTests": [ + { + "id": "GetComputeNodeGroup_AccessDeniedException", + "params": { + "clusterIdentifier": "pcs_donotexist", + "computeNodeGroupIdentifier": "pcs_donotexist" + }, + "expect": { + "failure": { + "errorId": "com.amazonaws.pcs#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-west-2" + } + } ] } }, @@ -2276,6 +2311,24 @@ "smithy.api#readonly": {}, "smithy.api#tags": [ "Queue" + ], + "smithy.test#smokeTests": [ + { + "id": "GetQueueFailure_AccessDeniedException", + "params": { + "clusterIdentifier": "pcs_donotexist", + "queueIdentifier": "pcs_donotexist" + }, + "expect": { + "failure": { + "errorId": "com.amazonaws.pcs#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-west-2" + } + } ] } }, @@ -2394,6 +2447,19 @@ "smithy.api#readonly": {}, "smithy.api#tags": [ "Cluster" + ], + "smithy.test#smokeTests": [ + { + "id": "ListClustersSuccess", + "params": {}, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-west-2" + } + } ] } }, @@ -2480,6 +2546,23 @@ "smithy.api#readonly": {}, "smithy.api#tags": [ "ComputeNodeGroup" + ], + "smithy.test#smokeTests": [ + { + "id": "ListComputeNodeGroups_AccessDeniedException", + "params": { + "clusterIdentifier": "pcs_donotexist" + }, + "expect": { + "failure": { + "errorId": "com.amazonaws.pcs#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-west-2" + } + } ] } }, @@ -2571,6 +2654,23 @@ "smithy.api#readonly": {}, "smithy.api#tags": [ "Queue" + ], + "smithy.test#smokeTests": [ + { + "id": "ListQueueFailure_AccessDeniedException", + "params": { + "clusterIdentifier": "pcs_donotexist" + }, + "expect": { + "failure": { + "errorId": 
"com.amazonaws.pcs#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-west-2" + } + } ] } }, @@ -2837,7 +2937,7 @@ "com.amazonaws.pcs#QueueIdentifier": { "type": "string", "traits": { - "smithy.api#pattern": "^(pcs_[a-zA-Z0-9]+|[A-Za-z][A-Za-z0-9-]{1,25})$" + "smithy.api#pattern": "^(pcs_[a-zA-Z0-9]+|[A-Za-z][A-Za-z0-9-]{2,25})$" } }, "com.amazonaws.pcs#QueueList": { @@ -2850,10 +2950,10 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 1, + "min": 3, "max": 25 }, - "smithy.api#pattern": "^(?!pcs_)^(?![A-Za-z0-9]{10}$)[A-Za-z][A-Za-z0-9-]+$" + "smithy.api#pattern": "^(?!pcs_)^[A-Za-z][A-Za-z0-9-]+$" } }, "com.amazonaws.pcs#QueueResource": { @@ -3476,6 +3576,9 @@ "errors": [ { "target": "com.amazonaws.pcs#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcs#ServiceQuotaExceededException" } ], "traits": { diff --git a/codegen/sdk/aws-models/personalize.json b/codegen/sdk/aws-models/personalize.json index cb76424f808..74a44951f95 100644 --- a/codegen/sdk/aws-models/personalize.json +++ b/codegen/sdk/aws-models/personalize.json @@ -1851,7 +1851,8 @@ "smithy.api#length": { "min": 0, "max": 150 - } + }, + "smithy.api#pattern": "^[A-Za-z_][A-Za-z\\d_]*$" } }, "com.amazonaws.personalize#ColumnNamesList": { @@ -4295,7 +4296,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a dataset. You can't delete a dataset if an associated\n DatasetImportJob or SolutionVersion is in the\n CREATE PENDING or IN PROGRESS state. For more information on datasets, see\n CreateDataset.
Deletes a dataset. You can't delete a dataset if an associated\n DatasetImportJob or SolutionVersion is in the\n CREATE PENDING or IN PROGRESS state. For more information about deleting datasets,\n see Deleting a dataset.\n
The name of the event type to be considered for solution creation.
" + } + }, + "eventValueThreshold": { + "target": "com.amazonaws.personalize#EventTypeThresholdValue", + "traits": { + "smithy.api#documentation": "The threshold of the event type. Only events with a value greater or equal to this threshold will be considered for solution creation.
" + } + }, + "weight": { + "target": "com.amazonaws.personalize#EventTypeWeight", + "traits": { + "smithy.api#documentation": "The weight of the event type. A higher weight means higher importance of the event type for the created solution.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes the parameters of events, which are used in solution creation.
" + } + }, + "com.amazonaws.personalize#EventParametersList": { + "type": "list", + "member": { + "target": "com.amazonaws.personalize#EventParameters" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, "com.amazonaws.personalize#EventTracker": { "type": "structure", "members": { @@ -5632,6 +5671,18 @@ } } }, + "com.amazonaws.personalize#EventTypeThresholdValue": { + "type": "double" + }, + "com.amazonaws.personalize#EventTypeWeight": { + "type": "double", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 1 + } + } + }, "com.amazonaws.personalize#EventValueThreshold": { "type": "string", "traits": { @@ -5641,6 +5692,20 @@ } } }, + "com.amazonaws.personalize#EventsConfig": { + "type": "structure", + "members": { + "eventParametersList": { + "target": "com.amazonaws.personalize#EventParametersList", + "traits": { + "smithy.api#documentation": "A list of event parameters, which includes event types and their event value thresholds and weights.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes the configuration of events, which are used in solution creation.
" + } + }, "com.amazonaws.personalize#ExcludedDatasetColumns": { "type": "map", "key": { @@ -8447,6 +8512,12 @@ "smithy.api#documentation": "The AutoMLConfig object containing a list of recipes to search\n when AutoML is performed.
" } }, + "eventsConfig": { + "target": "com.amazonaws.personalize#EventsConfig", + "traits": { + "smithy.api#documentation": "Describes the configuration of an event, which includes a list of event parameters. You can specify up to 10 event parameters. Events are used in solution creation.
" + } + }, "optimizationObjective": { "target": "com.amazonaws.personalize#OptimizationObjective", "traits": { @@ -8519,6 +8590,12 @@ "members": { "autoTrainingConfig": { "target": "com.amazonaws.personalize#AutoTrainingConfig" + }, + "eventsConfig": { + "target": "com.amazonaws.personalize#EventsConfig", + "traits": { + "smithy.api#documentation": "Describes the configuration of an event, which includes a list of event parameters. You can specify up to 10 event parameters. Events are used in solution creation.
" + } } }, "traits": { @@ -8938,7 +9015,8 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$", + "smithy.api#sensitive": {} } }, "com.amazonaws.personalize#TagKeys": { @@ -9018,7 +9096,8 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$", + "smithy.api#sensitive": {} } }, "com.amazonaws.personalize#Tags": { diff --git a/codegen/sdk/aws-models/polly.json b/codegen/sdk/aws-models/polly.json index 21deabd0933..7eea92e0b34 100644 --- a/codegen/sdk/aws-models/polly.json +++ b/codegen/sdk/aws-models/polly.json @@ -3559,6 +3559,12 @@ "traits": { "smithy.api#enumValue": "Jasmine" } + }, + "Jihye": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Jihye" + } } } }, diff --git a/codegen/sdk/aws-models/qbusiness.json b/codegen/sdk/aws-models/qbusiness.json index a98c232dc88..0b35aeb506f 100644 --- a/codegen/sdk/aws-models/qbusiness.json +++ b/codegen/sdk/aws-models/qbusiness.json @@ -6845,6 +6845,12 @@ "traits": { "smithy.api#documentation": "If the maxResults response was incomplete because there is more data to\n retrieve, Amazon Q Business returns a pagination token in the response. You can use this\n pagination token to retrieve the next set of Amazon Q Business chat controls\n configured.
The hallucination reduction settings for your application.
" + } } }, "traits": { @@ -8209,6 +8215,37 @@ "target": "com.amazonaws.qbusiness#GroupSummary" } }, + "com.amazonaws.qbusiness#HallucinationReductionConfiguration": { + "type": "structure", + "members": { + "hallucinationReductionControl": { + "target": "com.amazonaws.qbusiness#HallucinationReductionControl", + "traits": { + "smithy.api#documentation": "Controls whether hallucination reduction has been enabled or disabled for your\n application. The default status is DISABLED.
Configuration information required to setup hallucination reduction. For more\n information, see hallucination reduction.
\nThe hallucination reduction feature won't work if chat orchestration controls are\n enabled for your application.
\nThe type of AI-generated message in a TextOutputEvent. Amazon Q Business\n currently supports two types of messages:
\n RESPONSE - The Amazon Q Business system response.
\n GROUNDED_RESPONSE - The corrected, hallucination-reduced,\n response returned by Amazon Q Business. Available only if hallucination reduction is\n supported and configured for the application and detected in the end user chat\n query by Amazon Q Business.
The configuration details for CREATOR_MODE.
The hallucination reduction settings for your application.
" + } } }, "traits": { @@ -14557,6 +14623,9 @@ { "target": "com.amazonaws.qbusiness#AccessDeniedException" }, + { + "target": "com.amazonaws.qbusiness#ConflictException" + }, { "target": "com.amazonaws.qbusiness#InternalServerException" }, diff --git a/codegen/sdk/aws-models/qconnect.json b/codegen/sdk/aws-models/qconnect.json index 193ad796261..52f4aaa5c1f 100644 --- a/codegen/sdk/aws-models/qconnect.json +++ b/codegen/sdk/aws-models/qconnect.json @@ -850,7 +850,7 @@ "modelId": { "target": "com.amazonaws.qconnect#AIPromptModelIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the model used for this AI Prompt. Model Ids supported are:\n CLAUDE_3_HAIKU_20240307_V1.
The identifier of the model used for this AI Prompt. Model Ids supported are:\n anthropic.claude-3-haiku-20240307-v1:0.
The identifier of the model used for this AI Prompt. Model Ids supported are:\n CLAUDE_3_HAIKU_20240307_V1.
The identifier of the model used for this AI Prompt. Model Ids supported are:\n anthropic.claude-3-haiku-20240307-v1:0.
The locale to which specifies the language and region settings that determine the response\n language for QueryAssistant.
\nChanging this locale to anything other than en_US will turn off\n recommendations triggered by contact transcripts for agent assistance, as this feature is\n not supported in multiple languages.
The locale to which specifies the language and region settings that determine the response\n language for QueryAssistant.
\nChanging this locale to anything other than en_US, en_GB, or\n en_AU will turn off recommendations triggered by contact transcripts for\n agent assistance, as this feature is not supported in multiple languages.
The identifier of the model used for this AI Prompt. Model Ids supported are:\n CLAUDE_3_HAIKU_20240307_V1\n
The identifier of the model used for this AI Prompt. Model Ids supported are:\n anthropic.claude-3-haiku-20240307-v1:0\n
Removes the AI Agent that is set for use by defafult on an Amazon Q in Connect\n Assistant.
", + "smithy.api#documentation": "Removes the AI Agent that is set for use by default on an Amazon Q in Connect\n Assistant.
", "smithy.api#http": { "uri": "/assistants/{assistantId}/aiagentConfiguration", "method": "DELETE", @@ -14028,7 +14028,7 @@ } }, "traits": { - "smithy.api#documentation": "The conversation history data to included in conversation context data before the the\n Amazon Q in Connect session..
" + "smithy.api#documentation": "The conversation history data to included in conversation context data before the\n Amazon Q in Connect session.
" } }, "com.amazonaws.qconnect#SelfServiceConversationHistoryList": { @@ -15407,7 +15407,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the AI Agent that is set for use by defafult on an Amazon Q in Connect\n Assistant.
", + "smithy.api#documentation": "Updates the AI Agent that is set for use by default on an Amazon Q in Connect\n Assistant.
", "smithy.api#http": { "uri": "/assistants/{assistantId}/aiagentConfiguration", "method": "POST" diff --git a/codegen/sdk/aws-models/quicksight.json b/codegen/sdk/aws-models/quicksight.json index f8962fea7dc..3034fc8ee66 100644 --- a/codegen/sdk/aws-models/quicksight.json +++ b/codegen/sdk/aws-models/quicksight.json @@ -586,6 +586,52 @@ "smithy.api#documentation": "The parameters for OpenSearch.
" } }, + "com.amazonaws.quicksight#AmazonQInQuickSightConsoleConfigurations": { + "type": "structure", + "members": { + "DataQnA": { + "target": "com.amazonaws.quicksight#DataQnAConfigurations", + "traits": { + "smithy.api#documentation": "Adds generative Q&A capabilitiees to an embedded Amazon QuickSight console.
" + } + }, + "GenerativeAuthoring": { + "target": "com.amazonaws.quicksight#GenerativeAuthoringConfigurations", + "traits": { + "smithy.api#documentation": "Adds the generative BI authoring experience to an embedded Amazon QuickSight console.
" + } + }, + "ExecutiveSummary": { + "target": "com.amazonaws.quicksight#ExecutiveSummaryConfigurations", + "traits": { + "smithy.api#documentation": "Adds the executive summaries feature to an embedded Amazon QuickSight console.
" + } + }, + "DataStories": { + "target": "com.amazonaws.quicksight#DataStoriesConfigurations", + "traits": { + "smithy.api#documentation": "Adds the data stories feature to an embedded Amazon QuickSight console.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A collection of Amazon Q feature configurations in an embedded Amazon QuickSight console.
" + } + }, + "com.amazonaws.quicksight#AmazonQInQuickSightDashboardConfigurations": { + "type": "structure", + "members": { + "ExecutiveSummary": { + "target": "com.amazonaws.quicksight#ExecutiveSummaryConfigurations", + "traits": { + "smithy.api#documentation": "A generated executive summary of an embedded Amazon QuickSight dashboard.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A collection of Amazon Q feature configurations in an embedded Amazon QuickSight dashboard.
" + } + }, "com.amazonaws.quicksight#Analysis": { "type": "structure", "members": { @@ -1677,6 +1723,12 @@ "traits": { "smithy.api#enumValue": "Name" } + }, + "REFRESH_FAILURE_EMAIL_ALERT_STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RefreshFailureEmailAlertStatus" + } } } }, @@ -2528,6 +2580,9 @@ "traits": { "smithy.api#documentation": "A new name for the dataset.
" } + }, + "DataSetRefreshProperties": { + "target": "com.amazonaws.quicksight#DataSetRefreshProperties" } }, "traits": { @@ -3574,6 +3629,24 @@ "traits": { "smithy.api#documentation": "Determines the week start day for an analysis.
" } + }, + "QBusinessInsightsStatus": { + "target": "com.amazonaws.quicksight#QBusinessInsightsStatus", + "traits": { + "smithy.api#documentation": "Determines whether insight summaries from Amazon Q Business are allowed in Dashboard Q&A.
" + } + }, + "ExcludedDataSetArns": { + "target": "com.amazonaws.quicksight#DataSetArnsList", + "traits": { + "smithy.api#documentation": "A list of dataset ARNS to exclude from Dashboard Q&A.
" + } + }, + "CustomActionDefaults": { + "target": "com.amazonaws.quicksight#VisualCustomActionDefaults", + "traits": { + "smithy.api#documentation": "A list of visual custom actions for the analysis.
" + } } }, "traits": { @@ -9147,6 +9220,12 @@ "traits": { "smithy.api#documentation": "The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.
The usage of the dataset. RLS_RULES must be specified for RLS permission datasets.
(Enterprise edition only) Creates a new namespace for you to use with Amazon QuickSight.
\nA namespace allows you to isolate the Amazon QuickSight users and groups that are registered\n for that namespace. Users that access the namespace can share assets only with other\n users or groups in the same namespace. They can't see users and groups in other\n namespaces. You can create a namespace after your Amazon Web Services account is subscribed to\n Amazon QuickSight. The namespace must be unique within the Amazon Web Services account. By default, there is a\n limit of 100 namespaces per Amazon Web Services account. To increase your limit, create a ticket with\n Amazon Web Services Support.
", + "smithy.api#documentation": "(Enterprise edition only) Creates a new namespace for you to use with Amazon QuickSight.
\nA namespace allows you to isolate the Amazon QuickSight users and groups that are registered\n for that namespace. Users that access the namespace can share assets only with other\n users or groups in the same namespace. They can't see users and groups in other\n namespaces. You can create a namespace after your Amazon Web Services account is subscribed to\n Amazon QuickSight. The namespace must be unique within the Amazon Web Services account. By default, there is a\n limit of 100 namespaces per Amazon Web Services account. To increase your limit, create a ticket with\n Amazon Web ServicesSupport.
", "smithy.api#http": { "method": "POST", "uri": "/accounts/{AwsAccountId}", @@ -12350,6 +12429,12 @@ "traits": { "smithy.api#documentation": "The data point tool tip options of a dashboard.
" } + }, + "DataQAEnabledOption": { + "target": "com.amazonaws.quicksight#DataQAEnabledOption", + "traits": { + "smithy.api#documentation": "Adds Q&A capabilities to an Amazon QuickSight dashboard. If no topic is linked, Dashboard Q&A uses the data values that are rendered on the dashboard. End users can use Dashboard Q&A to ask for different slices of the data that they see on the dashboard. If a topic is linked, Topic Q&A is used.
" + } } }, "traits": { @@ -13319,6 +13404,36 @@ "smithy.api#documentation": "The data point tooltip options.
" } }, + "com.amazonaws.quicksight#DataQAEnabledOption": { + "type": "structure", + "members": { + "AvailabilityStatus": { + "target": "com.amazonaws.quicksight#DashboardBehavior", + "traits": { + "smithy.api#documentation": "The status of the Data Q&A option on the dashboard.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Adds Q&A capabilities to a dashboard. If no topic is linked, Dashboard Q&A uses the data values that are rendered on the dashboard. End users can use Dashboard Q&A to ask for different slices of the data that they see on the dashboard. If a topic is linked, Topic Q&A is enabled.
" + } + }, + "com.amazonaws.quicksight#DataQnAConfigurations": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "The generative Q&A settings of an embedded Amazon QuickSight console.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The generative Q&A settings of an embedded Amazon QuickSight console.
" + } + }, "com.amazonaws.quicksight#DataSet": { "type": "structure", "members": { @@ -13430,6 +13545,12 @@ "traits": { "smithy.api#documentation": "The performance optimization configuration of a dataset.
" } + }, + "UseAs": { + "target": "com.amazonaws.quicksight#DataSetUseAs", + "traits": { + "smithy.api#documentation": "The usage of the dataset.
" + } } }, "traits": { @@ -13645,8 +13766,13 @@ "RefreshConfiguration": { "target": "com.amazonaws.quicksight#RefreshConfiguration", "traits": { - "smithy.api#documentation": "The refresh configuration for a dataset.
", - "smithy.api#required": {} + "smithy.api#documentation": "The refresh configuration for a dataset.
" + } + }, + "FailureConfiguration": { + "target": "com.amazonaws.quicksight#RefreshFailureConfiguration", + "traits": { + "smithy.api#documentation": "The failure configuration for a dataset.
" } } }, @@ -13776,6 +13902,12 @@ "smithy.api#default": false, "smithy.api#documentation": "A value that indicates if the dataset has column level permission configured.
" } + }, + "UseAs": { + "target": "com.amazonaws.quicksight#DataSetUseAs", + "traits": { + "smithy.api#documentation": "The usage of the dataset.
" + } } }, "traits": { @@ -13810,6 +13942,17 @@ "smithy.api#documentation": "The usage configuration to apply to child datasets that reference this dataset as a source.
" } }, + "com.amazonaws.quicksight#DataSetUseAs": { + "type": "enum", + "members": { + "RLS_RULES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RLS_RULES" + } + } + } + }, "com.amazonaws.quicksight#DataSource": { "type": "structure", "members": { @@ -14477,6 +14620,22 @@ } } }, + "com.amazonaws.quicksight#DataStoriesConfigurations": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "The data story settings of an embedded Amazon QuickSight console.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The data story settings of an embedded Amazon QuickSight console.
" + } + }, "com.amazonaws.quicksight#Database": { "type": "string", "traits": { @@ -24794,6 +24953,22 @@ "smithy.api#documentation": "The exclude period of TimeRangeFilter or RelativeDatesFilter.
The executive summary settings of an embedded Amazon QuickSight console or dashboard.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The executive summary settings of an embedded Amazon QuickSight console or dashboard.
" + } + }, "com.amazonaws.quicksight#ExplicitHierarchy": { "type": "structure", "members": { @@ -24920,7 +25095,7 @@ "target": "com.amazonaws.quicksight#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "A boolean that indicates whether a FailedKeyRegistrationEntry resulted from user error. If the value of this property is True, the error was caused by user error. If the value of this property is False, the error occurred on the backend. If your job continues fail and with a False\n SenderFault value, contact Amazon Web Services Support.
A boolean that indicates whether a FailedKeyRegistrationEntry resulted from user error. If the value of this property is True, the error was caused by user error. If the value of this property is False, the error occurred on the backend. If your job continues fail and with a False\n SenderFault value, contact Amazon Web ServicesSupport.
The generative BI authoring settings of an embedded Amazon QuickSight console.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The generative BI authoring settings of an embedded Amazon QuickSight console.
" + } + }, "com.amazonaws.quicksight#GeoSpatialColumnGroup": { "type": "structure", "members": { @@ -39555,6 +39746,13 @@ "smithy.api#documentation": "The database.
", "smithy.api#required": {} } + }, + "UseServiceName": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "A Boolean value that indicates whether the Database uses a service name or an SID. If this value is left blank, the default value is SID. If this value is set to false, the value is SID.
Predicts existing visuals or generates new visuals to answer a given query.
", + "smithy.api#documentation": "Predicts existing visuals or generates new visuals to answer a given query.
\nThis API uses trusted identity propagation to ensure that an end user is authenticated and receives the embed URL that is specific to that user. The IAM Identity Center application that the user has logged into needs to have trusted Identity Propagation enabled for Amazon QuickSight with the scope value set to quicksight:read. Before you use this action, make sure that you have configured the relevant Amazon QuickSight resource and permissions.
We recommend enabling the QSearchStatus API to unlock the full potential of PredictQnA. When QSearchStatus is enabled, it first checks the specified dashboard for any existing visuals that match the question. If no matching visuals are found, PredictQnA uses generative Q&A to provide an answer. To update the QSearchStatus, see UpdateQuickSightQSearchConfiguration.
The parameters for Amazon RDS.
" } }, + "com.amazonaws.quicksight#RecentSnapshotsConfigurations": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "The recent snapshots configuration for an embedded Amazon QuickSight dashboard.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The recent snapshots configuration for an embedded Amazon QuickSight dashboard.
" + } + }, "com.amazonaws.quicksight#RecoveryWindowInDays": { "type": "long", "traits": { @@ -44916,6 +45147,51 @@ "smithy.api#documentation": "The refresh configuration of a dataset.
" } }, + "com.amazonaws.quicksight#RefreshFailureAlertStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.quicksight#RefreshFailureConfiguration": { + "type": "structure", + "members": { + "EmailAlert": { + "target": "com.amazonaws.quicksight#RefreshFailureEmailAlert", + "traits": { + "smithy.api#documentation": "The email alert configuration for a dataset refresh failure.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The failure configuration of a dataset.
" + } + }, + "com.amazonaws.quicksight#RefreshFailureEmailAlert": { + "type": "structure", + "members": { + "AlertStatus": { + "target": "com.amazonaws.quicksight#RefreshFailureAlertStatus", + "traits": { + "smithy.api#documentation": "The status value that determines if email alerts are sent.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration settings for the email alerts that are sent when a dataset refresh fails.
" + } + }, "com.amazonaws.quicksight#RefreshFrequency": { "type": "structure", "members": { @@ -45248,6 +45524,30 @@ "traits": { "smithy.api#documentation": "The shared view settings of an embedded dashboard.
" } + }, + "AmazonQInQuickSight": { + "target": "com.amazonaws.quicksight#AmazonQInQuickSightConsoleConfigurations", + "traits": { + "smithy.api#documentation": "The Amazon Q configurations of an embedded Amazon QuickSight console.
" + } + }, + "Schedules": { + "target": "com.amazonaws.quicksight#SchedulesConfigurations", + "traits": { + "smithy.api#documentation": "The schedules configuration for an embedded Amazon QuickSight dashboard.
" + } + }, + "RecentSnapshots": { + "target": "com.amazonaws.quicksight#RecentSnapshotsConfigurations", + "traits": { + "smithy.api#documentation": "The recent snapshots configuration for an embedded Amazon QuickSight dashboard.
" + } + }, + "ThresholdAlerts": { + "target": "com.amazonaws.quicksight#ThresholdAlertsConfigurations", + "traits": { + "smithy.api#documentation": "The threshold alerts configuration for an embedded Amazon QuickSight dashboard.
" + } } }, "traits": { @@ -45295,6 +45595,30 @@ "traits": { "smithy.api#documentation": "The bookmarks configuration for an embedded dashboard in Amazon QuickSight.
" } + }, + "AmazonQInQuickSight": { + "target": "com.amazonaws.quicksight#AmazonQInQuickSightDashboardConfigurations", + "traits": { + "smithy.api#documentation": "The Amazon Q configurations of an embedded Amazon QuickSight dashboard.
" + } + }, + "Schedules": { + "target": "com.amazonaws.quicksight#SchedulesConfigurations", + "traits": { + "smithy.api#documentation": "The schedules configuration for an embedded Amazon QuickSight dashboard.
" + } + }, + "RecentSnapshots": { + "target": "com.amazonaws.quicksight#RecentSnapshotsConfigurations", + "traits": { + "smithy.api#documentation": "The recent snapshots configuration for an Amazon QuickSight embedded dashboard
" + } + }, + "ThresholdAlerts": { + "target": "com.amazonaws.quicksight#ThresholdAlertsConfigurations", + "traits": { + "smithy.api#documentation": "The threshold alerts configuration for an Amazon QuickSight embedded dashboard.
" + } } }, "traits": { @@ -46893,6 +47217,22 @@ "smithy.api#documentation": "The refresh on entity for weekly or monthly schedules.
" } }, + "com.amazonaws.quicksight#SchedulesConfigurations": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "The schedules configuration for an embedded Amazon QuickSight dashboard.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The schedules configuration for an embedded Amazon QuickSight dashboard.
" + } + }, "com.amazonaws.quicksight#ScrollBarOptions": { "type": "structure", "members": { @@ -48646,6 +48986,12 @@ "traits": { "smithy.api#documentation": "The layout content type of the sheet. Choose one of the following options:
\n\n PAGINATED: Creates a sheet for a paginated report.
\n INTERACTIVE: Creates a sheet for an interactive dashboard.
A list of visual custom actions for the sheet.
" + } } }, "traits": { @@ -51681,6 +52027,12 @@ "traits": { "smithy.api#documentation": "The settings for the pinned columns of a table visual.
" } + }, + "TransposedTableOptions": { + "target": "com.amazonaws.quicksight#TransposedTableOptionList", + "traits": { + "smithy.api#documentation": "The TableOptions of a transposed table.
The options that determine the thousands separator configuration.
" } }, + "com.amazonaws.quicksight#ThresholdAlertsConfigurations": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "The threshold alerts configuration for an embedded Amazon QuickSight dashboard.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The threshold alerts configuration for an embedded Amazon QuickSight dashboard.
" + } + }, "com.amazonaws.quicksight#ThrottlingException": { "type": "structure", "members": { @@ -55848,6 +56216,73 @@ } } }, + "com.amazonaws.quicksight#TransposedColumnIndex": { + "type": "integer", + "traits": { + "smithy.api#documentation": "The integer value of a column index in the transposed table.
", + "smithy.api#range": { + "min": 0, + "max": 9999 + } + } + }, + "com.amazonaws.quicksight#TransposedColumnType": { + "type": "enum", + "members": { + "ROW_HEADER_COLUMN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ROW_HEADER_COLUMN" + } + }, + "VALUE_COLUMN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VALUE_COLUMN" + } + } + } + }, + "com.amazonaws.quicksight#TransposedTableOption": { + "type": "structure", + "members": { + "ColumnIndex": { + "target": "com.amazonaws.quicksight#TransposedColumnIndex", + "traits": { + "smithy.api#documentation": "The index of a columns in a transposed table. The index range is 0-9999.
" + } + }, + "ColumnWidth": { + "target": "com.amazonaws.quicksight#PixelLength", + "traits": { + "smithy.api#documentation": "The width of a column in a transposed table.
" + } + }, + "ColumnType": { + "target": "com.amazonaws.quicksight#TransposedColumnType", + "traits": { + "smithy.api#documentation": "The column type of the column in a transposed table. Choose one of the following options:
\n\n ROW_HEADER_COLUMN: Refers to the leftmost column of the row header in the transposed table.
\n VALUE_COLUMN: Refers to all value columns in the transposed table.
The column option of the transposed table.
" + } + }, + "com.amazonaws.quicksight#TransposedTableOptionList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#TransposedTableOption" + }, + "traits": { + "smithy.api#documentation": "A list of TransposedTableOption configurations.
A custom action defined on a visual.
" } }, + "com.amazonaws.quicksight#VisualCustomActionDefaults": { + "type": "structure", + "members": { + "highlightOperation": { + "target": "com.amazonaws.quicksight#VisualHighlightOperation", + "traits": { + "smithy.api#documentation": "A list of highlight operations available for visuals in an analysis or sheet.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A list of custom actions applied to visuals in an analysis or sheet.
" + } + }, "com.amazonaws.quicksight#VisualCustomActionList": { "type": "list", "member": { @@ -62360,6 +62809,44 @@ } } }, + "com.amazonaws.quicksight#VisualHighlightOperation": { + "type": "structure", + "members": { + "Trigger": { + "target": "com.amazonaws.quicksight#VisualHighlightTrigger", + "traits": { + "smithy.api#documentation": "Specifies whether a highlight operation is initiated by a click or hover, or whether it's disabled.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Defines what initiates a highlight operation on a visual, such as a click or hover.
" + } + }, + "com.amazonaws.quicksight#VisualHighlightTrigger": { + "type": "enum", + "members": { + "DATA_POINT_CLICK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATA_POINT_CLICK" + } + }, + "DATA_POINT_HOVER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATA_POINT_HOVER" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, "com.amazonaws.quicksight#VisualInteractionOptions": { "type": "structure", "members": { diff --git a/codegen/sdk/aws-models/rds.json b/codegen/sdk/aws-models/rds.json index bf0037ad7d5..7c3339ff2e8 100644 --- a/codegen/sdk/aws-models/rds.json +++ b/codegen/sdk/aws-models/rds.json @@ -27651,7 +27651,7 @@ } ], "traits": { - "smithy.api#documentation": "Restores a DB cluster to an arbitrary point in time. Users can restore to any point\n in time before LatestRestorableTime for up to\n BackupRetentionPeriod days. The target DB cluster is created from the\n source DB cluster with the same configuration as the original DB cluster, except that\n the new DB cluster is created with the default DB security group.
For Aurora, this operation only restores the DB cluster, not the DB instances for that DB\n cluster. You must invoke the CreateDBInstance operation to create DB\n instances for the restored DB cluster, specifying the identifier of the restored DB\n cluster in DBClusterIdentifier. You can create DB instances only after\n the RestoreDBClusterToPointInTime operation has completed and the DB\n cluster is available.
For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see Multi-AZ DB\n cluster deployments in the Amazon RDS User\n Guide.\n
", + "smithy.api#documentation": "Restores a DB cluster to an arbitrary point in time. Users can restore to any point\n in time before LatestRestorableTime for up to\n BackupRetentionPeriod days. The target DB cluster is created from the\n source DB cluster with the same configuration as the original DB cluster, except that\n the new DB cluster is created with the default DB security group. Unless the \n RestoreType is set to copy-on-write, the restore may occur in a \n different Availability Zone (AZ) from the original DB cluster. The AZ where RDS restores \n the DB cluster depends on the AZs in the specified subnet group.
For Aurora, this operation only restores the DB cluster, not the DB instances for that DB\n cluster. You must invoke the CreateDBInstance operation to create DB\n instances for the restored DB cluster, specifying the identifier of the restored DB\n cluster in DBClusterIdentifier. You can create DB instances only after\n the RestoreDBClusterToPointInTime operation has completed and the DB\n cluster is available.
For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see Multi-AZ DB\n cluster deployments in the Amazon RDS User\n Guide.\n
", "smithy.api#examples": [ { "title": "To restore a DB cluster to a specified time", diff --git a/codegen/sdk/aws-models/route-53.json b/codegen/sdk/aws-models/route-53.json index f7fcc13e80e..7cd12e93b28 100644 --- a/codegen/sdk/aws-models/route-53.json +++ b/codegen/sdk/aws-models/route-53.json @@ -10920,6 +10920,18 @@ "traits": { "smithy.api#enumValue": "ap-southeast-7" } + }, + "us_gov_east_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "us-gov-east-1" + } + }, + "us_gov_west_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "us-gov-west-1" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/route53-recovery-control-config.json b/codegen/sdk/aws-models/route53-recovery-control-config.json index c7bebe1a8cd..622b76cd575 100644 --- a/codegen/sdk/aws-models/route53-recovery-control-config.json +++ b/codegen/sdk/aws-models/route53-recovery-control-config.json @@ -181,6 +181,12 @@ "traits": { "smithy.api#documentation": "The Amazon Web Services account ID of the cluster owner.
" } + }, + "NetworkType": { + "target": "com.amazonaws.route53recoverycontrolconfig#NetworkType", + "traits": { + "smithy.api#documentation": "The network type of the cluster. NetworkType can be one of the following: IPV4, DUALSTACK.
" + } } }, "traits": { @@ -338,6 +344,12 @@ "traits": { "smithy.api#documentation": "The tags associated with the cluster.
" } + }, + "NetworkType": { + "target": "com.amazonaws.route53recoverycontrolconfig#NetworkType", + "traits": { + "smithy.api#documentation": "The network type of the cluster. NetworkType can be one of the following: IPV4, DUALSTACK.
" + } } }, "traits": { @@ -1986,6 +1998,26 @@ } } }, + "com.amazonaws.route53recoverycontrolconfig#NetworkType": { + "type": "enum", + "members": { + "IPV4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IPV4" + } + }, + "DUALSTACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DUALSTACK" + } + } + }, + "traits": { + "smithy.api#documentation": "The network type of a cluster. NetworkType can be one of the following:
IPV4: Cluster endpoints support IPv4 only.
DUALSTACK: Cluster endpoints support both IPv4 and IPv6.
" + } + }, "com.amazonaws.route53recoverycontrolconfig#NewAssertionRule": { "type": "structure", "members": { @@ -2175,6 +2207,9 @@ { "target": "com.amazonaws.route53recoverycontrolconfig#UntagResource" }, + { + "target": "com.amazonaws.route53recoverycontrolconfig#UpdateCluster" + }, { "target": "com.amazonaws.route53recoverycontrolconfig#UpdateControlPanel" }, @@ -2197,17 +2232,14 @@ "name": "route53-recovery-control-config" }, "aws.protocols#restJson1": {}, + "smithy.api#auth": [ + "aws.auth#sigv4" + ], "smithy.api#documentation": "Recovery Control Configuration API Reference for Amazon Route 53 Application Recovery Controller
", "smithy.api#title": "AWS Route53 Recovery Control Config", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -2227,6 +2259,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -2258,316 +2296,466 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" 
} ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53-recovery-control-config.us-west-2.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws" ] - } - ], - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://route53-recovery-control-config-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree" + "endpoint": { + "url": "https://arc-recovery-control-config.us-west-2.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": 
"UseDualStack" }, true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53-recovery-control-config-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://route53-recovery-control-config-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsDualStack" + true ] } - ] - } - ], - "rules": [ + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://route53-recovery-control-config-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://route53-recovery-control-config.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ + "conditions": [ { - "ref": "Region" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://route53-recovery-control-config.us-west-2.amazonaws.com", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "route53-recovery-control-config", - "signingRegion": "us-west-2" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53-recovery-control-config.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53-recovery-control-config.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": 
"sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": "https://route53-recovery-control-config.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] }, "smithy.rules#endpointTests": { "testCases": [ { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "route53-recovery-control-config", - "signingRegion": "us-west-2" - } - ] - }, - "url": "https://route53-recovery-control-config.us-west-2.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "aws-global", + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": 
"https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://route53-recovery-control-config-fips.us-east-1.api.aws" } }, @@ -2581,6 +2769,14 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://route53-recovery-control-config-fips.us-east-1.amazonaws.com" } }, @@ -2594,7 +2790,15 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config.us-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "url": "https://arc-recovery-control-config.us-west-2.api.aws" } }, "params": { @@ -2607,7 +2811,15 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config.us-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-west-2" + } + ] + }, + "url": "https://route53-recovery-control-config.us-west-2.amazonaws.com" } }, "params": { @@ -2617,105 +2829,169 @@ } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config-fips.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": 
"https://route53-recovery-control-config-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config-fips.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://route53-recovery-control-config-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://route53-recovery-control-config.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://route53-recovery-control-config.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, 
"UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config-fips.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53-recovery-control-config-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config-fips.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53-recovery-control-config-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53-recovery-control-config.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53-recovery-control-config.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - 
"url": "https://route53-recovery-control-config.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53-recovery-control-config.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -2735,6 +3011,14 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://route53-recovery-control-config-fips.us-iso-east-1.c2s.ic.gov" } }, @@ -2759,6 +3043,14 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://route53-recovery-control-config.us-iso-east-1.c2s.ic.gov" } }, @@ -2783,6 +3075,14 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://route53-recovery-control-config-fips.us-isob-east-1.sc2s.sgov.gov" } }, @@ -2807,6 +3107,14 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://route53-recovery-control-config.us-isob-east-1.sc2s.sgov.gov" } }, @@ -2817,54 +3125,131 @@ } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not 
support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://route53-recovery-control-config-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-east-1", + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://route53-recovery-control-config.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": 
false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://route53-recovery-control-config-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://route53-recovery-control-config.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -3176,6 +3561,82 @@ "smithy.api#output": {} } }, + "com.amazonaws.route53recoverycontrolconfig#UpdateCluster": { + "type": "operation", + "input": { + "target": "com.amazonaws.route53recoverycontrolconfig#UpdateClusterRequest" + }, + "output": { + "target": "com.amazonaws.route53recoverycontrolconfig#UpdateClusterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.route53recoverycontrolconfig#AccessDeniedException" + }, + { + "target": "com.amazonaws.route53recoverycontrolconfig#ConflictException" + }, + { + "target": 
"com.amazonaws.route53recoverycontrolconfig#InternalServerException" + }, + { + "target": "com.amazonaws.route53recoverycontrolconfig#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.route53recoverycontrolconfig#ThrottlingException" + }, + { + "target": "com.amazonaws.route53recoverycontrolconfig#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Updates an existing cluster. You can only update the network type of a cluster.
", + "smithy.api#http": { + "method": "PUT", + "uri": "/cluster", + "code": 200 + } + } + }, + "com.amazonaws.route53recoverycontrolconfig#UpdateClusterRequest": { + "type": "structure", + "members": { + "ClusterArn": { + "target": "com.amazonaws.route53recoverycontrolconfig#__stringMin1Max256PatternAZaZ09", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the cluster.
", + "smithy.api#required": {} + } + }, + "NetworkType": { + "target": "com.amazonaws.route53recoverycontrolconfig#NetworkType", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The network type of the cluster. NetworkType can be one of the following: IPV4, DUALSTACK.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The details of the cluster that you're updating.
", + "smithy.api#input": {} + } + }, + "com.amazonaws.route53recoverycontrolconfig#UpdateClusterResponse": { + "type": "structure", + "members": { + "Cluster": { + "target": "com.amazonaws.route53recoverycontrolconfig#Cluster", + "traits": { + "smithy.api#documentation": "The cluster that was updated.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.route53recoverycontrolconfig#UpdateControlPanel": { "type": "operation", "input": { diff --git a/codegen/sdk/aws-models/s3-control.json b/codegen/sdk/aws-models/s3-control.json index 7406a1d5e22..04311d31496 100644 --- a/codegen/sdk/aws-models/s3-control.json +++ b/codegen/sdk/aws-models/s3-control.json @@ -87,6 +87,9 @@ { "target": "com.amazonaws.s3control#DeleteAccessPointPolicyForObjectLambda" }, + { + "target": "com.amazonaws.s3control#DeleteAccessPointScope" + }, { "target": "com.amazonaws.s3control#DeleteBucket" }, @@ -165,6 +168,9 @@ { "target": "com.amazonaws.s3control#GetAccessPointPolicyStatusForObjectLambda" }, + { + "target": "com.amazonaws.s3control#GetAccessPointScope" + }, { "target": "com.amazonaws.s3control#GetBucket" }, @@ -225,6 +231,9 @@ { "target": "com.amazonaws.s3control#ListAccessPoints" }, + { + "target": "com.amazonaws.s3control#ListAccessPointsForDirectoryBuckets" + }, { "target": "com.amazonaws.s3control#ListAccessPointsForObjectLambda" }, @@ -261,6 +270,9 @@ { "target": "com.amazonaws.s3control#PutAccessPointPolicyForObjectLambda" }, + { + "target": "com.amazonaws.s3control#PutAccessPointScope" + }, { "target": "com.amazonaws.s3control#PutBucketLifecycleConfiguration" }, @@ -396,6 +408,11 @@ "required": false, "documentation": "When an Access Point ARN is provided and this flag is enabled, the SDK MUST use the ARN's region when constructing the endpoint instead of the client's configured region.", "type": "Boolean" + }, + "UseS3ExpressControlEndpoint": { + "required": false, + "documentation": "Internal parameter to indicate whether S3Express operation should use control plane, (ex. 
ListDirectoryAccessPoints)", + "type": "Boolean" } }, "rules": [ @@ -414,99 +431,41 @@ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" + "ref": "UseFIPS" }, - "snow" + true ] }, { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } - ] + ], + "assign": "partitionResult" }, { - "fn": "parseURL", + "fn": "stringEquals", "argv": [ { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", + "fn": "getAttr", "argv": [ { - "ref": "Region" - } - ], - "assign": "partitionResult" - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "S3 Snow does not support DualStack", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "S3 Snow does not support FIPS", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": "{url#scheme}://{url#authority}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] + "ref": "partitionResult" }, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "name" + ] + }, + "aws-cn" + ] } ], - "type": "tree" + "error": "Partition does not support FIPS", + "type": "error" }, { "conditions": [ @@ -533,36 +492,6 @@ } ], "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "error": "Partition does not support FIPS", - "type": "error" - }, { "conditions": [ { @@ -855,59 +784,175 @@ ] }, { - "fn": "aws.parseArn", + "fn": "substring", "argv": [ { "ref": "AccessPointName" - } + 
}, + 0, + 7, + true ], - "assign": "accessPointArn" + "assign": "accessPointSuffix" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "accessPointSuffix" + }, + "--xa-s3" + ] } ], "rules": [ { "conditions": [ { - "fn": "getAttr", + "fn": "aws.partition", "argv": [ { - "ref": "accessPointArn" - }, - "resourceId[0]" + "ref": "Region" + } ], - "assign": "arnType" - }, + "assign": "partitionResult" + } + ], + "rules": [ { - "fn": "not", - "argv": [ + "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ { - "ref": "arnType" + "ref": "UseDualStack" }, - "" + true ] } - ] - } - ], - "rules": [ + ], + "error": "S3Express does not support Dual-stack.", + "type": "error" + }, { "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 7, + 15, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 15, + 17, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, { "fn": "stringEquals", "argv": [ { - "fn": "getAttr", + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "UseFIPS" }, - "service" + true + ] + } + ], + "endpoint": { + "url": "https://s3express-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } ] }, - "s3-outposts" + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3express-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + 
"fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 7, + 16, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 16, + 18, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" ] } ], @@ -915,18 +960,587 @@ { "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "UseFIPS" }, - "resourceId[1]" - ], - "assign": "outpostId" + true + ] } ], - "rules": [ - { + "endpoint": { + "url": "https://s3express-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3express-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 7, + 20, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 20, + 22, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3express-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + 
"name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3express-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 7, + 21, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 21, + 23, + true + ], + "assign": "s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3express-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3express-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 7, + 27, + true + ], + "assign": "s3expressAvailabilityZoneId" + }, + { + "fn": "substring", + "argv": [ + { + "ref": "AccessPointName" + }, + 27, + 29, + true + ], + "assign": 
"s3expressAvailabilityZoneDelim" + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "s3expressAvailabilityZoneDelim" + }, + "--" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3express-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3express-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Unrecognized S3Express Access Point name format.", + "type": "error" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "UseS3ExpressControlEndpoint" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseS3ExpressControlEndpoint" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3express-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": 
"https://s3express-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "snow" + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "S3 Snow does not support DualStack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "S3 Snow does not support FIPS", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": "{url#scheme}://{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccessPointName" + } + ] + }, + { + "fn": "aws.parseArn", + "argv": [ + { + "ref": "AccessPointName" + } + ], + "assign": "accessPointArn" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[0]" + ], + "assign": "arnType" + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "arnType" + }, + "" + ] + } + ] + } + ], + "rules": [ + { + "conditions": 
[ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "service" + ] + }, + "s3-outposts" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[1]" + ], + "assign": "outpostId" + } + ], + "rules": [ + { "conditions": [ { "fn": "isValidHostLabel", @@ -2120,36 +2734,6 @@ } ], "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "error": "Partition does not support FIPS", - "type": "error" - }, { "conditions": [ { @@ -2723,19 +3307,131 @@ "type": "tree" } ], - "type": "tree" + "type": "tree" + }, + { + "conditions": [], + "error": "Region must be set", + "type": "error" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "Vanilla outposts without ARN region + access point ARN@us-west-2", + "expect": { + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + 
"AccountId": "123456789012" + } + } + ], + "params": { + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Vanilla outposts with ARN region + access point ARN@us-west-2", + "expect": { + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts.us-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + } + ], + "params": { + "AccessPointName": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } }, { - "conditions": [], - "error": "Region must be set", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "Vanilla outposts without ARN region + access point ARN@us-west-2", + "documentation": "accept an access point ARN@us-west-2", "expect": { "endpoint": { "headers": { @@ -2791,7 +3487,7 @@ } }, { - "documentation": 
"Vanilla outposts with ARN region + access point ARN@us-west-2", + "documentation": "vanilla outposts china@cn-north-1", "expect": { "endpoint": { "headers": { @@ -2807,47 +3503,47 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-east-1", + "signingRegion": "cn-north-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-east-1.amazonaws.com" + "url": "https://s3-outposts.cn-north-1.amazonaws.com.cn" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "cn-north-1" }, "operationName": "GetAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Name": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } }, { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "cn-north-1" }, "operationName": "DeleteAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Name": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccessPointName": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": "us-west-2", + "Region": "cn-north-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "accept an access point ARN@us-west-2", + "documentation": "gov region@us-west-2", "expect": { "endpoint": { "headers": { @@ -2903,7 +3599,123 @@ } }, { - "documentation": "vanilla outposts china@cn-north-1", + "documentation": "gov cloud with fips@us-west-2", + "expect": { + "endpoint": { 
+ "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts-fips.us-west-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseFIPS": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + } + ], + "params": { + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "govcloud with fips + arn region@us-gov-west-1", + "expect": { + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-gov-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts-fips.us-gov-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-gov-west-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": 
"arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-gov-west-1", + "AWS::UseFIPS": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + } + ], + "params": { + "AccessPointName": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Region": "us-gov-west-1", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "gov region@cn-north-1", "expect": { "endpoint": { "headers": { @@ -2950,16 +3762,112 @@ } ], "params": { - "AccessPointName": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccessPointName": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Region": "cn-north-1", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "gov cloud with fips@cn-north-1", + "expect": { + "error": "Partition does not support FIPS" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": 
"123456789012" + } + } + ], + "params": { + "AccessPointName": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Region": "cn-north-1", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "govcloud with fips + arn region@us-gov-west-1", + "expect": { + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-gov-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts-fips.us-gov-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-gov-west-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-gov-west-1", + "AWS::UseFIPS": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + } + ], + "params": { + "AccessPointName": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": "cn-north-1", + "Region": "us-gov-west-1", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "gov region@us-west-2", + "documentation": "gov region@af-south-1", "expect": { "endpoint": { "headers": { @@ -2975,47 +3883,47 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingRegion": 
"af-south-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-west-2.amazonaws.com" + "url": "https://s3-outposts.af-south-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "af-south-1" }, "operationName": "GetAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } }, { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "af-south-1" }, "operationName": "DeleteAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccessPointName": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": "us-west-2", + "Region": "af-south-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "gov cloud with fips@us-west-2", + "documentation": "gov cloud with fips@af-south-1", "expect": { "endpoint": { "headers": { @@ -3031,42 +3939,42 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-west-2.amazonaws.com" + "url": "https://s3-outposts-fips.af-south-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", + "AWS::Region": "af-south-1", "AWS::UseFIPS": true }, "operationName": 
"GetAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } }, { "builtInParams": { - "AWS::Region": "us-west-2", + "AWS::Region": "af-south-1", "AWS::UseFIPS": true }, "operationName": "DeleteAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccessPointName": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": "us-west-2", + "Region": "af-south-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": true @@ -3131,121 +4039,233 @@ } }, { - "documentation": "gov region@cn-north-1", + "documentation": "CreateBucket + OutpostId = outposts endpoint@us-east-2", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-east-2", + "disableDoubleEncoding": true + } ] }, + "url": "https://s3-outposts.us-east-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-2" + }, + "operationName": "CreateBucket", + "operationParams": { + "Bucket": "blah", + "OutpostId": "123" + } + } + ], + "params": { + "Bucket": "blah", + "OutpostId": "123", + "Region": "us-east-2", + "RequiresAccountId": false, + "UseDualStack": false, + "UseFIPS": false + } + }, + 
{ + "documentation": "CreateBucket + OutpostId with fips = outposts endpoint@us-east-2", + "expect": { + "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "cn-north-1", + "signingRegion": "us-east-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.cn-north-1.amazonaws.com.cn" + "url": "https://s3-outposts-fips.us-east-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1" + "AWS::Region": "us-east-2", + "AWS::UseFIPS": true }, - "operationName": "GetAccessPoint", + "operationName": "CreateBucket", "operationParams": { - "Name": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Bucket": "blah", + "OutpostId": "123" } - }, + } + ], + "params": { + "Bucket": "blah", + "OutpostId": "123", + "Region": "us-east-2", + "RequiresAccountId": false, + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "CreateBucket without OutpostId = regular endpoint@us-east-2", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-east-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-control.us-east-2.amazonaws.com" + } + }, + "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1" + "AWS::Region": "us-east-2" }, - "operationName": "DeleteAccessPoint", + "operationName": "CreateBucket", "operationParams": { - "Name": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Bucket": "blah" } } ], "params": { - "AccessPointName": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Region": "cn-north-1", - "RequiresAccountId": true, + "Bucket": "blah", + "Region": "us-east-2", + 
"RequiresAccountId": false, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "gov cloud with fips@cn-north-1", + "documentation": "ListRegionalBuckets + OutpostId = outposts endpoint@us-east-2", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-east-2", + "disableDoubleEncoding": true + } ] }, + "url": "https://s3-outposts.us-east-2.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-2" + }, + "operationName": "ListRegionalBuckets", + "operationParams": { + "AccountId": "123456789012", + "OutpostId": "op-123" + } + } + ], + "params": { + "AccountId": "123456789012", + "OutpostId": "op-123", + "Region": "us-east-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "ListRegionalBuckets without OutpostId = regular endpoint@us-east-2", + "expect": { + "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "cn-north-1", + "signingName": "s3", + "signingRegion": "us-east-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.cn-north-1.amazonaws.com.cn" + "url": "https://123456789012.s3-control.us-east-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", - "AWS::UseFIPS": true + "AWS::Region": "us-east-2" }, - "operationName": "GetAccessPoint", + "operationName": "ListRegionalBuckets", "operationParams": { - "Name": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } - }, + } + ], + "params": { + "AccountId": "123456789012", + "Region": "us-east-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": 
"ListRegionalBucket + OutpostId with fips = outposts endpoint@us-east-2", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-east-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts-fips.us-east-2.amazonaws.com" + } + }, + "operationInputs": [ { "builtInParams": { - "AWS::Region": "cn-north-1", + "AWS::Region": "us-east-2", "AWS::UseFIPS": true }, - "operationName": "DeleteAccessPoint", + "operationName": "ListRegionalBuckets", "operationParams": { - "Name": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "AccountId": "123456789012", + "OutpostId": "op-123" } } ], "params": { - "AccessPointName": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": "cn-north-1", + "OutpostId": "op-123", + "Region": "us-east-2", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": true } }, { - "documentation": "govcloud with fips + arn region@us-gov-west-1", + "documentation": "outpost access points support dualstack@us-west-2", "expect": { "endpoint": { "headers": { @@ -3261,49 +4281,49 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-gov-east-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-gov-east-1.amazonaws.com" + "url": "https://s3-outposts.us-west-2.api.aws" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-gov-west-1", - "AWS::UseFIPS": true + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true }, "operationName": "GetAccessPoint", "operationParams": { - "Name": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Name": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } }, { "builtInParams": { - "AWS::Region": "us-gov-west-1", - "AWS::UseFIPS": true + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true }, "operationName": "DeleteAccessPoint", "operationParams": { - "Name": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } } ], "params": { - "AccessPointName": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": "us-gov-west-1", + "Region": "us-west-2", "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": true + "UseDualStack": true, + "UseFIPS": false } }, { - "documentation": "gov region@af-south-1", + "documentation": "outpost access points support dualstack@af-south-1", "expect": { "endpoint": { "headers": { @@ -3324,13 +4344,14 @@ } ] }, - "url": "https://s3-outposts.af-south-1.amazonaws.com" + "url": "https://s3-outposts.af-south-1.api.aws" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1" + "AWS::Region": "af-south-1", + "AWS::UseDualStack": true }, "operationName": "GetAccessPoint", "operationParams": { @@ -3340,7 +4361,8 @@ }, { "builtInParams": { - "AWS::Region": "af-south-1" + "AWS::Region": "af-south-1", + "AWS::UseDualStack": true }, "operationName": "DeleteAccessPoint", "operationParams": { @@ -3354,12 +4376,12 @@ "AccountId": "123456789012", "Region": "af-south-1", "RequiresAccountId": true, - "UseDualStack": false, + "UseDualStack": true, "UseFIPS": false } }, { - "documentation": "gov cloud with fips@af-south-1", + 
"documentation": "outpost access points support fips + dualstack@af-south-1", "expect": { "endpoint": { "headers": { @@ -3380,14 +4402,15 @@ } ] }, - "url": "https://s3-outposts-fips.af-south-1.amazonaws.com" + "url": "https://s3-outposts-fips.af-south-1.api.aws" } }, "operationInputs": [ { "builtInParams": { "AWS::Region": "af-south-1", - "AWS::UseFIPS": true + "AWS::UseFIPS": true, + "AWS::UseDualStack": true }, "operationName": "GetAccessPoint", "operationParams": { @@ -3398,7 +4421,8 @@ { "builtInParams": { "AWS::Region": "af-south-1", - "AWS::UseFIPS": true + "AWS::UseFIPS": true, + "AWS::UseDualStack": true }, "operationName": "DeleteAccessPoint", "operationParams": { @@ -3412,12 +4436,78 @@ "AccountId": "123456789012", "Region": "af-south-1", "RequiresAccountId": true, - "UseDualStack": false, + "UseDualStack": true, "UseFIPS": true } }, { - "documentation": "govcloud with fips + arn region@us-gov-west-1", + "documentation": "invalid ARN: must be include outpost ID@us-west-2", + "expect": { + "error": "Invalid ARN: The Outpost Id was not set" + }, + "params": { + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost", + "AccountId": "123456789012", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "invalid ARN: must specify access point@us-west-2", + "expect": { + "error": "Invalid ARN: Expected a 4-component resource" + }, + "params": { + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "invalid ARN@us-west-2", + "expect": { + "error": "Invalid ARN: Expected a 4-component resource" + }, + "params": { + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:myaccesspoint", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + 
}, + { + "documentation": "when set, AccountId drives AP construction@us-west-2", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://myid-1234.s3-control.us-west-2.amazonaws.com" + } + }, + "params": { + "AccessPointName": "myaccesspoint", + "AccountId": "myid-1234", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Account ID set inline and in ARN but they both match@us-west-2", "expect": { "endpoint": { "headers": { @@ -3433,162 +4523,257 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-gov-east-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-gov-east-1.amazonaws.com" + "url": "https://s3-outposts.us-west-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-gov-west-1", - "AWS::UseFIPS": true + "AWS::Region": "us-west-2", + "AWS::S3Control::UseArnRegion": false }, "operationName": "GetAccessPoint", "operationParams": { - "Name": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "AccountId": "123456789012", + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint" } - }, + } + ], + "params": { + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseArnRegion": false, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Account ID set inline and in ARN and they do not match@us-west-2", + "expect": { + "error": "Invalid ARN: the accountId specified in the ARN (`123456789012`) does not match 
the parameter (`999999999999`)" + }, + "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-gov-west-1", - "AWS::UseFIPS": true + "AWS::Region": "us-west-2", + "AWS::S3Control::UseArnRegion": false }, - "operationName": "DeleteAccessPoint", + "operationName": "GetAccessPoint", "operationParams": { - "Name": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "AccountId": "999999999999", + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint" } } ], "params": { - "AccessPointName": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Region": "us-gov-west-1", + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "999999999999", + "Region": "us-west-2", "RequiresAccountId": true, + "UseArnRegion": false, "UseDualStack": false, - "UseFIPS": true + "UseFIPS": false } }, { - "documentation": "CreateBucket + OutpostId = outposts endpoint@us-east-2", + "documentation": "get access point prefixed with account id using endpoint url@us-west-2", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-2", + "signingName": "s3", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-east-2.amazonaws.com" + "url": "https://123456789012.control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-2" + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" }, - "operationName": "CreateBucket", + "operationName": "GetAccessPoint", "operationParams": { - "Bucket": "blah", - "OutpostId": "123" + 
"AccountId": "123456789012", + "Name": "apname" } } ], "params": { - "Bucket": "blah", - "OutpostId": "123", - "Region": "us-east-2", - "RequiresAccountId": false, + "AccessPointName": "apname", + "AccountId": "123456789012", + "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", + "Region": "us-west-2", + "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "CreateBucket + OutpostId with fips = outposts endpoint@us-east-2", + "documentation": "endpoint url with s3-outposts@us-west-2", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-east-2", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-east-2.amazonaws.com" + "url": "https://beta.example.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-2", - "AWS::UseFIPS": true + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://beta.example.com" }, - "operationName": "CreateBucket", + "operationName": "GetAccessPoint", "operationParams": { - "Bucket": "blah", - "OutpostId": "123" + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" } } ], "params": { - "Bucket": "blah", - "OutpostId": "123", - "Region": "us-east-2", + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Endpoint": "https://beta.example.com", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "access point name with a bucket arn@us-west-2", + "expect": { + "error": "Expected an outpost type `accesspoint`, found `bucket`" + }, + "params": { 
+ "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Endpoint": "beta.example.com", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "bucket arn with access point name@us-west-2", + "expect": { + "error": "Invalid ARN: Expected an outpost type `bucket`, found `accesspoint`" + }, + "params": { + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Endpoint": "beta.example.com", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "create bucket with outposts@us-west-2", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://beta.example.com" + } + }, + "params": { + "Bucket": "bucketname", + "Endpoint": "https://beta.example.com", + "OutpostId": "op-123", + "Region": "us-west-2", "RequiresAccountId": false, "UseDualStack": false, - "UseFIPS": true + "UseFIPS": false } }, { - "documentation": "CreateBucket without OutpostId = regular endpoint@us-east-2", + "documentation": "get bucket with endpoint_url@us-west-2", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-2", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-control.us-east-2.amazonaws.com" + "url": "https://beta.example.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-2" + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://beta.example.com" }, - "operationName": 
"CreateBucket", + "operationName": "GetBucket", "operationParams": { - "Bucket": "blah" + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { - "Bucket": "blah", - "Region": "us-east-2", - "RequiresAccountId": false, + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Endpoint": "https://beta.example.com", + "Region": "us-west-2", + "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "ListRegionalBuckets + OutpostId = outposts endpoint@us-east-2", + "documentation": "ListRegionalBucket + OutpostId endpoint url@us-east-2", "expect": { "endpoint": { "properties": { @@ -3601,13 +4786,14 @@ } ] }, - "url": "https://s3-outposts.us-east-2.amazonaws.com" + "url": "https://beta.example.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-2" + "AWS::Region": "us-east-2", + "SDK::Endpoint": "https://beta.example.com" }, "operationName": "ListRegionalBuckets", "operationParams": { @@ -3618,6 +4804,7 @@ ], "params": { "AccountId": "123456789012", + "Endpoint": "https://beta.example.com", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -3626,43 +4813,48 @@ } }, { - "documentation": "ListRegionalBuckets without OutpostId = regular endpoint@us-east-2", + "documentation": "ListRegionalBucket + OutpostId + fips + endpoint url@us-east-2", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", + "signingName": "s3-outposts", "signingRegion": "us-east-2", "disableDoubleEncoding": true } ] }, - "url": "https://123456789012.s3-control.us-east-2.amazonaws.com" + "url": "https://beta.example.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-2" + "AWS::Region": "us-east-2", + "AWS::UseFIPS": true, + "SDK::Endpoint": "https://beta.example.com" }, "operationName": 
"ListRegionalBuckets", "operationParams": { - "AccountId": "123456789012" + "AccountId": "123456789012", + "OutpostId": "op-123" } } ], "params": { "AccountId": "123456789012", + "Endpoint": "https://beta.example.com", + "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "ListRegionalBucket + OutpostId with fips = outposts endpoint@us-east-2", + "documentation": "ListRegionalBucket + OutpostId + fips + dualstack@us-east-2", "expect": { "endpoint": { "properties": { @@ -3675,14 +4867,15 @@ } ] }, - "url": "https://s3-outposts-fips.us-east-2.amazonaws.com" + "url": "https://s3-outposts-fips.us-east-2.api.aws" } }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-east-2", - "AWS::UseFIPS": true + "AWS::UseFIPS": true, + "AWS::UseDualStack": true }, "operationName": "ListRegionalBuckets", "operationParams": { @@ -3696,62 +4889,59 @@ "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, - "UseDualStack": false, + "UseDualStack": true, "UseFIPS": true } }, { - "documentation": "outpost access points support dualstack@us-west-2", + "documentation": "CreateBucket + OutpostId endpoint url@us-east-2", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingRegion": "us-east-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-west-2.api.aws" + "url": "https://beta.example.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::UseDualStack": true - }, - "operationName": "GetAccessPoint", - "operationParams": { - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" - } - }, - { - 
"builtInParams": { - "AWS::Region": "us-west-2", - "AWS::UseDualStack": true + "AWS::Region": "us-east-2", + "AWS::UseFIPS": true, + "SDK::Endpoint": "https://beta.example.com" }, - "operationName": "DeleteAccessPoint", + "operationName": "CreateBucket", "operationParams": { - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Bucket": "blah", + "OutpostId": "123" } } ], + "params": { + "Bucket": "blah", + "Endpoint": "https://beta.example.com", + "OutpostId": "123", + "Region": "us-east-2", + "RequiresAccountId": false, + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "dualstack cannot be used with outposts when an endpoint URL is set@us-west-2.", + "expect": { + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" + }, "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", + "Endpoint": "https://s3-outposts.us-west-2.api.aws", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -3759,7 +4949,7 @@ } }, { - "documentation": "outpost access points support dualstack@af-south-1", + "documentation": "vanilla bucket arn requires account id@us-west-2", "expect": { "endpoint": { "headers": { @@ -3775,49 +4965,37 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "af-south-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.af-south-1.api.aws" + "url": "https://s3-outposts.us-west-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseDualStack": true - }, - "operationName": "GetAccessPoint", - "operationParams": { - "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" - } - }, - { - 
"builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseDualStack": true + "AWS::Region": "us-west-2" }, - "operationName": "DeleteAccessPoint", + "operationName": "CreateAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Name": "apname", "AccountId": "123456789012" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Region": "af-south-1", + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-west-2", "RequiresAccountId": true, - "UseDualStack": true, + "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "outpost access points support fips + dualstack@af-south-1", + "documentation": "bucket arn with UseArnRegion = true (arn region supercedes client configured region)@us-west-2", "expect": { "endpoint": { "headers": { @@ -3833,83 +5011,28 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "af-south-1", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.af-south-1.api.aws" + "url": "https://s3-outposts.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseFIPS": true, - "AWS::UseDualStack": true - }, - "operationName": "GetAccessPoint", - "operationParams": { - "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" - } - }, - { - "builtInParams": { - "AWS::Region": "af-south-1", - "AWS::UseFIPS": true, - "AWS::UseDualStack": true + "AWS::Region": "us-west-2" }, - "operationName": "DeleteAccessPoint", + "operationName": "GetBucket", "operationParams": 
{ - "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Region": "af-south-1", - "RequiresAccountId": true, - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "invalid ARN: must be include outpost ID@us-west-2", - "expect": { - "error": "Invalid ARN: The Outpost Id was not set" - }, - "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost", - "AccountId": "123456789012", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "invalid ARN: must specify access point@us-west-2", - "expect": { - "error": "Invalid ARN: Expected a 4-component resource" - }, - "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "invalid ARN@us-west-2", - "expect": { - "error": "Invalid ARN: Expected a 4-component resource" - }, - "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:myaccesspoint", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -3917,33 +5040,52 @@ } }, { - "documentation": "when set, AccountId drives AP construction@us-west-2", + "documentation": "bucket ARN in gov partition (non-fips)@us-gov-east-1", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, 
"properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-west-2", + "signingName": "s3-outposts", + "signingRegion": "us-gov-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://myid-1234.s3-control.us-west-2.amazonaws.com" + "url": "https://s3-outposts.us-gov-east-1.amazonaws.com" } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-gov-east-1" + }, + "operationName": "GetBucket", + "operationParams": { + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" + } + } + ], "params": { - "AccessPointName": "myaccesspoint", - "AccountId": "myid-1234", - "Region": "us-west-2", + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-gov-east-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "Account ID set inline and in ARN but they both match@us-west-2", + "documentation": "bucket ARN in gov partition with FIPS@us-gov-west-1", "expect": { "endpoint": { "headers": { @@ -3959,107 +5101,83 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingRegion": "us-gov-west-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-west-2.amazonaws.com" - } - }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3Control::UseArnRegion": false - }, - "operationName": "GetAccessPoint", - "operationParams": { - "AccountId": "123456789012", - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint" - } + "url": "https://s3-outposts-fips.us-gov-west-1.amazonaws.com" } - ], - "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Region": "us-west-2", - 
"RequiresAccountId": true, - "UseArnRegion": false, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "Account ID set inline and in ARN and they do not match@us-west-2", - "expect": { - "error": "Invalid ARN: the accountId specified in the ARN (`123456789012`) does not match the parameter (`999999999999`)" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3Control::UseArnRegion": false + "AWS::Region": "us-gov-west-1", + "AWS::UseFIPS": true }, - "operationName": "GetAccessPoint", + "operationName": "GetBucket", "operationParams": { - "AccountId": "999999999999", - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "999999999999", - "Region": "us-west-2", + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-gov-west-1", "RequiresAccountId": true, - "UseArnRegion": false, "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "get access point prefixed with account id using endpoint url@us-west-2", + "documentation": "bucket ARN in aws partition with FIPS@us-east-2", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-west-2", + "signingName": "s3-outposts", + "signingRegion": "us-east-2", "disableDoubleEncoding": true } ] }, - "url": "https://123456789012.control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "url": 
"https://s3-outposts-fips.us-east-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com" + "AWS::Region": "us-east-2", + "AWS::UseFIPS": true }, - "operationName": "GetAccessPoint", + "operationName": "GetBucket", "operationParams": { - "AccountId": "123456789012", - "Name": "apname" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { - "AccessPointName": "apname", - "AccountId": "123456789012", - "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Region": "us-west-2", + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-east-2", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "endpoint url with s3-outposts@us-west-2", + "documentation": "bucket ARN in aws partition with fips + dualstack@us-east-2", "expect": { "endpoint": { "headers": { @@ -4075,94 +5193,84 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingRegion": "us-east-2", "disableDoubleEncoding": true } ] }, - "url": "https://beta.example.com" + "url": "https://s3-outposts-fips.us-east-2.api.aws" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "us-east-2", + "AWS::UseFIPS": true, + "AWS::UseDualStack": true }, - "operationName": "GetAccessPoint", + "operationName": "GetBucket", "operationParams": { - "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } } ], "params": { - "AccessPointName": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Endpoint": "https://beta.example.com", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "access point name with a bucket arn@us-west-2", - "expect": { - "error": "Expected an outpost type `accesspoint`, found `bucket`" - }, - "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Endpoint": "beta.example.com", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "bucket arn with access point name@us-west-2", - "expect": { - "error": "Invalid ARN: Expected an outpost type `bucket`, found `accesspoint`" - }, - "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "Endpoint": "beta.example.com", - "Region": "us-west-2", + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-east-2", "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "UseDualStack": true, + "UseFIPS": true } }, { - "documentation": "create bucket with outposts@us-west-2", + "documentation": "vanilla bucket arn requires account id@cn-north-1", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingRegion": "cn-north-1", "disableDoubleEncoding": true } ] }, - "url": "https://beta.example.com" + "url": "https://s3-outposts.cn-north-1.amazonaws.com.cn" } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1" + }, + "operationName": "CreateAccessPoint", + 
"operationParams": { + "Bucket": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Name": "apname", + "AccountId": "123456789012" + } + } + ], "params": { - "Bucket": "bucketname", - "Endpoint": "https://beta.example.com", - "OutpostId": "op-123", - "Region": "us-west-2", - "RequiresAccountId": false, + "Bucket": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "cn-north-1", + "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "get bucket with endpoint_url@us-west-2", + "documentation": "bucket arn with UseArnRegion = true (arn region supercedes client configured region)@us-west-2", "expect": { "endpoint": { "headers": { @@ -4178,30 +5286,28 @@ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://beta.example.com" + "url": "https://s3-outposts.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "us-west-2" }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } } ], "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Endpoint": "https://beta.example.com", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -4209,90 +5315,108 @@ } }, { - "documentation": "ListRegionalBucket + OutpostId endpoint url@us-east-2", + "documentation": "bucket ARN in gov partition 
(non-fips)@us-gov-east-1", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-east-2", + "signingRegion": "us-gov-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://beta.example.com" + "url": "https://s3-outposts.us-gov-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-2", - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "us-gov-east-1" }, - "operationName": "ListRegionalBuckets", + "operationName": "GetBucket", "operationParams": { - "AccountId": "123456789012", - "OutpostId": "op-123" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { - "AccountId": "123456789012", - "Endpoint": "https://beta.example.com", - "OutpostId": "op-123", - "Region": "us-east-2", + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-gov-east-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "ListRegionalBucket + OutpostId + fips + endpoint url@us-east-2", + "documentation": "bucket ARN in gov partition with FIPS@us-gov-west-1", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-east-2", + "signingRegion": "us-gov-west-1", "disableDoubleEncoding": true } ] }, - "url": "https://beta.example.com" + "url": "https://s3-outposts-fips.us-gov-west-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-2", - "AWS::UseFIPS": true, - "SDK::Endpoint": 
"https://beta.example.com" + "AWS::Region": "us-gov-west-1", + "AWS::UseFIPS": true }, - "operationName": "ListRegionalBuckets", + "operationName": "GetBucket", "operationParams": { - "AccountId": "123456789012", - "OutpostId": "op-123" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { - "AccountId": "123456789012", - "Endpoint": "https://beta.example.com", - "OutpostId": "op-123", - "Region": "us-east-2", + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-gov-west-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": true } }, { - "documentation": "ListRegionalBucket + OutpostId + fips + dualstack@us-east-2", + "documentation": "bucket ARN in aws partition with FIPS@us-east-2", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { @@ -4303,81 +5427,70 @@ } ] }, - "url": "https://s3-outposts-fips.us-east-2.api.aws" + "url": "https://s3-outposts-fips.us-east-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-east-2", - "AWS::UseFIPS": true, - "AWS::UseDualStack": true + "AWS::UseFIPS": true }, - "operationName": "ListRegionalBuckets", + "operationName": "GetBucket", "operationParams": { - "AccountId": "123456789012", - "OutpostId": "op-123" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { - "AccountId": "123456789012", - "OutpostId": "op-123", + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Region": "us-east-2", "RequiresAccountId": true, - "UseDualStack": true, + "UseDualStack": false, "UseFIPS": true } }, { - "documentation": "CreateBucket + OutpostId 
endpoint url@us-east-2", + "documentation": "Outposts support dualstack @us-west-2", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "s3-outposts", - "signingRegion": "us-east-2", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://beta.example.com" + "url": "https://s3-outposts.us-west-2.api.aws" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-2", - "AWS::UseFIPS": true, - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true }, - "operationName": "CreateBucket", + "operationName": "GetBucket", "operationParams": { - "Bucket": "blah", - "OutpostId": "123" + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { - "Bucket": "blah", - "Endpoint": "https://beta.example.com", - "OutpostId": "123", - "Region": "us-east-2", - "RequiresAccountId": false, - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "dualstack cannot be used with outposts when an endpoint URL is set@us-west-2.", - "expect": { - "error": "Invalid Configuration: DualStack and custom endpoint are not supported" - }, - "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "Endpoint": "https://s3-outposts.us-west-2.api.aws", + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -4385,7 +5498,7 @@ } }, { - "documentation": "vanilla bucket arn requires account id@us-west-2", + "documentation": "vanilla bucket arn requires account id@af-south-1", "expect": { "endpoint": { "headers": { @@ -4401,30 +5514,30 @@ { "name": 
"sigv4", "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingRegion": "af-south-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-west-2.amazonaws.com" + "url": "https://s3-outposts.af-south-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "af-south-1" }, "operationName": "CreateAccessPoint", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Bucket": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Name": "apname", "AccountId": "123456789012" } } ], "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", + "Bucket": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "af-south-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false @@ -4613,608 +5726,495 @@ } }, { - "documentation": "bucket ARN in aws partition with fips + dualstack@us-east-2", + "documentation": "Invalid ARN: missing outpost id and bucket@us-west-2", + "expect": { + "error": "Invalid ARN: The Outpost Id was not set" + }, + "params": { + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Invalid ARN: missing bucket@us-west-2", + "expect": { + "error": "Invalid ARN: Expected a 4-component resource" + }, + "params": { + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Invalid ARN: missing outpost and bucket ids@us-west-2", + "expect": { + "error": "Invalid ARN: Expected a 4-component resource" + }, + "params": { + "Bucket": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:bucket", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Invalid ARN: missing bucket id@us-west-2", + "expect": { + "error": "Invalid ARN: expected a bucket name" + }, + "params": { + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "account id inserted into hostname@us-west-2", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-2", + "signingName": "s3", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-east-2.api.aws" + "url": "https://1234567890.s3-control.us-west-2.amazonaws.com" } }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-east-2", - "AWS::UseFIPS": true, - "AWS::UseDualStack": true + "params": { + "AccountId": "1234567890", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "account id prefix with dualstack@us-east-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] }, - "operationName": "GetBucket", - "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" - } + "url": "https://1234567890.s3-control.dualstack.us-east-1.amazonaws.com" } - ], + }, "params": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - 
"Region": "us-east-2", + "AccountId": "1234567890", + "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": true + "UseFIPS": false } }, { - "documentation": "vanilla bucket arn requires account id@cn-north-1", + "documentation": "account id prefix with fips@us-east-1", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "cn-north-1", + "signingName": "s3", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.cn-north-1.amazonaws.com.cn" + "url": "https://1234567890.s3-control-fips.us-east-1.amazonaws.com" } }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "cn-north-1" - }, - "operationName": "CreateAccessPoint", - "operationParams": { - "Bucket": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname", - "AccountId": "123456789012" - } - } - ], "params": { - "Bucket": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "cn-north-1", + "AccountId": "1234567890", + "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "bucket arn with UseArnRegion = true (arn region supercedes client configured region)@us-west-2", + "documentation": "custom account id prefix with fips@us-east-1", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", + "signingName": "s3", "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-east-1.amazonaws.com" + "url": 
"https://123456789012.s3-control-fips.us-east-1.amazonaws.com" } }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2" - }, - "operationName": "GetBucket", - "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" - } - } - ], "params": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", + "AccountId": "123456789012", + "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "bucket ARN in gov partition (non-fips)@us-gov-east-1", + "documentation": "standard url @ us-east-1", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-gov-east-1", + "signingName": "s3", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-gov-east-1.amazonaws.com" + "url": "https://s3-control.us-east-1.amazonaws.com" } }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-gov-east-1" - }, - "operationName": "GetBucket", - "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" - } - } - ], "params": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-gov-east-1", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "Region": "us-east-1" } }, { - "documentation": "bucket ARN in gov partition with FIPS@us-gov-west-1", + "documentation": "fips url @ us-east-1", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], 
- "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-gov-west-1", + "signingName": "s3", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-gov-west-1.amazonaws.com" + "url": "https://s3-control-fips.us-east-1.amazonaws.com" } }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-gov-west-1", - "AWS::UseFIPS": true - }, - "operationName": "GetBucket", - "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" - } - } - ], "params": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-gov-west-1", - "RequiresAccountId": true, - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true } }, { - "documentation": "bucket ARN in aws partition with FIPS@us-east-2", + "documentation": "dualstack url @ us-east-1", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-2", + "signingName": "s3", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-east-2.amazonaws.com" + "url": "https://s3-control.dualstack.us-east-1.amazonaws.com" } }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-east-2", - "AWS::UseFIPS": true + "params": { + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "fips,dualstack url @ us-east-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] }, - 
"operationName": "GetBucket", - "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" - } + "url": "https://s3-control-fips.dualstack.us-east-1.amazonaws.com" } - ], + }, "params": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-east-2", - "RequiresAccountId": true, - "UseDualStack": false, + "Region": "us-east-1", + "UseDualStack": true, "UseFIPS": true } }, { - "documentation": "Outposts support dualstack @us-west-2", + "documentation": "standard url @ cn-north-1", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingName": "s3", + "signingRegion": "cn-north-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-west-2.api.aws" + "url": "https://s3-control.cn-north-1.amazonaws.com.cn" } }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::UseDualStack": true - }, - "operationName": "GetBucket", - "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" - } - } - ], "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", - "RequiresAccountId": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "fips @ cn-north-1", + "expect": { + "error": "Partition does not support FIPS" + }, + "params": { + "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "vanilla bucket arn requires account id@af-south-1", + "documentation": "custom account id prefix @us-east-1", "expect": { "endpoint": { - 
"headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "af-south-1", + "signingName": "s3", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.af-south-1.amazonaws.com" + "url": "https://123456789012.s3-control.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "af-south-1" + "AWS::Region": "us-east-1" }, - "operationName": "CreateAccessPoint", + "operationName": "ListRegionalBuckets", "operationParams": { - "Bucket": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname", "AccountId": "123456789012" } } ], "params": { - "Bucket": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "af-south-1", + "AccountId": "123456789012", + "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "bucket arn with UseArnRegion = true (arn region supercedes client configured region)@us-west-2", + "documentation": "invalid account id prefix @us-east-1", "expect": { - "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://s3-outposts.us-east-1.amazonaws.com" - } + "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`." 
}, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "us-east-1" }, - "operationName": "GetBucket", + "operationName": "ListRegionalBuckets", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" + "AccountId": "/?invalid¬-host*label" } } ], "params": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", + "AccountId": "/?invalid¬-host*label", + "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "bucket ARN in gov partition (non-fips)@us-gov-east-1", + "documentation": "custom account id prefix with fips@us-east-1", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-gov-east-1", + "signingName": "s3", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-gov-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-gov-east-1" + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true }, - "operationName": "GetBucket", + "operationName": "ListRegionalBuckets", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } } ], "params": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-gov-east-1", + "AccountId": "123456789012", + "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": 
"bucket ARN in gov partition with FIPS@us-gov-west-1", + "documentation": "custom account id prefix with dualstack,fips@us-east-1", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-gov-west-1", + "signingName": "s3", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-gov-west-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.dualstack.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-gov-west-1", - "AWS::UseFIPS": true + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true, + "AWS::UseDualStack": true }, - "operationName": "GetBucket", + "operationName": "ListRegionalBuckets", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } } ], "params": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-gov-west-1", + "AccountId": "123456789012", + "Region": "us-east-1", "RequiresAccountId": true, - "UseDualStack": false, + "UseDualStack": true, "UseFIPS": true } }, { - "documentation": "bucket ARN in aws partition with FIPS@us-east-2", + "documentation": "custom account id with custom endpoint", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-2", + "signingName": "s3", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts-fips.us-east-2.amazonaws.com" + "url": "https://123456789012.example.com" } }, "operationInputs": [ { 
"builtInParams": { - "AWS::Region": "us-east-2", - "AWS::UseFIPS": true + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://example.com" }, - "operationName": "GetBucket", + "operationName": "ListRegionalBuckets", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } } ], "params": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-east-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "Invalid ARN: missing outpost id and bucket@us-west-2", - "expect": { - "error": "Invalid ARN: The Outpost Id was not set" - }, - "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost", - "Region": "us-west-2", + "AccountId": "123456789012", + "Region": "us-east-1", "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "Invalid ARN: missing bucket@us-west-2", + "documentation": "RequiresAccountId with AccountId unset", "expect": { - "error": "Invalid ARN: Expected a 4-component resource" + "error": "AccountId is required but not set" }, "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "Region": "us-east-1", + "RequiresAccountId": true } }, { - "documentation": "Invalid ARN: missing outpost and bucket ids@us-west-2", + "documentation": "RequiresAccountId with AccountId unset and custom endpoint", "expect": { - "error": "Invalid ARN: Expected a 4-component resource" + "error": "AccountId is required but not set" }, "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:bucket", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "Region": 
"us-east-1", + "Endpoint": "https://beta.example.com", + "RequiresAccountId": true } }, { - "documentation": "Invalid ARN: missing bucket id@us-west-2", + "documentation": "RequiresAccountId with invalid AccountId and custom endpoint", "expect": { - "error": "Invalid ARN: expected a bucket name" + "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`." }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://beta.example.com" + }, + "operationName": "ListRegionalBuckets", + "operationParams": { + "AccountId": "/?invalid¬-host*label" + } + } + ], "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "Region": "us-east-1", + "Endpoint": "https://beta.example.com", + "AccountId": "/?invalid¬-host*label", + "RequiresAccountId": true } }, { - "documentation": "account id inserted into hostname@us-west-2", + "documentation": "account id with custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -5222,24 +6222,37 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "us-west-2", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://1234567890.s3-control.us-west-2.amazonaws.com" + "url": "https://123456789012.example.com" } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true, + "SDK::Endpoint": "https://example.com" + }, + "operationName": "ListRegionalBuckets", + "operationParams": { + "AccountId": "123456789012" + } + } + ], "params": { - "AccountId": "1234567890", - "Region": "us-west-2", + "AccountId": "123456789012", + "Region": "us-east-1", "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "Endpoint": "https://example.com", + "UseFIPS": true } }, { - "documentation": "account id prefix with dualstack@us-east-1", + "documentation": 
"custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -5252,19 +6265,17 @@ } ] }, - "url": "https://1234567890.s3-control.dualstack.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "AccountId": "1234567890", "Region": "us-east-1", - "RequiresAccountId": true, - "UseDualStack": true, - "UseFIPS": false + "Endpoint": "https://example.com", + "UseFIPS": true } }, { - "documentation": "account id prefix with fips@us-east-1", + "documentation": "custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -5277,19 +6288,122 @@ } ] }, - "url": "https://1234567890.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "custom endpoint, DualStack", + "expect": { + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "Endpoint": "https://example.com", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "region not set", + "expect": { + "error": "Region must be set" + } + }, + { + "documentation": "invalid partition", + "expect": { + "error": "Invalid region: region was not a valid DNS name." + }, + "params": { + "Region": "invalid-region 42" + } + }, + { + "documentation": "ListRegionalBuckets + OutpostId without accountId set.", + "expect": { + "error": "AccountId is required but not set" + }, + "params": { + "OutpostId": "op-123", + "Region": "us-east-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "ListRegionalBuckets + OutpostId with invalid accountId set.", + "expect": { + "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`." 
+ }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-2" + }, + "operationName": "ListRegionalBuckets", + "operationParams": { + "OutpostId": "op-123", + "AccountId": "/?invalid¬-host*label" + } } + ], + "params": { + "AccountId": "/?invalid¬-host*label", + "OutpostId": "op-123", + "Region": "us-east-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "accesspoint set but missing accountId", + "expect": { + "error": "AccountId is required but not set" + }, + "params": { + "AccessPointName": "myaccesspoint", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "outpost accesspoint ARN with missing accountId", + "expect": { + "error": "Invalid ARN: missing account ID" + }, + "params": { + "AccessPointName": "arn:aws:s3-outposts:us-west-2::outpost:op-01234567890123456:outpost:op1", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "bucket ARN with missing accountId", + "expect": { + "error": "Invalid ARN: missing account ID" }, "params": { - "AccountId": "1234567890", - "Region": "us-east-1", + "AccessPointName": "arn:aws:s3-outposts:us-west-2::outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true + "UseFIPS": false } }, { - "documentation": "custom account id prefix with fips@us-east-1", + "documentation": "endpoint url with accesspoint (non-arn)", "expect": { "endpoint": { "properties": { @@ -5297,398 +6411,568 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "us-east-1", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://123456789012.beta.example.com" } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": 
"us-west-2", + "SDK::Endpoint": "https://beta.example.com" + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "apname", + "AccountId": "123456789012" + } + } + ], "params": { + "AccessPointName": "apname", + "Endpoint": "https://beta.example.com", "AccountId": "123456789012", - "Region": "us-east-1", + "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true + "UseFIPS": false } }, { - "documentation": "standard url @ us-east-1", + "documentation": "access point name with an accesspoint arn@us-west-2", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://s3-control.us-east-1.amazonaws.com" + "url": "https://beta.example.com" } }, "params": { - "Region": "us-east-1" + "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Endpoint": "https://beta.example.com", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false } }, { - "documentation": "fips url @ us-east-1", + "documentation": "DualStack + Custom endpoint is not supported(non-arn)", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true, + "SDK::Endpoint": "https://beta.example.com" }, - "url": "https://s3-control-fips.us-east-1.amazonaws.com" + "operationName": "GetAccessPoint", + "operationParams": { + "Name": 
"apname", + "AccountId": "123456789012" + } } - }, + ], "params": { - "Region": "us-east-1", - "UseFIPS": true + "AccessPointName": "apname", + "Endpoint": "https://beta.example.com", + "AccountId": "123456789012", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": true, + "UseFIPS": false } }, { - "documentation": "dualstack url @ us-east-1", + "documentation": "get bucket with custom endpoint and dualstack is not supported@us-west-2", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true, + "SDK::Endpoint": "https://s3-outposts.us-west-2.api.aws" }, - "url": "https://s3-control.dualstack.us-east-1.amazonaws.com" + "operationName": "GetBucket", + "operationParams": { + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" + } } - }, + ], "params": { - "Region": "us-east-1", - "UseDualStack": true + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Endpoint": "https://s3-outposts.us-west-2.api.aws", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": true, + "UseFIPS": false } }, { - "documentation": "fips,dualstack url @ us-east-1", + "documentation": "ListRegionalBuckets + OutpostId with fips in CN.", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] + "error": "Partition does not support FIPS" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true }, - "url": 
"https://s3-control-fips.dualstack.us-east-1.amazonaws.com" + "operationName": "ListRegionalBuckets", + "operationParams": { + "OutpostId": "op-123", + "AccountId": "0123456789012" + } } - }, + ], "params": { - "Region": "us-east-1", - "UseDualStack": true, + "AccountId": "0123456789012", + "OutpostId": "op-123", + "Region": "cn-north-1", + "RequiresAccountId": true, + "UseDualStack": false, "UseFIPS": true } }, { - "documentation": "standard url @ cn-north-1", + "documentation": "ListRegionalBuckets + invalid OutpostId.", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "cn-north-1", - "disableDoubleEncoding": true - } - ] + "error": "OutpostId must only contain a-z, A-Z, 0-9 and `-`." + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-1" }, - "url": "https://s3-control.cn-north-1.amazonaws.com.cn" + "operationName": "ListRegionalBuckets", + "operationParams": { + "OutpostId": "?outpost/invalid+", + "AccountId": "0123456789012" + } } + ], + "params": { + "AccountId": "0123456789012", + "OutpostId": "?outpost/invalid+", + "Region": "us-west-1", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "bucket ARN with mismatched accountId", + "expect": { + "error": "Invalid ARN: the accountId specified in the ARN (`999999`) does not match the parameter (`0123456789012`)" }, "params": { - "Region": "cn-north-1" + "Bucket": "arn:aws:s3-outposts:us-west-2:999999:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "0123456789012", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false } }, { - "documentation": "fips @ cn-north-1", + "documentation": "OutpostId with invalid region", "expect": { - "error": "Partition does not support FIPS" + "error": "Invalid region: region was not a valid DNS name." 
}, "params": { - "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "OutpostId": "op-123", + "Region": "invalid-region 42", + "AccountId": "0123456", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false } }, { - "documentation": "custom account id prefix @us-east-1", + "documentation": "OutpostId with RequireAccountId unset", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://123456789012.s3-control.us-east-1.amazonaws.com" + "url": "https://s3-outposts.us-west-2.amazonaws.com" } }, + "params": { + "OutpostId": "op-123", + "Region": "us-west-2", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Outpost Accesspoint ARN with arn region and client region mismatch with UseArnRegion=false", + "expect": { + "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" + }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1" + "AWS::Region": "us-west-2", + "AWS::S3Control::UseArnRegion": false + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::S3Control::UseArnRegion": false }, - "operationName": "ListRegionalBuckets", + "operationName": "DeleteAccessPoint", "operationParams": { + "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } } ], "params": { + "AccessPointName": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": 
"us-east-1", + "Region": "us-west-2", "RequiresAccountId": true, + "UseArnRegion": false, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "invalid account id prefix @us-east-1", + "documentation": "Outpost Bucket ARN with arn region and client region mismatch with UseArnRegion=false", "expect": { - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`." + "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1" + "AWS::Region": "us-west-2", + "SDK::Endpoint": "https://beta.example.com", + "AWS::S3Control::UseArnRegion": false }, - "operationName": "ListRegionalBuckets", + "operationName": "GetBucket", "operationParams": { - "AccountId": "/?invalid¬-host*label" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { - "AccountId": "/?invalid¬-host*label", - "Region": "us-east-1", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Endpoint": "https://beta.example.com", + "Region": "us-west-2", "RequiresAccountId": true, + "UseArnRegion": false, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "custom account id prefix with fips@us-east-1", + "documentation": "Accesspoint ARN with region mismatch and UseArnRegion unset", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", + "signingName": "s3-outposts", "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://s3-outposts.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1", 
- "AWS::UseFIPS": true + "AWS::Region": "us-west-2" }, - "operationName": "ListRegionalBuckets", + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "DeleteAccessPoint", "operationParams": { + "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012" } } ], "params": { + "AccessPointName": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": "us-east-1", + "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true + "UseFIPS": false } }, { - "documentation": "custom account id prefix with dualstack,fips@us-east-1", + "documentation": "Bucket ARN with region mismatch and UseArnRegion unset", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", + "signingName": "s3-outposts", "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://123456789012.s3-control-fips.dualstack.us-east-1.amazonaws.com" + "url": "https://s3-outposts.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1", - "AWS::UseFIPS": true, - "AWS::UseDualStack": true + "AWS::Region": "us-west-2" }, - "operationName": "ListRegionalBuckets", + "operationName": "GetBucket", "operationParams": { + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } } ], "params": { - "AccountId": "123456789012", - "Region": "us-east-1", + "Bucket": 
"arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-west-2", "RequiresAccountId": true, - "UseDualStack": true, - "UseFIPS": true + "UseDualStack": false, + "UseFIPS": false } }, { - "documentation": "custom account id with custom endpoint", + "documentation": "Outpost Bucket ARN with partition mismatch with UseArnRegion=true", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://123456789012.example.com" - } + "error": "Client was configured for partition `aws` but ARN has `aws-cn`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1", - "SDK::Endpoint": "https://example.com" + "AWS::Region": "us-west-2", + "AWS::S3Control::UseArnRegion": true }, - "operationName": "ListRegionalBuckets", + "operationName": "GetBucket", "operationParams": { + "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } } ], "params": { - "AccountId": "123456789012", - "Region": "us-east-1", + "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-west-2", "RequiresAccountId": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "RequiresAccountId with AccountId unset", - "expect": { - "error": "AccountId is required but not set" - }, - "params": { - "Region": "us-east-1", - "RequiresAccountId": true - } - }, - { - "documentation": "RequiresAccountId with AccountId unset and custom endpoint", - "expect": { - "error": "AccountId is required but not set" - }, - "params": { - "Region": "us-east-1", - "Endpoint": "https://beta.example.com", - "RequiresAccountId": true + "UseArnRegion": true, + "UseDualStack": false, + "UseFIPS": false } }, { - "documentation": "RequiresAccountId with invalid AccountId and 
custom endpoint", + "documentation": "Accesspoint ARN with partition mismatch and UseArnRegion=true", "expect": { - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`." + "error": "Client was configured for partition `aws` but ARN has `aws-cn`" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1", - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "us-west-2", + "AWS::S3Control::UseArnRegion": true }, - "operationName": "ListRegionalBuckets", + "operationName": "GetAccessPoint", "operationParams": { - "AccountId": "/?invalid¬-host*label" + "Name": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::S3Control::UseArnRegion": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" } } ], "params": { - "Region": "us-east-1", - "Endpoint": "https://beta.example.com", - "AccountId": "/?invalid¬-host*label", - "RequiresAccountId": true + "AccessPointName": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012", + "Region": "us-west-2", + "RequiresAccountId": true, + "UseDualStack": false, + "UseArnRegion": true, + "UseFIPS": false } }, { - "documentation": "account id with custom endpoint, fips", + "documentation": "Accesspoint ARN with region mismatch, UseArnRegion=false and custom endpoint", + "expect": { + "error": "Invalid configuration: region from ARN `cn-north-1` does not match client region `us-west-2` and UseArnRegion is `false`" + }, + "params": { + "AccessPointName": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "Region": "us-west-2", + "Endpoint": "https://example.com", + 
"RequiresAccountId": true, + "UseDualStack": false, + "UseArnRegion": false, + "UseFIPS": false + } + }, + { + "documentation": "outpost bucket arn@us-west-2", "expect": { "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", "disableDoubleEncoding": true } ] }, - "url": "https://123456789012.example.com" + "url": "https://s3-outposts.us-west-2.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-east-1", - "AWS::UseFIPS": true, - "SDK::Endpoint": "https://example.com" + "AWS::Region": "us-west-2" }, - "operationName": "ListRegionalBuckets", + "operationName": "GetBucketVersioning", "operationParams": { + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "AccountId": "123456789012" } + }, + { + "builtInParams": { + "AWS::Region": "us-west-2" + }, + "operationName": "PutBucketVersioning", + "operationParams": { + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012", + "VersioningConfiguration": { + "Status": "Enabled" + } + } } ], "params": { - "AccountId": "123456789012", - "Region": "us-east-1", + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-west-2", "RequiresAccountId": true, - "Endpoint": "https://example.com", - "UseFIPS": true + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "S3 Snow Control with bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://10.0.1.12:433" + } + }, + "params": { + "Region": "snow", 
+ "Bucket": "bucketName", + "Endpoint": "https://10.0.1.12:433", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "custom endpoint, fips", + "documentation": "S3 Snow Control without bucket", "expect": { "endpoint": { "properties": { @@ -5696,22 +6980,23 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "us-east-1", + "signingRegion": "snow", "disableDoubleEncoding": true } ] }, - "url": "https://example.com" + "url": "https://10.0.1.12:433" } }, "params": { - "Region": "us-east-1", - "Endpoint": "https://example.com", - "UseFIPS": true + "Region": "snow", + "Endpoint": "https://10.0.1.12:433", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "custom endpoint, fips", + "documentation": "S3 Snow Control with bucket and without port", "expect": { "endpoint": { "properties": { @@ -5719,792 +7004,882 @@ { "name": "sigv4", "signingName": "s3", - "signingRegion": "us-east-1", + "signingRegion": "snow", "disableDoubleEncoding": true } ] }, - "url": "https://example.com" + "url": "https://10.0.1.12" } }, "params": { - "Region": "us-east-1", - "Endpoint": "https://example.com", - "UseFIPS": true - } - }, - { - "documentation": "custom endpoint, DualStack", - "expect": { - "error": "Invalid Configuration: DualStack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "Endpoint": "https://example.com", + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "https://10.0.1.12", "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "region not set", - "expect": { - "error": "Region must be set" - } - }, - { - "documentation": "invalid partition", - "expect": { - "error": "Invalid region: region was not a valid DNS name." 
- }, - "params": { - "Region": "invalid-region 42" - } - }, - { - "documentation": "ListRegionalBuckets + OutpostId without accountId set.", - "expect": { - "error": "AccountId is required but not set" - }, - "params": { - "OutpostId": "op-123", - "Region": "us-east-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "UseDualStack": false } }, { - "documentation": "ListRegionalBuckets + OutpostId with invalid accountId set.", + "documentation": "S3 Snow Control with bucket and with DNS", "expect": { - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`." - }, - "operationInputs": [ - { - "builtInParams": { - "AWS::Region": "us-east-2" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] }, - "operationName": "ListRegionalBuckets", - "operationParams": { - "OutpostId": "op-123", - "AccountId": "/?invalid¬-host*label" - } + "url": "http://s3snow.com" } - ], - "params": { - "AccountId": "/?invalid¬-host*label", - "OutpostId": "op-123", - "Region": "us-east-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "accesspoint set but missing accountId", - "expect": { - "error": "AccountId is required but not set" }, "params": { - "AccessPointName": "myaccesspoint", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "http://s3snow.com", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "outpost accesspoint ARN with missing accountId", + "documentation": "S3 Snow Control with FIPS enabled", "expect": { - "error": "Invalid ARN: missing account ID" + "error": "S3 Snow does not support FIPS" }, "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2::outpost:op-01234567890123456:outpost:op1", - "Region": "us-west-2", - "RequiresAccountId": true, 
- "UseDualStack": false, - "UseFIPS": false + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "https://10.0.1.12:433", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "bucket ARN with missing accountId", + "documentation": "S3 Snow Control with Dualstack enabled", "expect": { - "error": "Invalid ARN: missing account ID" + "error": "S3 Snow does not support DualStack" }, "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2::outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "https://10.0.1.12:433", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "endpoint url with accesspoint (non-arn)", + "documentation": "Access Point APIs on express bucket routed to s3express-control", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-west-2", + "signingName": "s3express", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://123456789012.beta.example.com" + "url": "https://s3express-control.us-east-1.amazonaws.com" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "us-east-1" + }, + "operationName": "CreateAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, "operationName": "GetAccessPoint", "operationParams": { - "Name": "apname", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } - } - ], - "params": { - "AccessPointName": "apname", - "Endpoint": "https://beta.example.com", - "AccountId": 
"123456789012", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "access point name with an accesspoint arn@us-west-2", - "expect": { - "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-west-2", - "disableDoubleEncoding": true - } - ] + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "url": "https://beta.example.com" - } - }, - "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "Endpoint": "https://beta.example.com", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "DualStack + Custom endpoint is not supported(non-arn)", - "expect": { - "error": "Invalid Configuration: DualStack and custom endpoint are not supported" - }, - "operationInputs": [ + "operationName": "PutAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::UseDualStack": true, - "SDK::Endpoint": "https://beta.example.com" + "AWS::Region": "us-east-1" + }, + "operationName": "GetAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "DeleteAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + 
"AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "PutAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Policy": "my-policy" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "operationName": "GetAccessPoint", + "operationName": "GetAccessPointPolicy", "operationParams": { - "Name": "apname", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } - } - ], - "params": { - "AccessPointName": "apname", - "Endpoint": "https://beta.example.com", - "AccountId": "123456789012", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "get bucket with custom endpoint and dualstack is not supported@us-west-2", - "expect": { - "error": "Invalid Configuration: DualStack and custom endpoint are not supported" - }, - "operationInputs": [ + }, { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::UseDualStack": true, - "SDK::Endpoint": "https://s3-outposts.us-west-2.api.aws" + "AWS::Region": "us-east-1" }, - "operationName": "GetBucket", + "operationName": "DeleteAccessPointPolicy", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } - } - ], - "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Endpoint": "https://s3-outposts.us-west-2.api.aws", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "ListRegionalBuckets + OutpostId with fips in CN.", - "expect": { - "error": "Partition does not support FIPS" - }, - "operationInputs": [ + }, { "builtInParams": { - "AWS::Region": 
"cn-north-1", - "AWS::UseFIPS": true + "AWS::Region": "us-east-1" }, - "operationName": "ListRegionalBuckets", + "operationName": "GetAccessPointPolicyStatus", "operationParams": { - "OutpostId": "op-123", - "AccountId": "0123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } } ], "params": { - "AccountId": "0123456789012", - "OutpostId": "op-123", - "Region": "cn-north-1", + "AccountId": "871317572157", + "AccessPointName": "myaccesspoint--abcd-ab1--xa-s3", + "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true + "UseFIPS": false } }, { - "documentation": "ListRegionalBuckets + invalid OutpostId.", + "documentation": "Access Point APIs on express bucket routed to s3express-control for List", "expect": { - "error": "OutpostId must only contain a-z, A-Z, 0-9 and `-`." + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3express-control.us-east-1.amazonaws.com" + } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-1" + "AWS::Region": "us-east-1" }, - "operationName": "ListRegionalBuckets", + "operationName": "ListAccessPointsForDirectoryBuckets", "operationParams": { - "OutpostId": "?outpost/invalid+", - "AccountId": "0123456789012" + "DirectoryBucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157" } } ], "params": { - "AccountId": "0123456789012", - "OutpostId": "?outpost/invalid+", - "Region": "us-west-1", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "bucket ARN with mismatched accountId", - "expect": { - "error": "Invalid ARN: the accountId specified in the ARN (`999999`) does not match the parameter (`0123456789012`)" - }, - "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:999999:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": 
"0123456789012", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "OutpostId with invalid region", - "expect": { - "error": "Invalid region: region was not a valid DNS name." - }, - "params": { - "OutpostId": "op-123", - "Region": "invalid-region 42", - "AccountId": "0123456", + "AccountId": "871317572157", + "Region": "us-east-1", + "UseS3ExpressControlEndpoint": true, "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "OutpostId with RequireAccountId unset", + "documentation": "Access Point APIs on express bucket routed to s3express-control for FIPS", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-west-2", + "signingName": "s3express", + "signingRegion": "us-east-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-west-2.amazonaws.com" + "url": "https://s3express-control-fips.us-east-1.amazonaws.com" } }, - "params": { - "OutpostId": "op-123", - "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "Outpost Accesspoint ARN with arn region and client region mismatch with UseArnRegion=false", - "expect": { - "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" - }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3Control::UseArnRegion": false + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true + }, + "operationName": "CreateAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true }, "operationName": "GetAccessPoint", "operationParams": { - "Name": 
"arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } }, { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3Control::UseArnRegion": false + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true }, "operationName": "DeleteAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true + }, + "operationName": "PutAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true + }, + "operationName": "DeleteAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true + }, + "operationName": "PutAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Policy": "my-policy" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true + }, + "operationName": 
"DeleteAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPointPolicyStatus", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Region": "us-west-2", + "AccountId": "871317572157", + "AccessPointName": "myaccesspoint--abcd-ab1--xa-s3", + "Region": "us-east-1", "RequiresAccountId": true, - "UseArnRegion": false, "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "Outpost Bucket ARN with arn region and client region mismatch with UseArnRegion=false", + "documentation": "Access Point APIs on express bucket routed to s3express-control for FIPS for List", "expect": { - "error": "Invalid configuration: region from ARN `us-east-1` does not match client region `us-west-2` and UseArnRegion is `false`" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3express-control-fips.us-east-1.amazonaws.com" + } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - "SDK::Endpoint": "https://beta.example.com", - "AWS::S3Control::UseArnRegion": false + "AWS::Region": "us-east-1", + "AWS::UseFIPS": true }, - "operationName": "GetBucket", + "operationName": "ListAccessPointsForDirectoryBuckets", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" + "DirectoryBucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157" } } ], "params": { - "Bucket": 
"arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Endpoint": "https://beta.example.com", - "Region": "us-west-2", + "AccountId": "871317572157", + "Region": "us-east-1", + "UseS3ExpressControlEndpoint": true, "RequiresAccountId": true, - "UseArnRegion": false, "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "Accesspoint ARN with region mismatch and UseArnRegion unset", - "expect": { - "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, + "UseFIPS": true + } + }, + { + "documentation": "Access Point APIs on express bucket routed to s3express-control for china region", + "expect": { + "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", + "signingName": "s3express", + "signingRegion": "cn-north-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-east-1.amazonaws.com" + "url": "https://s3express-control.cn-north-1.amazonaws.com.cn" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "cn-north-1" + }, + "operationName": "CreateAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1" }, "operationName": "GetAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } }, { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "cn-north-1" }, "operationName": "DeleteAccessPoint", "operationParams": { - "Name": 
"arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1" + }, + "operationName": "PutAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1" + }, + "operationName": "GetAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1" + }, + "operationName": "DeleteAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1" + }, + "operationName": "PutAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Policy": "my-policy" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1" + }, + "operationName": "GetAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1" + }, + "operationName": "DeleteAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1" + }, + "operationName": "GetAccessPointPolicyStatus", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Region": "us-west-2", + "AccessPointName": 
"myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Region": "cn-north-1", "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "Bucket ARN with region mismatch and UseArnRegion unset", + "documentation": "Access Point APIs on express bucket routed to s3express-control for china region for List", "expect": { "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] - }, "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-east-1", + "signingName": "s3express", + "signingRegion": "cn-north-1", "disableDoubleEncoding": true } ] }, - "url": "https://s3-outposts.us-east-1.amazonaws.com" + "url": "https://s3express-control.cn-north-1.amazonaws.com.cn" } }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "cn-north-1" }, - "operationName": "GetBucket", + "operationName": "ListAccessPointsForDirectoryBuckets", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" + "DirectoryBucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157" } } ], "params": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", + "AccountId": "871317572157", + "Region": "cn-north-1", + "UseS3ExpressControlEndpoint": true, "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": false } }, { - "documentation": "Outpost Bucket ARN with partition mismatch with UseArnRegion=true", + "documentation": "Error when Access Point APIs on express bucket routed to s3express-control for china and FIPS", "expect": { - "error": "Client was configured for partition `aws` but ARN has `aws-cn`" + "error": "Partition does not support FIPS" }, "operationInputs": [ { "builtInParams": { - "AWS::Region": "us-west-2", - 
"AWS::S3Control::UseArnRegion": true + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true }, - "operationName": "GetBucket", + "operationName": "CreateAccessPoint", "operationParams": { - "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } } - } - ], - "params": { - "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseArnRegion": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "Accesspoint ARN with partition mismatch and UseArnRegion=true", - "expect": { - "error": "Client was configured for partition `aws` but ARN has `aws-cn`" - }, - "operationInputs": [ + }, { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3Control::UseArnRegion": true + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true }, "operationName": "GetAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } }, { "builtInParams": { - "AWS::Region": "us-west-2", - "AWS::S3Control::UseArnRegion": true + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true }, "operationName": "DeleteAccessPoint", "operationParams": { - "Name": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012" + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "ListAccessPointsForDirectoryBuckets", + "operationParams": { + 
"DirectoryBucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "PutAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "DeleteAccessPointScope", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "PutAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Policy": "my-policy" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "DeleteAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::UseFIPS": true + }, + "operationName": "GetAccessPointPolicyStatus", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" } } ], "params": { - "AccessPointName": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "123456789012", - "Region": "us-west-2", + 
"AccountId": "871317572157", + "Region": "cn-north-1", "RequiresAccountId": true, "UseDualStack": false, - "UseArnRegion": true, - "UseFIPS": false + "UseFIPS": true } }, { - "documentation": "Accesspoint ARN with region mismatch, UseArnRegion=false and custom endpoint", + "documentation": "Error Access Point APIs on express bucket routed to s3express-control invalid zone", "expect": { - "error": "Invalid configuration: region from ARN `cn-north-1` does not match client region `us-west-2` and UseArnRegion is `false`" + "error": "Unrecognized S3Express Access Point name format." }, - "params": { - "AccessPointName": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "Region": "us-west-2", - "Endpoint": "https://example.com", - "RequiresAccountId": true, - "UseDualStack": false, - "UseArnRegion": false, - "UseFIPS": false - } - }, - { - "documentation": "outpost bucket arn@us-west-2", - "expect": { - "endpoint": { - "headers": { - "x-amz-account-id": [ - "123456789012" - ], - "x-amz-outpost-id": [ - "op-01234567890123456" - ] + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "us-west-2", - "disableDoubleEncoding": true - } - ] + "operationName": "CreateAccessPoint", + "operationParams": { + "Name": "myaccesspoint-garbage-zone--xa-s3", + "Bucket": "mybucket-garbage-zone-garbage-zone", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "url": "https://s3-outposts.us-west-2.amazonaws.com" - } - }, - "operationInputs": [ + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157" + } + }, { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "us-east-1" }, - "operationName": 
"GetBucketVersioning", + "operationName": "DeleteAccessPoint", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012" + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157" } }, { "builtInParams": { - "AWS::Region": "us-west-2" + "AWS::Region": "us-east-1" }, - "operationName": "PutBucketVersioning", + "operationName": "PutAccessPointScope", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "AccountId": "123456789012", - "VersioningConfiguration": { - "Status": "Enabled" + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] } } - } - ], - "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "S3 Snow Control with bucket", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "snow", - "disableDoubleEncoding": true - } - ] + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "url": "https://10.0.1.12:433" - } - }, - "params": { - "Region": "snow", - "Bucket": "bucketName", - "Endpoint": "https://10.0.1.12:433", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "S3 Snow Control without bucket", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "snow", - "disableDoubleEncoding": true - } - ] + "operationName": "GetAccessPointScope", + "operationParams": { + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "url": 
"https://10.0.1.12:433" - } - }, - "params": { - "Region": "snow", - "Endpoint": "https://10.0.1.12:433", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "S3 Snow Control with bucket and without port", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "snow", - "disableDoubleEncoding": true - } - ] + "operationName": "DeleteAccessPointScope", + "operationParams": { + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "url": "https://10.0.1.12" - } - }, - "params": { - "Region": "snow", - "Bucket": "bucketName", - "Endpoint": "https://10.0.1.12", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "S3 Snow Control with bucket and with DNS", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "snow", - "disableDoubleEncoding": true - } - ] + "operationName": "PutAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157", + "Policy": "my-policy" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" }, - "url": "http://s3snow.com" + "operationName": "GetAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "DeleteAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "GetAccessPointPolicyStatus", + "operationParams": { + "Name": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157" + } } - }, - "params": { - "Region": "snow", - "Bucket": "bucketName", - "Endpoint": "http://s3snow.com", - 
"UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "S3 Snow Control with FIPS enabled", - "expect": { - "error": "S3 Snow does not support FIPS" - }, - "params": { - "Region": "snow", - "Bucket": "bucketName", - "Endpoint": "https://10.0.1.12:433", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "S3 Snow Control with Dualstack enabled", - "expect": { - "error": "S3 Snow does not support DualStack" - }, + ], "params": { - "Region": "snow", - "Bucket": "bucketName", - "Endpoint": "https://10.0.1.12:433", - "UseFIPS": false, - "UseDualStack": true + "AccessPointName": "myaccesspoint-garbage-zone--xa-s3", + "AccountId": "871317572157", + "Region": "us-east-1", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false } } ], @@ -7709,7 +9084,7 @@ "target": "com.amazonaws.s3control#CreateAccessPointResult" }, "traits": { - "smithy.api#documentation": "This operation is not supported by directory buckets.
\nCreates an access point and associates it with the specified bucket. For more information, see\n Managing\n Data Access with Amazon S3 Access Points in the\n Amazon S3 User Guide.
\n \nS3 on Outposts only supports VPC-style access points.
\nFor more information, see Accessing Amazon S3 on Outposts using\n virtual private cloud (VPC) only access points in the\n Amazon S3 User Guide.
\nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to CreateAccessPoint:
\n GetAccessPoint\n
\n\n DeleteAccessPoint\n
\n\n ListAccessPoints\n
\nCreates an access point and associates it to a specified bucket. For more information, see\n Managing\n access to shared datasets in general purpose buckets with access points or Managing\n access to shared datasets in directory buckets with access points in the\n Amazon S3 User Guide.
\n \nS3 on Outposts only supports VPC-style access points.
\nFor more information, see Accessing Amazon S3 on Outposts using\n virtual private cloud (VPC) only access points in the\n Amazon S3 User Guide.
\nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to CreateAccessPoint:
\n GetAccessPoint\n
\n\n DeleteAccessPoint\n
\n\n ListAccessPoints\n
\nThe name you want to assign to this access point.
", + "smithy.api#documentation": "The name you want to assign to this access point.
\nFor directory buckets, the access point name must consist of a base name that you provide and suffix that includes the ZoneID (Amazon Web Services Availability Zone or Local Zone) of your bucket location, followed by --xa-s3. For more information, see Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.
The Amazon Web Services account ID associated with the S3 bucket associated with this access point.
\nFor same account access point when your bucket and access point belong to the same account owner, the BucketAccountId is not required. \n For cross-account access point when your bucket and access point are not in the same account, the BucketAccountId is required.\n
For directory buckets, you can filter access control to specific prefixes, API operations, or a combination of both. For more information, see Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.
\nScope is not supported for access points for general purpose buckets.
\nThis operation is not supported by directory buckets.
\nDeletes the specified access point.
\nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to DeleteAccessPoint:
\n CreateAccessPoint\n
\n\n GetAccessPoint\n
\n\n ListAccessPoints\n
\nDeletes the specified access point.
\nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to DeleteAccessPoint:
\n CreateAccessPoint\n
\n\n GetAccessPoint\n
\n\n ListAccessPoints\n
\nThis operation is not supported by directory buckets.
\nDeletes the access point policy for the specified access point.
\n \nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to DeleteAccessPointPolicy:
\n PutAccessPointPolicy\n
\n\n GetAccessPointPolicy\n
\nDeletes the access point policy for the specified access point.
\n \nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to DeleteAccessPointPolicy:
\n PutAccessPointPolicy\n
\n\n GetAccessPointPolicy\n
\nThe Amazon Web Services account ID for the account that owns the specified access point.
", + "smithy.api#hostLabel": {}, + "smithy.api#httpHeader": "x-amz-account-id", + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "AccountId" + } + } + }, + "Name": { + "target": "com.amazonaws.s3control#AccessPointName", + "traits": { + "smithy.api#documentation": "The name of the access point you want to delete.
\nFor using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.
\nFor using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:. For example, to access the access point reports-ap through Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.
\n Deletes an existing access point scope for a directory bucket.
\nWhen you delete the scope of an access point, all prefixes and permissions are deleted.
\nTo use this operation, you must have the permission to perform the\n s3express:DeleteAccessPointScope action.
For information about REST API errors, see REST error responses.
", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v20180820/accesspoint/{Name}/scope", + "code": 200 + }, + "smithy.rules#staticContextParams": { + "RequiresAccountId": { + "value": true + }, + "UseS3ExpressControlEndpoint": { + "value": true + } + } } }, - "com.amazonaws.s3control#DeleteAccessPointRequest": { + "com.amazonaws.s3control#DeleteAccessPointScopeRequest": { "type": "structure", "members": { "AccountId": { "target": "com.amazonaws.s3control#AccountId", "traits": { - "smithy.api#documentation": "The Amazon Web Services account ID for the account that owns the specified access point.
", + "smithy.api#documentation": "\n The Amazon Web Services account ID that owns the access point with the scope that you want to delete.\n
", "smithy.api#hostLabel": {}, "smithy.api#httpHeader": "x-amz-account-id", "smithy.api#required": {}, @@ -8787,7 +10227,7 @@ "Name": { "target": "com.amazonaws.s3control#AccessPointName", "traits": { - "smithy.api#documentation": "The name of the access point you want to delete.
\nFor using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.
\nFor using this parameter with S3 on Outposts with the Amazon Web Services SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:. For example, to access the access point reports-ap through Outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.
\n The name of the access point with the scope that you want to delete.\n
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -10394,7 +11834,7 @@ "target": "com.amazonaws.s3control#GetAccessPointResult" }, "traits": { - "smithy.api#documentation": "This operation is not supported by directory buckets.
\nReturns configuration information about the specified access point.
\n \nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to GetAccessPoint:
\n CreateAccessPoint\n
\n\n DeleteAccessPoint\n
\n\n ListAccessPoints\n
\nReturns configuration information about the specified access point.
\n \nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to GetAccessPoint:
\n CreateAccessPoint\n
\n\n DeleteAccessPoint\n
\n\n ListAccessPoints\n
\nThis operation is not supported by directory buckets.
\nReturns the access point policy associated with the specified access point.
\nThe following actions are related to GetAccessPointPolicy:
\n PutAccessPointPolicy\n
\nReturns the access point policy associated with the specified access point.
\nThe following actions are related to GetAccessPointPolicy:
\n PutAccessPointPolicy\n
\n\n Returns the access point scope for a directory bucket.
\nTo use this operation, you must have the permission to perform the\n s3express:GetAccessPointScope action.
For information about REST API errors, see REST error responses.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v20180820/accesspoint/{Name}/scope", + "code": 200 + }, + "smithy.rules#staticContextParams": { + "RequiresAccountId": { + "value": true + }, + "UseS3ExpressControlEndpoint": { + "value": true + } + } + } + }, + "com.amazonaws.s3control#GetAccessPointScopeRequest": { + "type": "structure", + "members": { + "AccountId": { + "target": "com.amazonaws.s3control#AccountId", + "traits": { + "smithy.api#documentation": "\n The Amazon Web Services account ID that owns the access point with the scope that you want to retrieve.\n
", + "smithy.api#hostLabel": {}, + "smithy.api#httpHeader": "x-amz-account-id", + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "AccountId" + } + } + }, + "Name": { + "target": "com.amazonaws.s3control#AccessPointName", + "traits": { + "smithy.api#documentation": "The name of the access point with the scope you want to retrieve.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "AccessPointName" + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.s3control#GetAccessPointScopeResult": { + "type": "structure", + "members": { + "Scope": { + "target": "com.amazonaws.s3control#Scope", + "traits": { + "smithy.api#documentation": "The contents of the access point scope.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.s3control#GetBucket": { "type": "operation", "input": { @@ -13774,6 +15284,99 @@ } } }, + "com.amazonaws.s3control#ListAccessPointsForDirectoryBuckets": { + "type": "operation", + "input": { + "target": "com.amazonaws.s3control#ListAccessPointsForDirectoryBucketsRequest" + }, + "output": { + "target": "com.amazonaws.s3control#ListAccessPointsForDirectoryBucketsResult" + }, + "traits": { + "smithy.api#documentation": "Returns a list of the access points that are owned by the Amazon Web Services account and that are associated with the specified directory bucket.
\nTo list access points for general purpose buckets, see ListAccesspoints.
\nTo use this operation, you must have the permission to perform the\n s3express:ListAccessPointsForDirectoryBuckets action.
For information about REST API errors, see REST error responses.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v20180820/accesspointfordirectory", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "AccessPointList", + "pageSize": "MaxResults" + }, + "smithy.rules#staticContextParams": { + "RequiresAccountId": { + "value": true + }, + "UseS3ExpressControlEndpoint": { + "value": true + } + } + } + }, + "com.amazonaws.s3control#ListAccessPointsForDirectoryBucketsRequest": { + "type": "structure", + "members": { + "AccountId": { + "target": "com.amazonaws.s3control#AccountId", + "traits": { + "smithy.api#documentation": "The Amazon Web Services account ID that owns the access points.
", + "smithy.api#hostLabel": {}, + "smithy.api#httpHeader": "x-amz-account-id", + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "AccountId" + } + } + }, + "DirectoryBucket": { + "target": "com.amazonaws.s3control#BucketName", + "traits": { + "smithy.api#documentation": "The name of the directory bucket associated with the access points you want to list.
", + "smithy.api#httpQuery": "directoryBucket" + } + }, + "NextToken": { + "target": "com.amazonaws.s3control#NonEmptyMaxLength1024String", + "traits": { + "smithy.api#documentation": "\n If NextToken is returned, there are more access points available than requested in the maxResults value. The value of NextToken is a\n unique pagination token for each page. Make the call again using the returned token to\n retrieve the next page. Keep all other arguments unchanged. Each pagination token expires\n after 24 hours.\n
The maximum number of access points that you would like returned in the ListAccessPointsForDirectoryBuckets response. If the directory bucket is associated with more than this number of access points, the results include the pagination token NextToken. Make another call using the NextToken to retrieve more results.
Contains identification and configuration information for one or more access points associated with the directory bucket.
" + } + }, + "NextToken": { + "target": "com.amazonaws.s3control#NonEmptyMaxLength1024String", + "traits": { + "smithy.api#documentation": "\n If NextToken is returned, there are more access points available than requested in the maxResults value. The value of NextToken is a\n unique pagination token for each page. Make the call again using the returned token to\n retrieve the next page. Keep all other arguments unchanged. Each pagination token expires\n after 24 hours.\n
A container for the prefix-level storage metrics for S3 Storage Lens.
" } }, + "com.amazonaws.s3control#PrefixesList": { + "type": "list", + "member": { + "target": "com.amazonaws.s3control#Prefix", + "traits": { + "smithy.api#xmlName": "Prefix" + } + } + }, "com.amazonaws.s3control#Priority": { "type": "integer", "traits": { @@ -15910,7 +17522,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "This operation is not supported by directory buckets.
\nAssociates an access policy with the specified access point. Each access point can have only one policy,\n so a request made to this API replaces any existing policy associated with the specified\n access point.
\n \nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to PutAccessPointPolicy:
\n GetAccessPointPolicy\n
\nAssociates an access policy with the specified access point. Each access point can have only one policy,\n so a request made to this API replaces any existing policy associated with the specified\n access point.
\n \nAll Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.
The following actions are related to PutAccessPointPolicy:
\n GetAccessPointPolicy\n
\nThe policy that you want to apply to the specified access point. For more information about access point\n policies, see Managing data access with Amazon S3\n access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The policy that you want to apply to the specified access point. For more information about access point\n policies, see Managing access to shared datasets in general purpose buckets with\n access points or Managing access to shared datasets in directory bucekts with access points in the Amazon S3 User Guide.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.s3control#PutAccessPointScope": { + "type": "operation", + "input": { + "target": "com.amazonaws.s3control#PutAccessPointScopeRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "traits": { + "smithy.api#documentation": "Creates or replaces the access point scope for a directory bucket. You can use the access point scope to restrict access to specific prefixes, API operations, or a combination of both.
\nYou can specify any amount of prefixes, but the total length of characters of all prefixes must be less than 256 bytes in size.
\nTo use this operation, you must have the permission to perform the\n s3express:PutAccessPointScope action.
For information about REST API errors, see REST error responses.
", + "smithy.api#http": { + "method": "PUT", + "uri": "/v20180820/accesspoint/{Name}/scope", + "code": 200 + }, + "smithy.rules#staticContextParams": { + "RequiresAccountId": { + "value": true + }, + "UseS3ExpressControlEndpoint": { + "value": true + } + } + } + }, + "com.amazonaws.s3control#PutAccessPointScopeRequest": { + "type": "structure", + "members": { + "AccountId": { + "target": "com.amazonaws.s3control#AccountId", + "traits": { + "smithy.api#documentation": "\n The Amazon Web Services account ID that owns the access point with scope that you want to create or replace.\n
", + "smithy.api#hostLabel": {}, + "smithy.api#httpHeader": "x-amz-account-id", + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "AccountId" + } + } + }, + "Name": { + "target": "com.amazonaws.s3control#AccessPointName", + "traits": { + "smithy.api#documentation": "The name of the access point with the scope that you want to create or replace.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "AccessPointName" + } + } + }, + "Scope": { + "target": "com.amazonaws.s3control#Scope", + "traits": { + "smithy.api#documentation": "Object prefixes, API operations, or a combination of both.
", "smithy.api#required": {} } } @@ -18339,6 +20014,90 @@ "smithy.api#xmlName": "SSE-S3" } }, + "com.amazonaws.s3control#Scope": { + "type": "structure", + "members": { + "Prefixes": { + "target": "com.amazonaws.s3control#PrefixesList", + "traits": { + "smithy.api#documentation": "You can specify any amount of prefixes, but the total length of characters of all prefixes must be less than 256 bytes in size.
", + "smithy.api#xmlName": "Prefixes" + } + }, + "Permissions": { + "target": "com.amazonaws.s3control#ScopePermissionList", + "traits": { + "smithy.api#documentation": "You can include one or more API operations as permissions.
", + "smithy.api#xmlName": "Permissions" + } + } + }, + "traits": { + "smithy.api#documentation": "You can use the access point scope to restrict access to specific prefixes, API operations, or a combination of both.
\nFor more information, see Manage the scope of your access points for directory buckets.\n
" + } + }, + "com.amazonaws.s3control#ScopePermission": { + "type": "enum", + "members": { + "GetObject": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GetObject" + } + }, + "GetObjectAttributes": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GetObjectAttributes" + } + }, + "ListMultipartUploadParts": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ListMultipartUploadParts" + } + }, + "ListBucket": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ListBucket" + } + }, + "ListBucketMultipartUploads": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ListBucketMultipartUploads" + } + }, + "PutObject": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PutObject" + } + }, + "DeleteObject": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DeleteObject" + } + }, + "AbortMultipartUpload": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AbortMultipartUpload" + } + } + } + }, + "com.amazonaws.s3control#ScopePermissionList": { + "type": "list", + "member": { + "target": "com.amazonaws.s3control#ScopePermission", + "traits": { + "smithy.api#xmlName": "Permission" + } + } + }, "com.amazonaws.s3control#SecretAccessKey": { "type": "string", "traits": { diff --git a/codegen/sdk/aws-models/s3.json b/codegen/sdk/aws-models/s3.json index 3f1bdd8f8cf..0bb7c038590 100644 --- a/codegen/sdk/aws-models/s3.json +++ b/codegen/sdk/aws-models/s3.json @@ -100,7 +100,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name to which the upload was taking place.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name to which the upload was taking place.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
Name of the bucket to which the multipart upload was initiated.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
Name of the bucket to which the multipart upload was initiated.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the destination bucket.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, \n you get an HTTP 400 Bad Request error with the error code InvalidRequest.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must use the Outpost bucket access point ARN or the access point alias for the destination bucket. \n \n You can only copy objects within the same Outpost bucket. It's not supported to copy objects across different Amazon Web Services Outposts, between buckets on the same Outposts, or between Outposts buckets and any other bucket types. \n For more information about S3 on Outposts, see What is S3 on Outposts? in the S3 on Outposts guide. \n When you use this action with S3 on Outposts through the REST API, you must direct requests to the S3 on Outposts hostname, in the format \n \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. The hostname isn't required when you use the Amazon Web Services CLI or SDKs.\n
The name of the destination bucket.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, \n you get an HTTP 400 Bad Request error with the error code InvalidRequest.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must use the Outpost bucket access point ARN or the access point alias for the destination bucket. \n \n You can only copy objects within the same Outpost bucket. It's not supported to copy objects across different Amazon Web Services Outposts, between buckets on the same Outposts, or between Outposts buckets and any other bucket types. \n For more information about S3 on Outposts, see What is S3 on Outposts? in the S3 on Outposts guide. \n When you use this action with S3 on Outposts through the REST API, you must direct requests to the S3 on Outposts hostname, in the format \n \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. The hostname isn't required when you use the Amazon Web Services CLI or SDKs.\n
If the x-amz-storage-class header is not used, the copied object will be\n stored in the STANDARD Storage Class by default. The STANDARD\n storage class provides high durability and high availability. Depending on performance\n needs, you can specify a different Storage Class.
\n Directory buckets -\n For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. \nUnsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.
\n Amazon S3 on Outposts - S3 on Outposts only\n uses the OUTPOSTS Storage Class.
You can use the CopyObject action to change the storage class of an object\n that is already stored in Amazon S3 by using the x-amz-storage-class header. For\n more information, see Storage Classes in the\n Amazon S3 User Guide.
Before using an object as a source object for the copy operation, you must restore a\n copy of it if it meets any of the following conditions:
\nThe storage class of the source object is GLACIER or\n DEEP_ARCHIVE.
The storage class of the source object is INTELLIGENT_TIERING and\n its S3 Intelligent-Tiering access tier is Archive Access or\n Deep Archive Access.
For more information, see RestoreObject and Copying\n Objects in the Amazon S3 User Guide.
", + "smithy.api#documentation": "If the x-amz-storage-class header is not used, the copied object will be\n stored in the STANDARD Storage Class by default. The STANDARD\n storage class provides high durability and high availability. Depending on performance\n needs, you can specify a different Storage Class.
\n Directory buckets -\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones. \nUnsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.
\n Amazon S3 on Outposts - S3 on Outposts only\n uses the OUTPOSTS Storage Class.
You can use the CopyObject action to change the storage class of an object\n that is already stored in Amazon S3 by using the x-amz-storage-class header. For\n more information, see Storage Classes in the\n Amazon S3 User Guide.
Before using an object as a source object for the copy operation, you must restore a\n copy of it if it meets any of the following conditions:
\nThe storage class of the source object is GLACIER or\n DEEP_ARCHIVE.
The storage class of the source object is INTELLIGENT_TIERING and\n its S3 Intelligent-Tiering access tier is Archive Access or\n Deep Archive Access.
For more information, see RestoreObject and Copying\n Objects in the Amazon S3 User Guide.
", "smithy.api#httpHeader": "x-amz-storage-class" } }, @@ -20767,7 +23256,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The name of the bucket where the multipart upload is initiated and where the object is\n uploaded.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket where the multipart upload is initiated and where the object is\n uploaded.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. For more information, see\n Storage\n Classes in the Amazon S3 User Guide.
\nFor directory buckets, only the S3 Express One Zone storage class is supported to store\n newly created objects.
\nAmazon S3 on Outposts only uses the OUTPOSTS Storage Class.
\nBy default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. For more information, see\n Storage\n Classes in the Amazon S3 User Guide.
\nDirectory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
\nThe bucket name of the bucket containing the object.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name of the bucket containing the object.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the objects from which to remove the tags.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the objects from which to remove the tags.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the objects to delete.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the objects to delete.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name to get the bucket policy for.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n
\n Access points - When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
\n\n Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.
Access points and Object Lambda access points are not supported by directory buckets.
\nThe bucket name to get the bucket policy for.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n
\n Access points - When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
\n\n Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.
Object Lambda access points are not supported by directory buckets.
\nRetrieves an object from Amazon S3.
\nIn the GetObject request, specify the full key name for the object.
\n General purpose buckets - Both the virtual-hosted-style\n requests and the path-style requests are supported. For a virtual hosted-style request\n example, if you have the object photos/2006/February/sample.jpg, specify the\n object key name as /photos/2006/February/sample.jpg. For a path-style request\n example, if you have the object photos/2006/February/sample.jpg in the bucket\n named examplebucket, specify the object key name as\n /examplebucket/photos/2006/February/sample.jpg. For more information about\n request types, see HTTP Host\n Header Bucket Specification in the Amazon S3 User Guide.
\n Directory buckets -\n Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named amzn-s3-demo-bucket--usw2-az1--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name\n . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the\n Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - You\n must have the required permissions in a policy. To use\n GetObject, you must have the READ access to the\n object (or version). If you grant READ access to the anonymous\n user, the GetObject operation returns the object without using\n an authorization header. For more information, see Specifying permissions in a policy in the\n Amazon S3 User Guide.
If you include a versionId in your request header, you must\n have the s3:GetObjectVersion permission to access a specific\n version of an object. The s3:GetObject permission is not\n required in this scenario.
If you request the current version of an object without a specific\n versionId in the request header, only the\n s3:GetObject permission is required. The\n s3:GetObjectVersion permission is not required in this\n scenario.
If the object that you request doesn’t exist, the error that Amazon S3 returns\n depends on whether you also have the s3:ListBucket\n permission.
If you have the s3:ListBucket permission on the\n bucket, Amazon S3 returns an HTTP status code 404 Not Found\n error.
If you don’t have the s3:ListBucket permission, Amazon S3\n returns an HTTP status code 403 Access Denied\n error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .
If\n the\n object is encrypted using SSE-KMS, you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions\n in IAM identity-based policies and KMS key policies for the KMS\n key.
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval\n storage class, the S3 Glacier Deep Archive storage class, the\n S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier,\n before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an\n InvalidObjectState error. For information about restoring archived\n objects, see Restoring Archived\n Objects in the Amazon S3 User Guide.
\n Directory buckets -\n For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. \nUnsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.
Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for the GetObject requests, if your object uses\n server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your\n GetObject requests for the object that uses these types of keys,\n you’ll get an HTTP 400 Bad Request error.
\n Directory buckets -\n For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
\nThere are times when you want to override certain response header values of a\n GetObject response. For example, you might override the\n Content-Disposition response header value through your\n GetObject request.
You can override values for a set of response headers. These modified response\n header values are included only in a successful response, that is, when the HTTP\n status code 200 OK is returned. The headers you can override using\n the following query parameters in the request are a subset of the headers that\n Amazon S3 accepts when you create an object.
The response headers that you can override for the GetObject\n response are Cache-Control, Content-Disposition,\n Content-Encoding, Content-Language,\n Content-Type, and Expires.
To override values for a set of response headers in the GetObject\n response, you can use the following query parameters in the request.
\n response-cache-control\n
\n response-content-disposition\n
\n response-content-encoding\n
\n response-content-language\n
\n response-content-type\n
\n response-expires\n
When you use these parameters, you must sign the request by using either an\n Authorization header or a presigned URL. These parameters cannot be used with\n an unsigned (anonymous) request.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket-name.s3express-zone-id.region-code.amazonaws.com.
The following operations are related to GetObject:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nRetrieves an object from Amazon S3.
\nIn the GetObject request, specify the full key name for the object.
\n General purpose buckets - Both the virtual-hosted-style\n requests and the path-style requests are supported. For a virtual hosted-style request\n example, if you have the object photos/2006/February/sample.jpg, specify the\n object key name as /photos/2006/February/sample.jpg. For a path-style request\n example, if you have the object photos/2006/February/sample.jpg in the bucket\n named examplebucket, specify the object key name as\n /examplebucket/photos/2006/February/sample.jpg. For more information about\n request types, see HTTP Host\n Header Bucket Specification in the Amazon S3 User Guide.
\n Directory buckets -\n Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named amzn-s3-demo-bucket--usw2-az1--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name\n . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the\n Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the\n Amazon S3 User Guide.
\n General purpose bucket permissions - You\n must have the required permissions in a policy. To use\n GetObject, you must have the READ access to the\n object (or version). If you grant READ access to the anonymous\n user, the GetObject operation returns the object without using\n an authorization header. For more information, see Specifying permissions in a policy in the\n Amazon S3 User Guide.
If you include a versionId in your request header, you must\n have the s3:GetObjectVersion permission to access a specific\n version of an object. The s3:GetObject permission is not\n required in this scenario.
If you request the current version of an object without a specific\n versionId in the request header, only the\n s3:GetObject permission is required. The\n s3:GetObjectVersion permission is not required in this\n scenario.
If the object that you request doesn’t exist, the error that Amazon S3 returns\n depends on whether you also have the s3:ListBucket\n permission.
If you have the s3:ListBucket permission on the\n bucket, Amazon S3 returns an HTTP status code 404 Not Found\n error.
If you don’t have the s3:ListBucket permission, Amazon S3\n returns an HTTP status code 403 Access Denied\n error.
\n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .
If\n the\n object is encrypted using SSE-KMS, you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions\n in IAM identity-based policies and KMS key policies for the KMS\n key.
If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval\n storage class, the S3 Glacier Deep Archive storage class, the\n S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier,\n before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an\n InvalidObjectState error. For information about restoring archived\n objects, see Restoring Archived\n Objects in the Amazon S3 User Guide.
\n Directory buckets -\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones. \nUnsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.
Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for the GetObject requests, if your object uses\n server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your\n GetObject requests for the object that uses these types of keys,\n you’ll get an HTTP 400 Bad Request error.
\n Directory buckets -\n For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
\nThere are times when you want to override certain response header values of a\n GetObject response. For example, you might override the\n Content-Disposition response header value through your\n GetObject request.
You can override values for a set of response headers. These modified response\n header values are included only in a successful response, that is, when the HTTP\n status code 200 OK is returned. The headers you can override using\n the following query parameters in the request are a subset of the headers that\n Amazon S3 accepts when you create an object.
The response headers that you can override for the GetObject\n response are Cache-Control, Content-Disposition,\n Content-Encoding, Content-Language,\n Content-Type, and Expires.
To override values for a set of response headers in the GetObject\n response, you can use the following query parameters in the request.
\n response-cache-control\n
\n response-content-disposition\n
\n response-content-encoding\n
\n response-content-language\n
\n response-content-type\n
\n response-expires\n
When you use these parameters, you must sign the request by using either an\n Authorization header or a presigned URL. These parameters cannot be used with\n an unsigned (anonymous) request.
\n\n Directory buckets - The HTTP Host header syntax is \n Bucket-name.s3express-zone-id.region-code.amazonaws.com.
The following operations are related to GetObject:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nThe bucket name that contains the object for which to get the ACL information.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name that contains the object for which to get the ACL information.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -24989,7 +27478,7 @@ "StorageClass": { "target": "com.amazonaws.s3#StorageClass", "traits": { - "smithy.api#documentation": "Provides the storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.
\nFor more information, see Storage Classes.
\n\n Directory buckets -\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nProvides the storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.
\nFor more information, see Storage Classes.
\n\n Directory buckets -\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
The name of the bucket that contains the object.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket that contains the object.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the object whose legal hold status you want to retrieve.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object whose legal hold status you want to retrieve.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -25259,7 +27748,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket whose Object Lock configuration you want to retrieve.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket whose Object Lock configuration you want to retrieve.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -25314,7 +27803,7 @@ "Restore": { "target": "com.amazonaws.s3#Restore", "traits": { - "smithy.api#documentation": "Provides information about object restoration action and expiration time of the restored\n object copy.
\nThis functionality is not supported for directory buckets.\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nProvides information about object restoration action and expiration time of the restored\n object copy.
\nThis functionality is not supported for directory buckets.\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
Provides storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.
\n\n Directory buckets -\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nProvides storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.
\n\n Directory buckets -\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
The bucket name containing the object.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n Object Lambda access points - When you use this action with an Object Lambda access point, you must direct requests to the Object Lambda access point hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the object.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n Object Lambda access points - When you use this action with an Object Lambda access point, you must direct requests to the Object Lambda access point hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the object whose retention settings you want to retrieve.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object whose retention settings you want to retrieve.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -25885,7 +28374,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object for which to get the tagging information.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the object for which to get the tagging information.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.
Access points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n Object Lambda access points - When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. \nFor more information about InvalidAccessPointAliasError, see List of\n Error Codes.
Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
If the object is an archived object (an object whose storage class is GLACIER), the\n response includes this header if either the archive restoration is in progress (see RestoreObject) or an archive copy is already restored.
\nIf an archive copy is already restored, the header value indicates when Amazon S3 is\n scheduled to delete the object copy. For example:
\n\n x-amz-restore: ongoing-request=\"false\", expiry-date=\"Fri, 21 Dec 2012 00:00:00\n GMT\"\n
If the object restoration is in progress, the header returns the value\n ongoing-request=\"true\".
For more information about archiving objects, see Transitioning Objects: General Considerations.
\nThis functionality is not supported for directory buckets.\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nIf the object is an archived object (an object whose storage class is GLACIER), the\n response includes this header if either the archive restoration is in progress (see RestoreObject) or an archive copy is already restored.
\nIf an archive copy is already restored, the header value indicates when Amazon S3 is\n scheduled to delete the object copy. For example:
\n\n x-amz-restore: ongoing-request=\"false\", expiry-date=\"Fri, 21 Dec 2012 00:00:00\n GMT\"\n
If the object restoration is in progress, the header returns the value\n ongoing-request=\"true\".
For more information about archiving objects, see Transitioning Objects: General Considerations.
\nThis functionality is not supported for directory buckets.\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
Provides storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.
\nFor more information, see Storage Classes.
\n\n Directory buckets -\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nProvides storage class information of the object. Amazon S3 returns this header for all\n objects except for S3 Standard storage class objects.
\nFor more information, see Storage Classes.
\n\n Directory buckets -\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
The name of the bucket that contains the object.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket that contains the object.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket containing the objects.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket containing the objects.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The class of storage used to store the uploaded object.
\n\n Directory buckets -\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nThe class of storage used to store the uploaded object.
\n\n Directory buckets -\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
The name of the bucket to which the parts are being uploaded.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the parts are being uploaded.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The class of storage used to store the object.
\n\n Directory buckets -\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nThe class of storage used to store the object.
\n\n Directory buckets -\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
The class of storage used to store the object.
\n\n Directory buckets -\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nThe class of storage used to store the object.
\n\n Directory buckets -\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.
\nThis functionality is not supported for directory buckets.\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nSpecifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.
\nThis functionality is not supported for directory buckets.\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
The bucket name that contains the object to which you want to attach the ACL.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name that contains the object to which you want to attach the ACL.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the object that you want to place a legal hold on.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object that you want to place a legal hold on.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -33307,7 +35796,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name to which the PUT action was initiated.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name to which the PUT action was initiated.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. For more information, see\n Storage\n Classes in the Amazon S3 User Guide.
\nFor directory buckets, only the S3 Express One Zone storage class is supported to store\n newly created objects.
\nAmazon S3 on Outposts only uses the OUTPOSTS Storage Class.
\nBy default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. For more information, see\n Storage\n Classes in the Amazon S3 User Guide.
\nDirectory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
\nThe bucket name that contains the object you want to apply this Object Retention\n configuration to.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name that contains the object you want to apply this Object Retention\n configuration to.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -33767,7 +36256,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the object.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The byte array of partial, one or more result records. S3 Select doesn't guarantee that\n a record will be self-contained in one record frame. To ensure continuous streaming of\n data, S3 Select might split the same record across multiple record frames instead of\n aggregating the results in memory. Some S3 clients (for example, the SDK for Java) handle this behavior by creating a ByteStream out of the response by\n default. Other clients might not handle this behavior by default. In those cases, you must\n aggregate the results on the client side and parse the response.
The byte array of partial, one or more result records. S3 Select doesn't guarantee that\n a record will be self-contained in one record frame. To ensure continuous streaming of\n data, S3 Select might split the same record across multiple record frames instead of\n aggregating the results in memory. Some S3 clients (for example, the SDK for Java) handle this behavior by creating a ByteStream out of the response by\n default. Other clients might not handle this behavior by default. In those cases, you must\n aggregate the results on the client side and parse the response.
This operation is not supported for directory buckets.
\nRestores an archived copy of an object back into Amazon S3
\nThis functionality is not supported for Amazon S3 on Outposts.
\nThis action performs the following types of requests:
\n\n restore an archive - Restore an archived object
For more information about the S3 structure in the request body, see the\n following:
\n PutObject\n
\n\n Managing Access with ACLs in the\n Amazon S3 User Guide\n
\n\n Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide\n
\nTo use this operation, you must have permissions to perform the\n s3:RestoreObject action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval\n or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive\n storage classes, you must first initiate a restore request, and then wait until a\n temporary copy of the object is available. If you want a permanent copy of the\n object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket.\n To access an archived object, you must restore the object for the duration (number\n of days) that you specify. For objects in the Archive Access or Deep Archive\n Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request,\n and then wait until the object is moved into the Frequent Access tier.
\nTo restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.
\nWhen restoring an archived object, you can specify one of the following data\n access tier options in the Tier element of the request body:
\n Expedited - Expedited retrievals allow you to quickly access\n your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval\n storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests\n for restoring archives are required. For all but the largest archived\n objects (250 MB+), data accessed using Expedited retrievals is typically\n made available within 1–5 minutes. Provisioned capacity ensures that\n retrieval capacity for Expedited retrievals is available when you need it.\n Expedited retrievals and provisioned capacity are not available for objects\n stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
\n Standard - Standard retrievals allow you to access any of\n your archived objects within several hours. This is the default option for\n retrieval requests that do not specify the retrieval option. Standard\n retrievals typically finish within 3–5 hours for objects stored in the\n S3 Glacier Flexible Retrieval Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored\n in S3 Intelligent-Tiering.
\n Bulk - Bulk retrievals free for objects stored in the\n S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes,\n enabling you to retrieve large amounts, even petabytes, of data at no cost.\n Bulk retrievals typically finish within 5–12 hours for objects stored in the\n S3 Glacier Flexible Retrieval Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost\n retrieval option when restoring objects from\n S3 Glacier Deep Archive. They typically finish within 48 hours for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
For more information about archive retrieval options and provisioned capacity\n for Expedited data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.
\nTo get the status of object restoration, you can send a HEAD\n request. Operations return the x-amz-restore header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by\n reissuing the request with a new period. Amazon S3 updates the restoration period\n relative to the current time and charges only for the request—there are no\n data transfer charges. You cannot update the restoration period when Amazon S3 is\n actively processing your current restore request for the object.
\nIf your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.
\nA successful action returns either the 200 OK or 202\n Accepted status code.
If the object is not previously restored, then Amazon S3 returns 202\n Accepted in the response.
If the object is previously restored, Amazon S3 returns 200 OK in\n the response.
Special errors:
\n\n Code: RestoreAlreadyInProgress\n
\n\n Cause: Object restore is already in progress.\n
\n\n HTTP Status Code: 409 Conflict\n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Code: GlacierExpeditedRetrievalNotAvailable\n
\n\n Cause: expedited retrievals are currently not available.\n Try again later. (Returned if there is insufficient capacity to\n process the Expedited request. This error applies only to Expedited\n retrievals and not to S3 Standard or Bulk retrievals.)\n
\n\n HTTP Status Code: 503\n
\n\n SOAP Fault Code Prefix: N/A\n
\nThe following operations are related to RestoreObject:
This operation is not supported for directory buckets.
\nRestores an archived copy of an object back into Amazon S3
\nThis functionality is not supported for Amazon S3 on Outposts.
\nThis action performs the following types of requests:
\n\n restore an archive - Restore an archived object
For more information about the S3 structure in the request body, see the\n following:
\n PutObject\n
\n\n Managing Access with ACLs in the\n Amazon S3 User Guide\n
\n\n Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide\n
\nTo use this operation, you must have permissions to perform the\n s3:RestoreObject action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive\n storage classes, you must first initiate a restore request, and then wait until a\n temporary copy of the object is available. If you want a permanent copy of the\n object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket.\n To access an archived object, you must restore the object for the duration (number\n of days) that you specify. For objects in the Archive Access or Deep Archive\n Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request,\n and then wait until the object is moved into the Frequent Access tier.
\nTo restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.
\nWhen restoring an archived object, you can specify one of the following data\n access tier options in the Tier element of the request body:
\n Expedited - Expedited retrievals allow you to quickly access\n your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests\n for restoring archives are required. For all but the largest archived\n objects (250 MB+), data accessed using Expedited retrievals is typically\n made available within 1–5 minutes. Provisioned capacity ensures that\n retrieval capacity for Expedited retrievals is available when you need it.\n Expedited retrievals and provisioned capacity are not available for objects\n stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
\n Standard - Standard retrievals allow you to access any of\n your archived objects within several hours. This is the default option for\n retrieval requests that do not specify the retrieval option. Standard\n retrievals typically finish within 3–5 hours for objects stored in the\n S3 Glacier Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored\n in S3 Intelligent-Tiering.
\n Bulk - Bulk retrievals are free for objects stored in the\n S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes,\n enabling you to retrieve large amounts, even petabytes, of data at no cost.\n Bulk retrievals typically finish within 5–12 hours for objects stored in the\n S3 Glacier Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost\n retrieval option when restoring objects from\n S3 Glacier Deep Archive. They typically finish within 48 hours for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
For more information about archive retrieval options and provisioned capacity\n for Expedited data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.
\nTo get the status of object restoration, you can send a HEAD\n request. Operations return the x-amz-restore header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by\n reissuing the request with a new period. Amazon S3 updates the restoration period\n relative to the current time and charges only for the request—there are no\n data transfer charges. You cannot update the restoration period when Amazon S3 is\n actively processing your current restore request for the object.
\nIf your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.
\nA successful action returns either the 200 OK or 202\n Accepted status code.
If the object is not previously restored, then Amazon S3 returns 202\n Accepted in the response.
If the object is previously restored, Amazon S3 returns 200 OK in\n the response.
Special errors:
\n\n Code: RestoreAlreadyInProgress\n
\n\n Cause: Object restore is already in progress.\n
\n\n HTTP Status Code: 409 Conflict\n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Code: GlacierExpeditedRetrievalNotAvailable\n
\n\n Cause: expedited retrievals are currently not available.\n Try again later. (Returned if there is insufficient capacity to\n process the Expedited request. This error applies only to Expedited\n retrievals and not to S3 Standard or Bulk retrievals.)\n
\n\n HTTP Status Code: 503\n
\n\n SOAP Fault Code Prefix: N/A\n
\nThe following operations are related to RestoreObject:
The bucket name containing the object to restore.
\n\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name containing the object to restore.
\n\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.
\nThis functionality is not supported for directory buckets.\n Only the S3 Express One Zone storage class is supported by directory buckets to store objects.
\nSpecifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.
\nThis functionality is not supported for directory buckets.\n Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones.
The bucket name.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, \n you get an HTTP 400 Bad Request error with the error code InvalidRequest.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name.
\n\n Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, \n you get an HTTP 400 Bad Request error with the error code InvalidRequest.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nAccess points and Object Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
\n\n Directory buckets -\n When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format \n Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format \n bucket-base-name--zone-id--x-s3 (for example, \n amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming\n restrictions, see Directory bucket naming\n rules in the Amazon S3 User Guide.
\n Access points - When you use this action with an access point for general purpose buckets, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When you use this action with an access point for directory buckets, you must provide the access point name in place of the bucket name. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nObject Lambda access points are not supported by directory buckets.
\n\n S3 on Outposts - When you use this action with S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the \n form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts, the destination bucket must be the Outposts access point ARN or the access point alias. For more information about S3 on Outposts, see What is S3 on Outposts? in the Amazon S3 User Guide.
The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image\n created on the instance.
\nThe value of InstanceType passed as part of the ResourceSpec\n in the CreateApp call overrides the value passed as part of the\n ResourceSpec configured for the user profile or the domain. If\n InstanceType is not specified in any of those three ResourceSpec\n values for a KernelGateway app, the CreateApp call fails with a\n request validation error.
\n Indicates whether the application is launched in recovery mode.\n
" + } } }, "traits": { @@ -13741,6 +13897,12 @@ "traits": { "smithy.api#documentation": "A shell script that runs every time you start a notebook instance, including when you\n create the notebook instance. The shell script must be a base64-encoded string.
" } + }, + "Tags": { + "target": "com.amazonaws.sagemaker#TagList", + "traits": { + "smithy.api#documentation": "An array of key-value pairs. You can use tags to categorize your Amazon Web Services\n resources in different ways, for example, by purpose, owner, or environment. For more\n information, see Tagging Amazon Web Services Resources.
" + } } }, "traits": { @@ -13998,6 +14160,12 @@ "smithy.api#required": {} } }, + "KmsKeyId": { + "target": "com.amazonaws.sagemaker#KmsKeyId", + "traits": { + "smithy.api#documentation": "SageMaker Partner AI Apps uses Amazon Web Services KMS to encrypt data at rest using an Amazon Web Services managed key by default. For more control, specify a\n customer managed key.
" + } + }, "MaintenanceConfig": { "target": "com.amazonaws.sagemaker#PartnerAppMaintenanceConfig", "traits": { @@ -14994,7 +15162,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts a transform job. A transform job uses a trained model to get inferences on a\n dataset and saves these results to an Amazon S3 location that you specify.
\nTo perform batch transformations, you create a transform job and use the data that you\n have readily available.
\nIn the request body, you provide the following:
\n\n TransformJobName - Identifies the transform job. The name must be\n unique within an Amazon Web Services Region in an Amazon Web Services account.
\n ModelName - Identifies the model to use. ModelName\n must be the name of an existing Amazon SageMaker model in the same Amazon Web Services Region and Amazon Web Services\n\t\t account. For information on creating a model, see CreateModel.
\n TransformInput - Describes the dataset to be transformed and the\n Amazon S3 location where it is stored.
\n TransformOutput - Identifies the Amazon S3 location where you want\n Amazon SageMaker to save the results from the transform job.
\n TransformResources - Identifies the ML compute instances for the\n transform job.
For more information about how batch transformation works, see Batch\n Transform.
" + "smithy.api#documentation": "Starts a transform job. A transform job uses a trained model to get inferences on a\n dataset and saves these results to an Amazon S3 location that you specify.
\nTo perform batch transformations, you create a transform job and use the data that you\n have readily available.
\nIn the request body, you provide the following:
\n\n TransformJobName - Identifies the transform job. The name must be\n unique within an Amazon Web Services Region in an Amazon Web Services account.
\n ModelName - Identifies the model to use. ModelName\n must be the name of an existing Amazon SageMaker model in the same Amazon Web Services Region and Amazon Web Services\n\t\t account. For information on creating a model, see CreateModel.
\n TransformInput - Describes the dataset to be transformed and the\n Amazon S3 location where it is stored.
\n TransformOutput - Identifies the Amazon S3 location where you want\n Amazon SageMaker to save the results from the transform job.
\n TransformResources - Identifies the ML compute instances and AMI\n image versions for the transform job.
For more information about how batch transformation works, see Batch\n Transform.
" } }, "com.amazonaws.sagemaker#CreateTransformJobRequest": { @@ -19264,6 +19432,12 @@ "smithy.api#documentation": "The status.
" } }, + "RecoveryMode": { + "target": "com.amazonaws.sagemaker#Boolean", + "traits": { + "smithy.api#documentation": "\n Indicates whether the application is launched in recovery mode.\n
" + } + }, "LastHealthCheckTimestamp": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { @@ -25523,12 +25697,24 @@ "smithy.api#documentation": "The time that the SageMaker Partner AI App was created.
" } }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "The time that the SageMaker Partner AI App was last modified.
" + } + }, "ExecutionRoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#documentation": "The ARN of the IAM role associated with the SageMaker Partner AI App.
" } }, + "KmsKeyId": { + "target": "com.amazonaws.sagemaker#KmsKeyId", + "traits": { + "smithy.api#documentation": "The Amazon Web Services KMS customer managed key used to encrypt the data at rest associated with SageMaker Partner AI Apps.
" + } + }, "BaseUrl": { "target": "com.amazonaws.sagemaker#String2048", "traits": { @@ -31513,25 +31699,25 @@ "target": "com.amazonaws.sagemaker#ResourcePropertyName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "A resource property name. For example, TrainingJobName. For\n valid property names, see SearchRecord.\n You must specify a valid property for the resource.
A resource property name. For example, TrainingJobName. For valid property\n names, see SearchRecord. You must\n specify a valid property for the resource.
A Boolean binary operator that is used to evaluate the filter. The operator field\n contains one of the following values:
\nThe value of Name equals Value.
The value of Name doesn't equal Value.
The Name property exists.
The Name property does not exist.
The value of Name is greater than Value.\n Not supported for text properties.
The value of Name is greater than or equal to Value.\n Not supported for text properties.
The value of Name is less than Value.\n Not supported for text properties.
The value of Name is less than or equal to Value.\n Not supported for text properties.
The value of Name is one of the comma delimited strings in\n Value. Only supported for text properties.
The value of Name contains the string Value.\n Only supported for text properties.
A SearchExpression can include the Contains operator\n multiple times when the value of Name is one of the following:
\n Experiment.DisplayName\n
\n Experiment.ExperimentName\n
\n Experiment.Tags\n
\n Trial.DisplayName\n
\n Trial.TrialName\n
\n Trial.Tags\n
\n TrialComponent.DisplayName\n
\n TrialComponent.TrialComponentName\n
\n TrialComponent.Tags\n
\n TrialComponent.InputArtifacts\n
\n TrialComponent.OutputArtifacts\n
A SearchExpression can include only one Contains operator\n for all other values of Name. In these cases, if you include multiple\n Contains operators in the SearchExpression, the result is\n the following error message: \"'CONTAINS' operator usage limit of 1\n exceeded.\"
A Boolean binary operator that is used to evaluate the filter. The operator field contains\n one of the following values:
\nThe value of Name equals Value.
The value of Name doesn't equal Value.
The Name property exists.
The Name property does not exist.
The value of Name is greater than Value. Not supported for\n text properties.
The value of Name is greater than or equal to Value. Not\n supported for text properties.
The value of Name is less than Value. Not supported for\n text properties.
The value of Name is less than or equal to Value. Not\n supported for text properties.
The value of Name is one of the comma delimited strings in\n Value. Only supported for text properties.
The value of Name contains the string Value. Only\n supported for text properties.
A SearchExpression can include the Contains operator\n multiple times when the value of Name is one of the following:
\n Experiment.DisplayName\n
\n Experiment.ExperimentName\n
\n Experiment.Tags\n
\n Trial.DisplayName\n
\n Trial.TrialName\n
\n Trial.Tags\n
\n TrialComponent.DisplayName\n
\n TrialComponent.TrialComponentName\n
\n TrialComponent.Tags\n
\n TrialComponent.InputArtifacts\n
\n TrialComponent.OutputArtifacts\n
A SearchExpression can include only one Contains operator\n for all other values of Name. In these cases, if you include multiple\n Contains operators in the SearchExpression, the result is\n the following error message: \"'CONTAINS' operator usage limit of 1\n exceeded.\"
A value used with Name and Operator to determine which\n resources satisfy the filter's condition. For numerical properties, Value\n must be an integer or floating-point decimal. For timestamp properties,\n Value must be an ISO 8601 date-time string of the following format:\n YYYY-mm-dd'T'HH:MM:SS.
A value used with Name and Operator to determine which resources\n satisfy the filter's condition. For numerical properties, Value must be an\n integer or floating-point decimal. For timestamp properties, Value must be an ISO\n 8601 date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS.
A conditional statement for a search expression that includes a resource property, a\n Boolean operator, and a value. Resources that match the statement are returned in the\n results from the Search API.
\nIf you specify a Value, but not an Operator, SageMaker uses the\n equals operator.
In search, there are several property types:
\nTo define a metric filter, enter a value using the form\n \"Metrics., where is\n a metric name. For example, the following filter searches for training jobs\n with an \"accuracy\" metric greater than\n \"0.9\":
\n {\n
\n \"Name\": \"Metrics.accuracy\",\n
\n \"Operator\": \"GreaterThan\",\n
\n \"Value\": \"0.9\"\n
\n }\n
To define a hyperparameter filter, enter a value with the form\n \"HyperParameters.<name>\". Decimal hyperparameter\n values are treated as a decimal in a comparison if the specified\n Value is also a decimal value. If the specified\n Value is an integer, the decimal hyperparameter values are\n treated as integers. For example, the following filter is satisfied by\n training jobs with a \"learning_rate\" hyperparameter that is\n less than \"0.5\":
\n {\n
\n \"Name\": \"HyperParameters.learning_rate\",\n
\n \"Operator\": \"LessThan\",\n
\n \"Value\": \"0.5\"\n
\n }\n
To define a tag filter, enter a value with the form\n Tags.<key>.
A conditional statement for a search expression that includes a resource property, a\n Boolean operator, and a value. Resources that match the statement are returned in the results\n from the Search API.
\nIf you specify a Value, but not an Operator, SageMaker uses the\n equals operator.
In search, there are several property types:
\nTo define a metric filter, enter a value using the form\n \"Metrics., where is a metric name.\n For example, the following filter searches for training jobs with an\n \"accuracy\" metric greater than \"0.9\":
\n {\n
\n \"Name\": \"Metrics.accuracy\",\n
\n \"Operator\": \"GreaterThan\",\n
\n \"Value\": \"0.9\"\n
\n }\n
To define a hyperparameter filter, enter a value with the form\n \"HyperParameters.<name>\". Decimal hyperparameter values are treated\n as a decimal in a comparison if the specified Value is also a decimal\n value. If the specified Value is an integer, the decimal hyperparameter\n values are treated as integers. For example, the following filter is satisfied by\n training jobs with a \"learning_rate\" hyperparameter that is less than\n \"0.5\":
\n {\n
\n \"Name\": \"HyperParameters.learning_rate\",\n
\n \"Operator\": \"LessThan\",\n
\n \"Value\": \"0.5\"\n
\n }\n
To define a tag filter, enter a value with the form\n Tags.<key>.
An auto-complete API for the search functionality in the SageMaker console. It returns\n suggestions of possible matches for the property name to use in Search\n queries. Provides suggestions for HyperParameters, Tags, and\n Metrics.
An auto-complete API for the search functionality in the SageMaker console. It returns\n suggestions of possible matches for the property name to use in Search queries.\n Provides suggestions for HyperParameters, Tags, and\n Metrics.
A list of property names for a Resource that match a\n SuggestionQuery.
A list of property names for a Resource that match a\n SuggestionQuery.
The name of the property to use in the nested filters. The value must match a listed property name,\n such as InputDataConfig.
The name of the property to use in the nested filters. The value must match a listed\n property name, such as InputDataConfig.
A list of filters. Each filter acts on a property. Filters must contain at least one\n Filters value. For example, a NestedFilters call might\n include a filter on the PropertyName parameter of the\n InputDataConfig property:\n InputDataConfig.DataSource.S3DataSource.S3Uri.
A list of filters. Each filter acts on a property. Filters must contain at least one\n Filters value. For example, a NestedFilters call might include a\n filter on the PropertyName parameter of the InputDataConfig\n property: InputDataConfig.DataSource.S3DataSource.S3Uri.
A list of nested Filter objects. A resource must satisfy the conditions\n of all filters to be included in the results returned from the Search API.
\nFor example, to filter on a training job's InputDataConfig property with a\n specific channel name and S3Uri prefix, define the following filters:
\n '{Name:\"InputDataConfig.ChannelName\", \"Operator\":\"Equals\", \"Value\":\"train\"}',\n
\n '{Name:\"InputDataConfig.DataSource.S3DataSource.S3Uri\", \"Operator\":\"Contains\",\n \"Value\":\"mybucket/catdata\"}'\n
A list of nested Filter objects. A resource must\n satisfy the conditions of all filters to be included in the results returned from the Search\n API.
\nFor example, to filter on a training job's InputDataConfig property with a\n specific channel name and S3Uri prefix, define the following filters:
\n '{Name:\"InputDataConfig.ChannelName\", \"Operator\":\"Equals\", \"Value\":\"train\"}',\n
\n '{Name:\"InputDataConfig.DataSource.S3DataSource.S3Uri\", \"Operator\":\"Contains\",\n \"Value\":\"mybucket/catdata\"}'\n
A property name returned from a GetSearchSuggestions call that specifies\n a value in the PropertyNameQuery field.
A property name returned from a GetSearchSuggestions call that specifies a\n value in the PropertyNameQuery field.
Metadata for a register model job step.
" } }, + "com.amazonaws.sagemaker#Relation": { + "type": "enum", + "members": { + "EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EqualTo" + } + }, + "GREATER_THAN_OR_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GreaterThanOrEqualTo" + } + } + } + }, "com.amazonaws.sagemaker#ReleaseNotes": { "type": "string", "traits": { @@ -63761,7 +64012,7 @@ "target": "com.amazonaws.sagemaker#SearchResponse" }, "traits": { - "smithy.api#documentation": "Finds SageMaker resources that match a search query. Matching resources are returned\n as a list of SearchRecord objects in the response. You can sort the search\n results by any resource property in a ascending or descending order.
You can query against the following value types: numeric, text, Boolean, and\n timestamp.
\nThe Search API may provide access to otherwise restricted data. See Amazon SageMaker \n API Permissions: Actions, Permissions, and Resources Reference for more\n information.
\nFinds SageMaker resources that match a search query. Matching resources are returned as a list\n of SearchRecord objects in the response. You can sort the search results by any\n resource property in a ascending or descending order.
You can query against the following value types: numeric, text, Boolean, and\n timestamp.
\nThe Search API may provide access to otherwise restricted data. See Amazon SageMaker API\n Permissions: Actions, Permissions, and Resources Reference for more\n information.
\nA Boolean operator used to evaluate the search expression. If you want every\n conditional statement in all lists to be satisfied for the entire search expression to\n be true, specify And. If only a single conditional statement needs to be\n true for the entire search expression to be true, specify Or. The default\n value is And.
A Boolean operator used to evaluate the search expression. If you want every conditional\n statement in all lists to be satisfied for the entire search expression to be true, specify\n And. If only a single conditional statement needs to be true for the entire\n search expression to be true, specify Or. The default value is\n And.
A multi-expression that searches for the specified resource or resources in a search. All resource\n objects that satisfy the expression's condition are included in the search results. You must specify at\n least one subexpression, filter, or nested filter. A SearchExpression can contain up to\n twenty elements.
A SearchExpression contains the following components:
A list of Filter objects. Each filter defines a simple Boolean\n expression comprised of a resource property name, Boolean operator, and\n value.
A list of NestedFilter objects. Each nested filter defines a list\n of Boolean expressions using a list of resource properties. A nested filter is\n satisfied if a single object in the list satisfies all Boolean\n expressions.
A list of SearchExpression objects. A search expression object\n can be nested in a list of search expression objects.
A Boolean operator: And or Or.
A multi-expression that searches for the specified resource or resources in a search. All\n resource objects that satisfy the expression's condition are included in the search results.\n You must specify at least one subexpression, filter, or nested filter. A\n SearchExpression can contain up to twenty elements.
A SearchExpression contains the following components:
A list of Filter objects. Each filter defines a simple Boolean expression\n comprised of a resource property name, Boolean operator, and value.
A list of NestedFilter objects. Each nested filter defines a list of\n Boolean expressions using a list of resource properties. A nested filter is satisfied if a\n single object in the list satisfies all Boolean expressions.
A list of SearchExpression objects. A search expression object can be\n nested in a list of search expression objects.
A Boolean operator: And or Or.
A Boolean conditional statement. Resources must satisfy this condition to be\n included in search results. You must provide at least one subexpression, filter, or\n nested filter. The maximum number of recursive SubExpressions,\n NestedFilters, and Filters that can be included in a\n SearchExpression object is 50.
A Boolean conditional statement. Resources must satisfy this condition to be included in\n search results. You must provide at least one subexpression, filter, or nested filter. The\n maximum number of recursive SubExpressions, NestedFilters, and\n Filters that can be included in a SearchExpression object is\n 50.
The name of the resource property used to sort the SearchResults. The\n default is LastModifiedTime.
The name of the resource property used to sort the SearchResults. The default\n is LastModifiedTime.
How SearchResults are ordered. Valid values are Ascending or\n Descending. The default is Descending.
How SearchResults are ordered. Valid values are Ascending or\n Descending. The default is Descending.
If more than MaxResults resources match the specified\n SearchExpression, the response includes a\n NextToken. The NextToken can be passed to the next\n SearchRequest to continue retrieving results.
If more than MaxResults resources match the specified\n SearchExpression, the response includes a NextToken. The\n NextToken can be passed to the next SearchRequest to continue\n retrieving results.
\n A cross account filter option. When the value is \"CrossAccount\" the \n search results will only include resources made discoverable to you from other \n accounts. When the value is \"SameAccount\" or null the \n search results will only include resources from your account. Default is \n null. For more information on searching for resources made \n discoverable to your account, see \n Search discoverable resources in the SageMaker Developer Guide.\n The maximum number of ResourceCatalogs viewable is 1000.\n
A cross account filter option. When the value is \"CrossAccount\" the search\n results will only include resources made discoverable to you from other accounts. When the\n value is \"SameAccount\" or null the search results will only include\n resources from your account. Default is null. For more information on searching\n for resources made discoverable to your account, see Search\n discoverable resources in the SageMaker Developer Guide. The maximum number of\n ResourceCatalogs viewable is 1000.
\n Limits the results of your search request to the resources that you can access.\n
" + "smithy.api#documentation": "Limits the results of your search request to the resources that you can access.
" } } }, @@ -63963,6 +64214,12 @@ "traits": { "smithy.api#documentation": "If the result of the previous Search request was truncated, the response\n includes a NextToken. To retrieve the next set of results, use the token in the next\n request.
The total number of matching results.
" + } } }, "traits": { @@ -64043,7 +64300,9 @@ "DurationHours": { "target": "com.amazonaws.sagemaker#TrainingPlanDurationHoursInput", "traits": { - "smithy.api#documentation": "The desired duration in hours for the training plan offerings.
" + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "The desired duration in hours for the training plan offerings.
", + "smithy.api#required": {} } }, "TargetResources": { @@ -67041,12 +67300,12 @@ "PropertyNameQuery": { "target": "com.amazonaws.sagemaker#PropertyNameQuery", "traits": { - "smithy.api#documentation": "Defines a property name hint. Only property\n names that begin with the specified hint are included in the response.
" + "smithy.api#documentation": "Defines a property name hint. Only property names that begin with the specified hint are\n included in the response.
" } } }, "traits": { - "smithy.api#documentation": "Specified in the GetSearchSuggestions request.\n Limits the property names that are included in the response.
" + "smithy.api#documentation": "Specified in the GetSearchSuggestions request. Limits the property names that are included in the\n response.
" } }, "com.amazonaws.sagemaker#SynthesizedJsonHumanLoopActivationConditions": { @@ -68151,6 +68410,26 @@ } } }, + "com.amazonaws.sagemaker#TotalHits": { + "type": "structure", + "members": { + "Value": { + "target": "com.amazonaws.sagemaker#Long", + "traits": { + "smithy.api#documentation": "The total number of matching results. This value may be exact or an estimate, depending on\n the Relation field.
Indicates the relationship between the returned Value and the actual total\n number of matching results. Possible values are:
\n EqualTo: The Value is the exact count of matching\n results.
\n GreaterThanOrEqualTo: The Value is a lower bound of the\n actual count of matching results.
Represents the total number of matching results and indicates how accurate that count\n is.
\nThe Value field provides the count, which may be exact or estimated. The\n Relation field indicates whether it's an exact figure or a lower bound. This\n helps understand the full scope of search results, especially when dealing with large result\n sets.
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt model data on the storage volume\n attached to the ML compute instance(s) that run the batch transform job.
\nCertain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a VolumeKmsKeyId when using an instance type with\n local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\n\n The VolumeKmsKeyId can be any of the following formats:
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n
Alias name: alias/ExampleAlias\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\n
Specifies an option from a collection of preconfigured Amazon Machine Image (AMI)\n images. Each image is configured by Amazon Web Services with a set of software and driver\n versions.
\nAccelerator: GPU
\nNVIDIA driver version: 470
\nAccelerator: GPU
\nNVIDIA driver version: 535
\nThe key that specifies the tag that you're using to filter the search results. It must be in the following format: Tags..
The key that specifies the tag that you're using to filter the search results. It must be\n in the following format: Tags..
The list of key-value pairs used to filter your search results. If a search result contains a key from your list, it is included in the final search response if the value associated with the key in the result matches the value you specified. \n If the value doesn't match, the result is excluded from the search response. Any resources that don't have a key from the list that you've provided will also be included in the search response.
" + "smithy.api#documentation": "The list of key-value pairs used to filter your search results. If a search result\n contains a key from your list, it is included in the final search response if the value\n associated with the key in the result matches the value you specified. If the value doesn't\n match, the result is excluded from the search response. Any resources that don't have a key\n from the list that you've provided will also be included in the search response.
" } }, "com.amazonaws.sagemaker#VisibilityConditionsKey": { diff --git a/codegen/sdk/aws-models/securityhub.json b/codegen/sdk/aws-models/securityhub.json index 38e333d0d86..cf2c5272192 100644 --- a/codegen/sdk/aws-models/securityhub.json +++ b/codegen/sdk/aws-models/securityhub.json @@ -33847,7 +33847,7 @@ "StandardsArn": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The ARN of a standard.
" + "smithy.api#documentation": "The ARN of the standard.
" } }, "Name": { @@ -34291,7 +34291,7 @@ } }, "traits": { - "smithy.api#documentation": "The reason for the current status of a standard subscription.
" + "smithy.api#documentation": "The reason for the current status of your subscription to the standard.
" } }, "com.amazonaws.securityhub#StandardsSubscription": { @@ -34301,7 +34301,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The ARN of a resource that represents your subscription to a supported standard.
", + "smithy.api#documentation": "The ARN of the resource that represents your subscription to the standard.
", "smithy.api#required": {} } }, @@ -34309,7 +34309,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The ARN of a standard.
", + "smithy.api#documentation": "The ARN of the standard.
", "smithy.api#required": {} } }, @@ -34325,14 +34325,14 @@ "target": "com.amazonaws.securityhub#StandardsStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The status of the standard subscription.
\nThe status values are as follows:
\n\n PENDING - Standard is in the process of being enabled.
\n READY - Standard is enabled.
\n INCOMPLETE - Standard could not be enabled completely. Some controls may not be available.
\n DELETING - Standard is in the process of being disabled.
\n FAILED - Standard could not be disabled.
The status of your subscription to the standard. Possible values are:
\n\n PENDING - The standard is in the process of being enabled. Or the standard is already \n enabled and Security Hub is adding new controls to the standard.
\n READY - The standard is enabled.
\n INCOMPLETE - The standard could not be enabled completely. One or more errors (StandardsStatusReason) \n occurred when Security Hub attempted to enable the standard.
\n DELETING - The standard is in the process of being disabled.
\n FAILED - The standard could not be disabled. One or more errors (StandardsStatusReason) \n occurred when Security Hub attempted to disable the standard.
Indicates whether the controls associated with this standards subscription can be viewed and updated.
\nThe values are as follows:
\n\n READY_FOR_UPDATES - Controls associated with this standards subscription can be viewed and updated.
\n NOT_READY_FOR_UPDATES - Controls associated with this standards subscription cannot be retrieved or updated yet. Security Hub is still processing a request to create the controls.
Specifies whether you can retrieve information about and configure individual controls that apply to the standard. Possible values are:
\n\n READY_FOR_UPDATES - Controls in the standard can be retrieved and configured.
\n NOT_READY_FOR_UPDATES - Controls in the standard cannot be retrieved or configured.
Used to associate a configuration set with a MailManager archive.
" } }, + "com.amazonaws.sesv2#Attachment": { + "type": "structure", + "members": { + "RawContent": { + "target": "com.amazonaws.sesv2#RawAttachmentData", + "traits": { + "smithy.api#documentation": "The raw data of the attachment. It needs to be base64-encoded if you are accessing Amazon SES\n directly through the HTTPS interface. If you are accessing Amazon SES using an Amazon Web Services\n SDK, the SDK takes care of the base 64-encoding for you.
", + "smithy.api#required": {} + } + }, + "ContentDisposition": { + "target": "com.amazonaws.sesv2#AttachmentContentDisposition", + "traits": { + "smithy.api#documentation": " A standard descriptor indicating how the attachment should be rendered in the email.\n Supported values: ATTACHMENT or INLINE.
The file name for the attachment as it will appear in the email.\n Amazon SES restricts certain file extensions. To ensure attachments are accepted,\n check the Unsupported attachment types\n in the Amazon SES Developer Guide.
", + "smithy.api#required": {} + } + }, + "ContentDescription": { + "target": "com.amazonaws.sesv2#AttachmentContentDescription", + "traits": { + "smithy.api#documentation": "A brief description of the attachment content.
" + } + }, + "ContentId": { + "target": "com.amazonaws.sesv2#AttachmentContentId", + "traits": { + "smithy.api#documentation": "Unique identifier for the attachment, used for referencing attachments with INLINE disposition in HTML content.
" + } + }, + "ContentTransferEncoding": { + "target": "com.amazonaws.sesv2#AttachmentContentTransferEncoding", + "traits": { + "smithy.api#documentation": " Specifies how the attachment is encoded.\n Supported values: BASE64, QUOTED_PRINTABLE, SEVEN_BIT.
The MIME type of the attachment.
\nExample: application/pdf, image/jpeg\n
Contains metadata and attachment raw content.
" + } + }, + "com.amazonaws.sesv2#AttachmentContentDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1000 + } + } + }, + "com.amazonaws.sesv2#AttachmentContentDisposition": { + "type": "enum", + "members": { + "ATTACHMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ATTACHMENT" + } + }, + "INLINE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INLINE" + } + } + } + }, + "com.amazonaws.sesv2#AttachmentContentId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 78 + } + } + }, + "com.amazonaws.sesv2#AttachmentContentTransferEncoding": { + "type": "enum", + "members": { + "BASE64": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BASE64" + } + }, + "QUOTED_PRINTABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUOTED_PRINTABLE" + } + }, + "SEVEN_BIT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SEVEN_BIT" + } + } + } + }, + "com.amazonaws.sesv2#AttachmentContentType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 78 + } + } + }, + "com.amazonaws.sesv2#AttachmentFileName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 255 + } + } + }, + "com.amazonaws.sesv2#AttachmentList": { + "type": "list", + "member": { + "target": "com.amazonaws.sesv2#Attachment" + } + }, "com.amazonaws.sesv2#AttributesData": { "type": "string" }, @@ -3609,7 +3743,7 @@ "Simple": { "target": "com.amazonaws.sesv2#Message", "traits": { - "smithy.api#documentation": "The simple email message. The message consists of a subject and a message body.
" + "smithy.api#documentation": "The simple email message. The message consists of a subject, message body and attachments list.
" } }, "Raw": { @@ -3626,7 +3760,7 @@ } }, "traits": { - "smithy.api#documentation": "An object that defines the entire content of the email, including the message headers\n and the body content. You can create a simple email message, in which you specify the\n subject and the text and HTML versions of the message body. You can also create raw\n messages, in which you specify a complete MIME-formatted message. Raw messages can\n include attachments and custom headers.
" + "smithy.api#documentation": "An object that defines the entire content of the email, including the message headers, body content,\n and attachments. For a simple email message, you specify the subject and provide both text\n and HTML versions of the message body. You can also add attachments to simple and templated\n messages. For a raw message, you provide a complete MIME-formatted message, which can\n include custom headers and attachments.
" } }, "com.amazonaws.sesv2#EmailInsights": { @@ -6704,7 +6838,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all of the contact lists available.
", + "smithy.api#documentation": "Lists all of the contact lists available.
\nIf your output includes a \"NextToken\" field with a string value, this indicates there may be additional\n contacts on the filtered list - regardless of the number of contacts returned.
", "smithy.api#http": { "method": "GET", "uri": "/v2/email/contact-lists", @@ -8059,6 +8193,12 @@ "traits": { "smithy.api#documentation": "The list of message headers that will be added to the email message.
" } + }, + "Attachments": { + "target": "com.amazonaws.sesv2#AttachmentList", + "traits": { + "smithy.api#documentation": "The List of attachments to include in your email. All recipients will receive the same attachments.
" + } } }, "traits": { @@ -10140,6 +10280,9 @@ } } }, + "com.amazonaws.sesv2#RawAttachmentData": { + "type": "blob" + }, "com.amazonaws.sesv2#RawMessage": { "type": "structure", "members": { @@ -11427,7 +11570,7 @@ { "conditions": [], "endpoint": { - "url": "https://{EndpointId}.endpoints.email.{PartitionResult#dualStackDnsSuffix}", + "url": "https://{EndpointId}.endpoints.email.global.{PartitionResult#dualStackDnsSuffix}", "properties": { "authSchemes": [ { @@ -12412,7 +12555,7 @@ } ] }, - "url": "https://abc123.456def.endpoints.email.api.aws" + "url": "https://abc123.456def.endpoints.email.global.api.aws" } }, "params": { @@ -12918,6 +13061,12 @@ "traits": { "smithy.api#documentation": "The list of message headers that will be added to the email message.
" } + }, + "Attachments": { + "target": "com.amazonaws.sesv2#AttachmentList", + "traits": { + "smithy.api#documentation": "The List of attachments to include in your email. All recipients will receive the same attachments.
" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/ssm.json b/codegen/sdk/aws-models/ssm.json index 20a79a1c25b..299bdf67d41 100644 --- a/codegen/sdk/aws-models/ssm.json +++ b/codegen/sdk/aws-models/ssm.json @@ -4328,6 +4328,12 @@ "traits": { "smithy.api#documentation": "Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only.
" } + }, + "AvailableSecurityUpdatesComplianceStatus": { + "target": "com.amazonaws.ssm#PatchComplianceStatus", + "traits": { + "smithy.api#documentation": "Indicates whether managed nodes for which there are available security-related patches that\n have not been approved by the baseline are being defined as COMPLIANT or\n NON_COMPLIANT. This option is specified when the CreatePatchBaseline\n or UpdatePatchBaseline commands are run.
Applies to Windows Server managed nodes only.
" + } } }, "traits": { @@ -6818,6 +6824,12 @@ "smithy.api#documentation": "Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only.
" } }, + "AvailableSecurityUpdatesComplianceStatus": { + "target": "com.amazonaws.ssm#PatchComplianceStatus", + "traits": { + "smithy.api#documentation": "Indicates the status you want to assign to security patches that are available but not\n approved because they don't meet the installation criteria specified in the patch\n baseline.
\nExample scenario: Security patches that you might want installed can be skipped if you have\n specified a long period to wait after a patch is released before installation. If an update to\n the patch is released during your specified waiting period, the waiting period for installing the\n patch starts over. If the waiting period is too long, multiple versions of the patch could be\n released but never installed.
\nSupported for Windows Server managed nodes only.
" + } + }, "ClientToken": { "target": "com.amazonaws.ssm#ClientToken", "traits": { @@ -10350,6 +10362,13 @@ "smithy.api#default": null, "smithy.api#documentation": "The number of managed nodes with patches installed that are specified as other than\n Critical or Security but aren't compliant with the patch baseline. The\n status of these managed nodes is NON_COMPLIANT.
The number of managed nodes for which security-related patches are available but not\n approved because because they didn't meet the patch baseline requirements. For example, an\n updated version of a patch might have been released before the specified auto-approval period was\n over.
\nApplies to Windows Server managed nodes only.
" + } } }, "traits": { @@ -14520,6 +14539,12 @@ "traits": { "smithy.api#documentation": "Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only.
" } + }, + "AvailableSecurityUpdatesComplianceStatus": { + "target": "com.amazonaws.ssm#PatchComplianceStatus", + "traits": { + "smithy.api#documentation": "Indicates the compliance status of managed nodes for which security-related patches are\n available but were not approved. This preference is specified when the\n CreatePatchBaseline or UpdatePatchBaseline commands are run.
Applies to Windows Server managed nodes only.
" + } } }, "traits": { @@ -15509,6 +15534,13 @@ "smithy.api#documentation": "The number of patches from the patch baseline that aren't applicable for the managed node\n and therefore aren't installed on the node. This number may be truncated if the list of patch\n names is very large. The number of patches beyond this limit are reported in\n UnreportedNotApplicableCount.
The number of security-related patches that are available but not approved because they\n didn't meet the patch baseline requirements. For example, an updated version of a patch might\n have been released before the specified auto-approval period was over.
\nApplies to Windows Server managed nodes only.
" + } + }, "OperationStartTime": { "target": "com.amazonaws.ssm#DateTime", "traits": { @@ -20768,13 +20800,13 @@ "AccountIdsToAdd": { "target": "com.amazonaws.ssm#AccountIdList", "traits": { - "smithy.api#documentation": "The Amazon Web Services users that should have access to the document. The account IDs can either be a\n group of account IDs or All.
" + "smithy.api#documentation": "The Amazon Web Services users that should have access to the document. The account IDs can either be a\n group of account IDs or All. You must specify a value for this parameter or\n the AccountIdsToRemove parameter.
The Amazon Web Services users that should no longer have access to the document. The Amazon Web Services user\n can either be a group of account IDs or All. This action has a higher\n priority than AccountIdsToAdd. If you specify an ID to add and the same ID to\n remove, the system removes access to the document.
The Amazon Web Services users that should no longer have access to the document. The Amazon Web Services user\n can either be a group of account IDs or All. This action has a higher\n priority than AccountIdsToAdd. If you specify an ID to add and the same ID to\n remove, the system removes access to the document. You must specify a value for this parameter or\n the AccountIdsToAdd parameter.
The fully qualified name of the parameter that you want to create or update.
\nYou can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name\n itself.
\nThe fully qualified name includes the complete hierarchy of the parameter path and name. For\n parameters in a hierarchy, you must include a leading forward slash character (/) when you create\n or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13\n
Naming Constraints:
\nParameter names are case sensitive.
\nA parameter name must be unique within an Amazon Web Services Region
\nA parameter name can't be prefixed with \"aws\" or \"ssm\"\n (case-insensitive).
Parameter names can include only the following symbols and letters:\n a-zA-Z0-9_.-\n
In addition, the slash character ( / ) is used to delineate hierarchies in parameter\n names. For example: /Dev/Production/East/Project-ABC/MyParameter\n
A parameter name can't include spaces.
\nParameter hierarchies are limited to a maximum depth of fifteen levels.
\nFor additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.
\nThe maximum length constraint of 2048 characters listed below includes 1037 characters\n reserved for internal use by Systems Manager. The maximum length for a parameter name that you create is\n 1011 characters. This includes the characters in the ARN that precede the name you specify, such\n as arn:aws:ssm:us-east-2:111122223333:parameter/.
The fully qualified name of the parameter that you want to create or update.
\nYou can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name\n itself.
\nThe fully qualified name includes the complete hierarchy of the parameter path and name. For\n parameters in a hierarchy, you must include a leading forward slash character (/) when you create\n or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13\n
Naming Constraints:
\nParameter names are case sensitive.
\nA parameter name must be unique within an Amazon Web Services Region
\nA parameter name can't be prefixed with \"aws\" or \"ssm\"\n (case-insensitive).
Parameter names can include only the following symbols and letters:\n a-zA-Z0-9_.-\n
In addition, the slash character ( / ) is used to delineate hierarchies in parameter\n names. For example: /Dev/Production/East/Project-ABC/MyParameter\n
A parameter name can't include spaces.
\nParameter hierarchies are limited to a maximum depth of fifteen levels.
\nFor additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.
\nThe reported maximum length of 2048 characters for a parameter name includes 1037\n characters that are reserved for internal use by Systems Manager. The maximum length for a parameter name\n that you specify is 1011 characters.
\nThis count of 1011 characters includes the characters in the ARN that precede the name you\n specify. This ARN length will vary depending on your partition and Region. For example, the\n following 45 characters count toward the 1011 character maximum for a parameter created in the\n US East (Ohio) Region: arn:aws:ssm:us-east-2:111122223333:parameter/.
Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only.
" } }, + "AvailableSecurityUpdatesComplianceStatus": { + "target": "com.amazonaws.ssm#PatchComplianceStatus", + "traits": { + "smithy.api#documentation": "Indicates the status to be assigned to security patches that are available but not approved\n because they don't meet the installation criteria specified in the patch baseline.
\nExample scenario: Security patches that you might want installed can be skipped if you have\n specified a long period to wait after a patch is released before installation. If an update to\n the patch is released during your specified waiting period, the waiting period for installing the\n patch starts over. If the waiting period is too long, multiple versions of the patch could be\n released but never installed.
\nSupported for Windows Server managed nodes only.
" + } + }, "Replace": { "target": "com.amazonaws.ssm#Boolean", "traits": { @@ -31583,6 +31650,12 @@ "traits": { "smithy.api#documentation": "Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only.
" } + }, + "AvailableSecurityUpdatesComplianceStatus": { + "target": "com.amazonaws.ssm#PatchComplianceStatus", + "traits": { + "smithy.api#documentation": "Indicates the compliance status of managed nodes for which security-related patches are\n available but were not approved. This preference is specified when the\n CreatePatchBaseline or UpdatePatchBaseline commands are run.
Applies to Windows Server managed nodes only.
" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/sso-oidc.json b/codegen/sdk/aws-models/sso-oidc.json index 7cd97dd1221..6befd215c17 100644 --- a/codegen/sdk/aws-models/sso-oidc.json +++ b/codegen/sdk/aws-models/sso-oidc.json @@ -1024,6 +1024,20 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.ssooidc#AwsAdditionalDetails": { + "type": "structure", + "members": { + "identityContext": { + "target": "com.amazonaws.ssooidc#IdentityContext", + "traits": { + "smithy.api#documentation": "STS context assertion that carries a user identifier to the Amazon Web Services service that it calls\n and can be used to obtain an identity-enhanced IAM role session. This value corresponds to\n the sts:identity_context claim in the ID token.
This structure contains Amazon Web Services-specific parameter extensions for the token endpoint\n responses and includes the identity context.
" + } + }, "com.amazonaws.ssooidc#ClientId": { "type": "string" }, @@ -1314,7 +1328,10 @@ "openid", "aws", "sts:identity_context" - ] + ], + "awsAdditionalDetails": { + "identityContext": "EXAMPLEIDENTITYCONTEXT" + } } }, { @@ -1336,7 +1353,10 @@ "openid", "aws", "sts:identity_context" - ] + ], + "awsAdditionalDetails": { + "identityContext": "EXAMPLEIDENTITYCONTEXT" + } } }, { @@ -1380,7 +1400,10 @@ "openid", "aws", "sts:identity_context" - ] + ], + "awsAdditionalDetails": { + "identityContext": "EXAMPLEIDENTITYCONTEXT" + } } } ], @@ -1512,6 +1535,12 @@ "traits": { "smithy.api#documentation": "The list of scopes for which authorization is granted. The access token that is issued is\n limited to the scopes that are granted.
" } + }, + "awsAdditionalDetails": { + "target": "com.amazonaws.ssooidc#AwsAdditionalDetails", + "traits": { + "smithy.api#documentation": "A structure containing information from the idToken. Only the\n identityContext is in it, which is a value extracted from the\n idToken. This provides direct access to identity information without requiring\n JWT parsing.
Indicates the status of the gateway as a member of the Active Directory domain.
\nThis field is only used as part of a JoinDomain request. It is not\n affected by Active Directory connectivity changes that occur after the\n JoinDomain request succeeds.
\n ACCESS_DENIED: Indicates that the JoinDomain operation\n failed due to an authentication error.
\n DETACHED: Indicates that gateway is not joined to a domain.
\n JOINED: Indicates that the gateway has successfully joined a\n domain.
\n JOINING: Indicates that a JoinDomain operation is in\n progress.
\n NETWORK_ERROR: Indicates that JoinDomain operation\n failed due to a network or connectivity error.
\n TIMEOUT: Indicates that the JoinDomain operation failed\n because the operation didn't complete within the allotted time.
\n UNKNOWN_ERROR: Indicates that the JoinDomain operation\n failed due to another type of error.
Indicates the status of the gateway as a member of the Active Directory domain.
\nThis field is only used as part of a JoinDomain request. It is not\n affected by Active Directory connectivity changes that occur after the\n JoinDomain request succeeds.
\n ACCESS_DENIED: Indicates that the JoinDomain operation\n failed due to an authentication error.
\n DETACHED: Indicates that gateway is not joined to a domain.
\n JOINED: Indicates that the gateway has successfully joined a\n domain.
\n JOINING: Indicates that a JoinDomain operation is in\n progress.
\n INSUFFICIENT_PERMISSIONS: Indicates that the JoinDomain\n operation failed because the specified user lacks the necessary permissions to join\n the domain.
\n NETWORK_ERROR: Indicates that JoinDomain operation\n failed due to a network or connectivity error.
\n TIMEOUT: Indicates that the JoinDomain operation failed\n because the operation didn't complete within the allotted time.
\n UNKNOWN_ERROR: Indicates that the JoinDomain operation\n failed due to another type of error.
Returns a list of existing cache reports for all file shares associated with your\n Amazon Web Services account. This list includes all information provided by the\n DescribeCacheReport action, such as report name, status, completion\n progress, start time, end time, filters, and tags.
Returns a list of existing cache reports for all file shares associated with your\n Amazon Web Services account. This list includes all information provided by the\n DescribeCacheReport action, such as report name, status, completion\n progress, start time, end time, filters, and tags.
The ARN of the Amazon S3 bucket where the cache report will be saved.
\nWe do not recommend saving the cache report to the same Amazon S3 bucket for\n which you are generating the report.
\nThis field does not accept access point ARNs.
\nThe ARN of the Amazon S3 bucket where you want to save the cache report.
\nWe do not recommend saving the cache report to the same Amazon S3 bucket for\n which you are generating the report.
\nThis field does not accept access point ARNs.
\nThe Amazon Web Services Region of the Amazon S3 bucket associated with the file\n share for which you want to generate the cache report.
", + "smithy.api#documentation": "The Amazon Web Services Region of the Amazon S3 bucket where you want to save the\n cache report.
", "smithy.api#required": {} } }, diff --git a/codegen/sdk/aws-models/taxsettings.json b/codegen/sdk/aws-models/taxsettings.json index 457f69dfb5f..ee8971938c6 100644 --- a/codegen/sdk/aws-models/taxsettings.json +++ b/codegen/sdk/aws-models/taxsettings.json @@ -201,6 +201,12 @@ "smithy.api#documentation": "\n Additional tax information associated with your TRN in Saudi Arabia.\n
" } }, + "indonesiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#IndonesiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "\n
" + } + }, "vietnamAdditionalInfo": { "target": "com.amazonaws.taxsettings#VietnamAdditionalInfo", "traits": { @@ -218,6 +224,12 @@ "traits": { "smithy.api#documentation": "Additional tax information to specify for a TRN in Greece.
" } + }, + "uzbekistanAdditionalInfo": { + "target": "com.amazonaws.taxsettings#UzbekistanAdditionalInfo", + "traits": { + "smithy.api#documentation": "\n Additional tax information to specify for a TRN in Uzbekistan.\n
" + } } }, "traits": { @@ -323,6 +335,12 @@ "smithy.api#documentation": "\n Additional tax information in India. \n
" } }, + "indonesiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#IndonesiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "Additional tax information associated with your TRN in Indonesia.
" + } + }, "vietnamAdditionalInfo": { "target": "com.amazonaws.taxsettings#VietnamAdditionalInfo", "traits": { @@ -340,6 +358,12 @@ "traits": { "smithy.api#documentation": "Additional tax information to specify for a TRN in Greece.\n
" } + }, + "uzbekistanAdditionalInfo": { + "target": "com.amazonaws.taxsettings#UzbekistanAdditionalInfo", + "traits": { + "smithy.api#documentation": "\n Additional tax information associated with your TRN in Uzbekistan.\n
" + } } }, "traits": { @@ -707,7 +731,7 @@ "aws.iam#iamAction": { "documentation": "Grants store permission" }, - "smithy.api#documentation": "Adds or updates tax registration for multiple accounts in batch. This can be used to add\n or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first.
\nTo call this API operation for specific countries, see the following country-specific\n requirements.
\n\n Bangladesh\n
\nYou must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.
\n Brazil\n
\nYou must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation.
\nFor Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address.
\n\n Georgia\n
\nThe valid personType values are Physical Person and Business.
\n Kenya\n
\nYou must specify the personType in the kenyaAdditionalInfo\n field of the additionalTaxInformation object.
If the personType is Physical Person, you must specify the\n tax registration certificate document in the taxRegistrationDocuments field\n of the VerificationDetails object.
\n Malaysia\n
\nThe sector valid values are Business and Individual.
\n RegistrationType valid values are NRIC for individual, and TIN and sales and service tax (SST) for Business.
For individual, you can specify the taxInformationNumber in MalaysiaAdditionalInfo with NRIC type, and a valid MyKad or NRIC number.
For business, you must specify a businessRegistrationNumber in MalaysiaAdditionalInfo with a TIN type and tax identification number.
For business resellers, you must specify a businessRegistrationNumber and taxInformationNumber in MalaysiaAdditionalInfo with a sales and service tax (SST) type and a valid SST number.
For business resellers with service codes, you must specify businessRegistrationNumber, taxInformationNumber, and distinct serviceTaxCodes in MalaysiaAdditionalInfo with a SST type and valid sales and service tax (SST) number. By using this API operation, Amazon Web Services registers your self-declaration that you’re an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD), and have a valid SST number.
Amazon Web Services reserves the right to seek additional information and/or take other actions to\n support your self-declaration as appropriate.
\nAmazon Web Services is currently registered under the following service tax codes. You must include\n at least one of the service tax codes in the service tax code strings to declare yourself\n as an authorized registered business reseller.
\nTaxable service and service tax codes:
\nConsultancy - 9907061674
\nTraining or coaching service - 9907071685
\nIT service - 9907101676
\nDigital services and electronic medium - 9907121690
\n\n Nepal\n
\nThe sector valid values are Business and Individual.
\n Saudi Arabia\n
\nFor address, you must specify addressLine3.
\n South Korea\n
\nYou must specify the certifiedEmailId and legalName in the\n TaxRegistrationEntry object. Use Korean characters for\n legalName.
You must specify the businessRepresentativeName,\n itemOfBusiness, and lineOfBusiness in the\n southKoreaAdditionalInfo field of the additionalTaxInformation\n object. Use Korean characters for these fields.
You must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.
For the address object, use Korean characters for addressLine1, addressLine2\n city, postalCode, and stateOrRegion.
\n Spain\n
\nYou must specify the registrationType in the\n spainAdditionalInfo field of the additionalTaxInformation\n object.
If the registrationType is Local, you must specify the tax\n registration certificate document in the taxRegistrationDocuments field of\n the VerificationDetails object.
\n Turkey\n
\nYou must specify the sector in the taxRegistrationEntry object.
If your sector is Business, Individual, or\n Government:
Specify the taxOffice. If your\n sector is Individual, don't enter this value.
(Optional) Specify the kepEmailId. If your\n sector is Individual, don't enter this value.
\n Note: In the Tax Settings page of the Billing console, Government appears as Public institutions\n
If your sector is Business and you're subject to KDV tax,\n you must specify your industry in the industries field.
For address, you must specify districtOrCounty.
\n Ukraine\n
\nThe sector valid values are Business and Individual.
Adds or updates tax registration for multiple accounts in batch. This can be used to add\n or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first.
\nTo call this API operation for specific countries, see the following country-specific\n requirements.
\n\n Bangladesh\n
\nYou must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.
\n Brazil\n
\nYou must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation.
\nFor Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address.
\n\n Georgia\n
\nThe valid personType values are Physical Person and Business.
\n Indonesia\n
\n\n PutTaxRegistration: The use of this operation to submit tax information is subject to the Amazon Web Services service terms. By submitting, you’re providing consent for Amazon Web Services to validate NIK, NPWP, and NITKU data, provided by you with the Directorate General of Taxes of Indonesia in accordance with the Minister of Finance Regulation (PMK) Number 112/PMK.03/2022.
\n BatchPutTaxRegistration: The use of this operation to submit tax information is subject to the Amazon Web Services service terms. By submitting, you’re providing consent for Amazon Web Services to validate NIK, NPWP, and NITKU data, provided by you with the Directorate General of Taxes of Indonesia in accordance with the Minister of Finance Regulation (PMK) Number 112/PMK.03/2022, through our third-party partner PT Achilles Advanced Management (OnlinePajak).
You must specify the taxRegistrationNumberType in the indonesiaAdditionalInfo field of the additionalTaxInformation object.
If you specify decisionNumber, you must specify the ppnExceptionDesignationCode in the indonesiaAdditionalInfo field of the additionalTaxInformation object. If the taxRegistrationNumberType is set to NPWP or NITKU, valid values for ppnExceptionDesignationCode are either 01, 02, 03, 07, or 08.
For other taxRegistrationNumberType values, ppnExceptionDesignationCode must be either 01, 07, or 08.
If ppnExceptionDesignationCode is 07, you must specify the decisionNumber in the indonesiaAdditionalInfo field of the additionalTaxInformation object.
\n Kenya\n
\nYou must specify the personType in the kenyaAdditionalInfo\n field of the additionalTaxInformation object.
If the personType is Physical Person, you must specify the\n tax registration certificate document in the taxRegistrationDocuments field\n of the VerificationDetails object.
\n Malaysia\n
\nThe sector valid values are Business and Individual.
\n RegistrationType valid values are NRIC for individual, and TIN and sales and service tax (SST) for Business.
For individual, you can specify the taxInformationNumber in MalaysiaAdditionalInfo with NRIC type, and a valid MyKad or NRIC number.
For business, you must specify a businessRegistrationNumber in MalaysiaAdditionalInfo with a TIN type and tax identification number.
For business resellers, you must specify a businessRegistrationNumber and taxInformationNumber in MalaysiaAdditionalInfo with a sales and service tax (SST) type and a valid SST number.
For business resellers with service codes, you must specify businessRegistrationNumber, taxInformationNumber, and distinct serviceTaxCodes in MalaysiaAdditionalInfo with a SST type and valid sales and service tax (SST) number. By using this API operation, Amazon Web Services registers your self-declaration that you’re an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD), and have a valid SST number.
Amazon Web Services reserves the right to seek additional information and/or take other actions to\n support your self-declaration as appropriate.
\nAmazon Web Services is currently registered under the following service tax codes. You must include\n at least one of the service tax codes in the service tax code strings to declare yourself\n as an authorized registered business reseller.
\nTaxable service and service tax codes:
\nConsultancy - 9907061674
\nTraining or coaching service - 9907071685
\nIT service - 9907101676
\nDigital services and electronic medium - 9907121690
\n\n Nepal\n
\nThe sector valid values are Business and Individual.
\n Saudi Arabia\n
\nFor address, you must specify addressLine3.
\n South Korea\n
\nYou must specify the certifiedEmailId and legalName in the\n TaxRegistrationEntry object. Use Korean characters for\n legalName.
You must specify the businessRepresentativeName,\n itemOfBusiness, and lineOfBusiness in the\n southKoreaAdditionalInfo field of the additionalTaxInformation\n object. Use Korean characters for these fields.
You must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.
For the address object, use Korean characters for addressLine1, addressLine2\n city, postalCode, and stateOrRegion.
\n Spain\n
\nYou must specify the registrationType in the\n spainAdditionalInfo field of the additionalTaxInformation\n object.
If the registrationType is Local, you must specify the tax\n registration certificate document in the taxRegistrationDocuments field of\n the VerificationDetails object.
\n Turkey\n
\nYou must specify the sector in the taxRegistrationEntry object.
If your sector is Business, Individual, or\n Government:
Specify the taxOffice. If your\n sector is Individual, don't enter this value.
(Optional) Specify the kepEmailId. If your\n sector is Individual, don't enter this value.
\n Note: In the Tax Settings page of the Billing console, Government appears as Public institutions\n
If your sector is Business and you're subject to KDV tax,\n you must specify your industry in the industries field.
For address, you must specify districtOrCounty.
\n Ukraine\n
\nThe sector valid values are Business and Individual.
The tax registration number type.
" + } + }, + "ppnExceptionDesignationCode": { + "target": "com.amazonaws.taxsettings#PpnExceptionDesignationCode", + "traits": { + "smithy.api#documentation": "Exception code if you are designated by Directorate General of Taxation (DGT) as a VAT collector, non-collected VAT, or VAT-exempt customer.
" + } + }, + "decisionNumber": { + "target": "com.amazonaws.taxsettings#DecisionNumber", + "traits": { + "smithy.api#documentation": "VAT-exempt customers have a Directorate General of Taxation (DGT) exemption letter or certificate (Surat Keterangan Bebas) decision number. Non-collected VAT have a DGT letter or certificate (Surat Keterangan Tidak Dipungut).
" + } + } + }, + "traits": { + "smithy.api#documentation": "Additional tax information associated with your TRN in Indonesia.
" + } + }, + "com.amazonaws.taxsettings#IndonesiaTaxRegistrationNumberType": { + "type": "enum", + "members": { + "NIK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NIK" + } + }, + "PASSPORT_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PassportNumber" + } + }, + "NPWP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NPWP" + } + }, + "NITKU": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NITKU" + } + } + } + }, "com.amazonaws.taxsettings#Industries": { "type": "enum", "members": { @@ -2254,6 +2339,12 @@ "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" } }, + "com.amazonaws.taxsettings#PpnExceptionDesignationCode": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(01|02|03|07|08)$" + } + }, "com.amazonaws.taxsettings#PutSupplementalTaxRegistration": { "type": "operation", "input": { @@ -2497,7 +2588,7 @@ "aws.iam#iamAction": { "documentation": "Grants store permission" }, - "smithy.api#documentation": "Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first.
\nTo call this API operation for specific countries, see the following country-specific\n requirements.
\n\n Bangladesh\n
\nYou must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.
\n Brazil\n
\nYou must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation.
\nFor Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address.
\n\n Georgia\n
\nThe valid personType values are Physical Person and Business.
\n Kenya\n
\nYou must specify the personType in the kenyaAdditionalInfo\n field of the additionalTaxInformation object.
If the personType is Physical Person, you must specify the\n tax registration certificate document in the taxRegistrationDocuments field\n of the VerificationDetails object.
\n Malaysia\n
\nThe sector valid values are Business and Individual.
\n RegistrationType valid values are NRIC for individual, and TIN and sales and service tax (SST) for Business.
For individual, you can specify the taxInformationNumber in MalaysiaAdditionalInfo with NRIC type, and a valid MyKad or NRIC number.
For business, you must specify a businessRegistrationNumber in MalaysiaAdditionalInfo with a TIN type and tax identification number.
For business resellers, you must specify a businessRegistrationNumber and taxInformationNumber in MalaysiaAdditionalInfo with a sales and service tax (SST) type and a valid SST number.
For business resellers with service codes, you must specify businessRegistrationNumber, taxInformationNumber, and distinct serviceTaxCodes in MalaysiaAdditionalInfo with a SST type and valid sales and service tax (SST) number. By using this API operation, Amazon Web Services registers your self-declaration that you’re an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD), and have a valid SST number.
Amazon Web Services reserves the right to seek additional information and/or take other actions to\n support your self-declaration as appropriate.
\nAmazon Web Services is currently registered under the following service tax codes. You must include\n at least one of the service tax codes in the service tax code strings to declare yourself\n as an authorized registered business reseller.
\nTaxable service and service tax codes:
\nConsultancy - 9907061674
\nTraining or coaching service - 9907071685
\nIT service - 9907101676
\nDigital services and electronic medium - 9907121690
\n\n Nepal\n
\nThe sector valid values are Business and Individual.
\n Saudi Arabia\n
\nFor address, you must specify addressLine3.
\n South Korea\n
\nYou must specify the certifiedEmailId and legalName in the\n TaxRegistrationEntry object. Use Korean characters for\n legalName.
You must specify the businessRepresentativeName,\n itemOfBusiness, and lineOfBusiness in the\n southKoreaAdditionalInfo field of the additionalTaxInformation\n object. Use Korean characters for these fields.
You must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.
For the address object, use Korean characters for addressLine1, addressLine2\n city, postalCode, and stateOrRegion.
\n Spain\n
\nYou must specify the registrationType in the\n spainAdditionalInfo field of the additionalTaxInformation\n object.
If the registrationType is Local, you must specify the tax\n registration certificate document in the taxRegistrationDocuments field of\n the VerificationDetails object.
\n Turkey\n
\nYou must specify the sector in the taxRegistrationEntry object.
If your sector is Business, Individual, or\n Government:
Specify the taxOffice. If your\n sector is Individual, don't enter this value.
(Optional) Specify the kepEmailId. If your\n sector is Individual, don't enter this value.
\n Note: In the Tax Settings page of the Billing console, Government appears as Public institutions\n
If your sector is Business and you're subject to KDV tax,\n you must specify your industry in the industries field.
For address, you must specify districtOrCounty.
\n Ukraine\n
\nThe sector valid values are Business and Individual.
Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first.
\nTo call this API operation for specific countries, see the following country-specific\n requirements.
\n\n Bangladesh\n
\nYou must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.
\n Brazil\n
\nYou must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation.
\nFor Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address.
\n\n Georgia\n
\nThe valid personType values are Physical Person and Business.
\n Indonesia\n
\n\n PutTaxRegistration: The use of this operation to submit tax information is subject to the Amazon Web Services service terms. By submitting, you’re providing consent for Amazon Web Services to validate NIK, NPWP, and NITKU data, provided by you with the Directorate General of Taxes of Indonesia in accordance with the Minister of Finance Regulation (PMK) Number 112/PMK.03/2022.
\n BatchPutTaxRegistration: The use of this operation to submit tax information is subject to the Amazon Web Services service terms. By submitting, you’re providing consent for Amazon Web Services to validate NIK, NPWP, and NITKU data, provided by you with the Directorate General of Taxes of Indonesia in accordance with the Minister of Finance Regulation (PMK) Number 112/PMK.03/2022, through our third-party partner PT Achilles Advanced Management (OnlinePajak).
You must specify the taxRegistrationNumberType in the indonesiaAdditionalInfo field of the additionalTaxInformation object.
If you specify decisionNumber, you must specify the ppnExceptionDesignationCode in the indonesiaAdditionalInfo field of the additionalTaxInformation object. If the taxRegistrationNumberType is set to NPWP or NITKU, valid values for ppnExceptionDesignationCode are either 01, 02, 03, 07, or 08.
For other taxRegistrationNumberType values, ppnExceptionDesignationCode must be either 01, 07, or 08.
If ppnExceptionDesignationCode is 07, you must specify the decisionNumber in the indonesiaAdditionalInfo field of the additionalTaxInformation object.
\n Kenya\n
\nYou must specify the personType in the kenyaAdditionalInfo\n field of the additionalTaxInformation object.
If the personType is Physical Person, you must specify the\n tax registration certificate document in the taxRegistrationDocuments field\n of the VerificationDetails object.
\n Malaysia\n
\nThe sector valid values are Business and Individual.
\n RegistrationType valid values are NRIC for individual, and TIN and sales and service tax (SST) for Business.
For individual, you can specify the taxInformationNumber in MalaysiaAdditionalInfo with NRIC type, and a valid MyKad or NRIC number.
For business, you must specify a businessRegistrationNumber in MalaysiaAdditionalInfo with a TIN type and tax identification number.
For business resellers, you must specify a businessRegistrationNumber and taxInformationNumber in MalaysiaAdditionalInfo with a sales and service tax (SST) type and a valid SST number.
For business resellers with service codes, you must specify businessRegistrationNumber, taxInformationNumber, and distinct serviceTaxCodes in MalaysiaAdditionalInfo with a SST type and valid sales and service tax (SST) number. By using this API operation, Amazon Web Services registers your self-declaration that you’re an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD), and have a valid SST number.
Amazon Web Services reserves the right to seek additional information and/or take other actions to\n support your self-declaration as appropriate.
\nAmazon Web Services is currently registered under the following service tax codes. You must include\n at least one of the service tax codes in the service tax code strings to declare yourself\n as an authorized registered business reseller.
\nTaxable service and service tax codes:
\nConsultancy - 9907061674
\nTraining or coaching service - 9907071685
\nIT service - 9907101676
\nDigital services and electronic medium - 9907121690
\n\n Nepal\n
\nThe sector valid values are Business and Individual.
\n Saudi Arabia\n
\nFor address, you must specify addressLine3.
\n South Korea\n
\nYou must specify the certifiedEmailId and legalName in the\n TaxRegistrationEntry object. Use Korean characters for\n legalName.
You must specify the businessRepresentativeName,\n itemOfBusiness, and lineOfBusiness in the\n southKoreaAdditionalInfo field of the additionalTaxInformation\n object. Use Korean characters for these fields.
You must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.
For the address object, use Korean characters for addressLine1, addressLine2\n city, postalCode, and stateOrRegion.
\n Spain\n
\nYou must specify the registrationType in the\n spainAdditionalInfo field of the additionalTaxInformation\n object.
If the registrationType is Local, you must specify the tax\n registration certificate document in the taxRegistrationDocuments field of\n the VerificationDetails object.
\n Turkey\n
\nYou must specify the sector in the taxRegistrationEntry object.
If your sector is Business, Individual, or\n Government:
Specify the taxOffice. If your\n sector is Individual, don't enter this value.
(Optional) Specify the kepEmailId. If your\n sector is Individual, don't enter this value.
\n Note: In the Tax Settings page of the Billing console, Government appears as Public institutions\n
If your sector is Business and you're subject to KDV tax,\n you must specify your industry in the industries field.
For address, you must specify districtOrCounty.
\n Ukraine\n
\nThe sector valid values are Business and Individual.
\n The tax registration number type. The tax registration number type valid values are Business and Individual.\n
\n The unique 12-digit number issued to identify VAT-registered identities in Uzbekistan.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n Additional tax information to specify for a TRN in Uzbekistan.\n
" + } + }, + "com.amazonaws.taxsettings#UzbekistanTaxRegistrationNumberType": { + "type": "enum", + "members": { + "BUSINESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Business" + } + }, + "INDIVIDUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Individual" + } + } + } + }, "com.amazonaws.taxsettings#ValidationException": { "type": "structure", "members": { @@ -4626,6 +4754,12 @@ "target": "com.amazonaws.taxsettings#ValidationExceptionField" } }, + "com.amazonaws.taxsettings#VatRegistrationNumber": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9]{12}$" + } + }, "com.amazonaws.taxsettings#VerificationDetails": { "type": "structure", "members": { diff --git a/codegen/sdk/aws-models/transcribe.json b/codegen/sdk/aws-models/transcribe.json index bff90e8adfa..422476da37c 100644 --- a/codegen/sdk/aws-models/transcribe.json +++ b/codegen/sdk/aws-models/transcribe.json @@ -2978,6 +2978,12 @@ "smithy.api#enumValue": "wo-SN" } }, + "ZH_HK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "zh-HK" + } + }, "ZU_ZA": { "target": "smithy.api#Unit", "traits": { diff --git a/codegen/sdk/aws-models/transfer.json b/codegen/sdk/aws-models/transfer.json index 18d7db616bc..9cc215807e1 100644 --- a/codegen/sdk/aws-models/transfer.json +++ b/codegen/sdk/aws-models/transfer.json @@ -154,7 +154,7 @@ "EncryptionAlgorithm": { "target": "com.amazonaws.transfer#EncryptionAlg", "traits": { - "smithy.api#documentation": "The algorithm that is used to encrypt the file.
\nNote the following:
\nDo not use the DES_EDE3_CBC algorithm unless you must support a legacy client that requires it, as it is a weak encryption algorithm.
You can only specify NONE if the URL for your connector uses HTTPS. Using HTTPS ensures that\n no traffic is sent in clear text.
The algorithm that is used to encrypt the file.
Note the following:
Do not use the DES_EDE3_CBC algorithm unless you must support a legacy client that requires it, as it is a weak encryption algorithm.
You can only specify NONE if the URL for your connector uses HTTPS. Using HTTPS ensures that no traffic is sent in clear text.
The signing algorithm for the MDN response.
\nIf set to DEFAULT (or not set at all), the value for SigningAlgorithm is used.
The signing algorithm for the MDN response.
If set to DEFAULT (or not set at all), the value for SigningAlgorithm is used.
Used for outbound requests (from an Transfer Family server to a partner AS2 server) to determine whether\n the partner response for transfers is synchronous or asynchronous. Specify either of the following values:
\n\n SYNC: The system expects a synchronous MDN response, confirming that the file was transferred successfully (or not).
\n NONE: Specifies that no MDN response is required.
Used for outbound requests (from an Transfer Family connector to a partner AS2 server) to determine whether the partner response for transfers is synchronous or asynchronous. Specify either of the following values:
SYNC: The system expects a synchronous MDN response, confirming that the file was transferred successfully (or not).
NONE: Specifies that no MDN response is required.
Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication,\n you must provide the name or Amazon Resource Name (ARN) of a secret in Secrets Manager.
\nThe default value for this parameter is null, which indicates that Basic authentication is not enabled for the connector.
If the connector should use Basic authentication, the secret needs to be in the following format:
\n\n {\n \"Username\": \"user-name\",\n \"Password\": \"user-password\"\n }\n
Replace user-name and user-password with the credentials for the actual user that is being authenticated.
Note the following:
\nYou are storing these credentials in Secrets Manager, not passing them directly into this API.
\nIf you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication.\n However, if you are using the Amazon Web Services management console, you can have the system create the secret for you.
\nIf you have previously enabled Basic authentication for a connector, you can disable it by using the UpdateConnector API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:
\n update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'\n
Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication, you must provide the name or Amazon Resource Name (ARN) of a secret in Secrets Manager.
The default value for this parameter is null, which indicates that Basic authentication is not enabled for the connector.
If the connector should use Basic authentication, the secret needs to be in the following format:
{ \"Username\": \"user-name\", \"Password\": \"user-password\" }
Replace user-name and user-password with the credentials for the actual user that is being authenticated.
Note the following:
You are storing these credentials in Secrets Manager, not passing them directly into this API.
If you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication. However, if you are using the Amazon Web Services management console, you can have the system create the secret for you.
If you have previously enabled Basic authentication for a connector, you can disable it by using the UpdateConnector API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:
update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'
Allows you to use the Amazon S3 Content-Type that is associated with objects in S3 instead of\n having the content type mapped based on the file extension. This parameter is enabled by default when you create an AS2 connector\n from the console, but disabled by default when you create an AS2 connector by calling the API directly.
Allows you to use the Amazon S3 Content-Type that is associated with objects in S3 instead of having the content type mapped based on the file extension. This parameter is enabled by default when you create an AS2 connector from the console, but disabled by default when you create an AS2 connector by calling the API directly.
Contains the details for an AS2 connector object. The connector object is used for AS2 outbound\n processes, to connect the Transfer Family customer with the trading partner.
" + "smithy.api#documentation": "Contains the details for an AS2 connector object. The connector object is used for AS2 outbound processes, to connect the Transfer Family customer with the trading partner.
" } }, "com.amazonaws.transfer#As2ConnectorSecretId": { @@ -432,7 +432,7 @@ } }, "traits": { - "smithy.api#documentation": "This exception is thrown when the UpdateServer is called for a file transfer\n protocol-enabled server that has VPC as the endpoint type and the server's\n VpcEndpointID is not in the available state.
This exception is thrown when the UpdateServer is called for a file transfer protocol-enabled server that has VPC as the endpoint type and the server's VpcEndpointID is not in the available state.
For transfers that fail, this parameter contains a code indicating the reason. For example, RETRIEVE_FILE_NOT_FOUND\n
For transfers that fail, this parameter contains a code indicating the reason. For example, RETRIEVE_FILE_NOT_FOUND
Specifies the location for the file being copied. Use ${Transfer:UserName} or\n ${Transfer:UploadDate} in this field to parametrize the destination prefix by\n username or uploaded date.
Set the value of DestinationFileLocation to\n ${Transfer:UserName} to copy uploaded files to an Amazon S3 bucket\n that is prefixed with the name of the Transfer Family user that uploaded the\n file.
Set the value of DestinationFileLocation to ${Transfer:UploadDate} to copy uploaded files to \n an Amazon S3 bucket that is prefixed with the date of the upload.
The system resolves UploadDate to a date format of YYYY-MM-DD, based on the date the file\n is uploaded in UTC.
Specifies the location for the file being copied. Use ${Transfer:UserName} or ${Transfer:UploadDate} in this field to parametrize the destination prefix by username or uploaded date.
Set the value of DestinationFileLocation to ${Transfer:UserName} to copy uploaded files to an Amazon S3 bucket that is prefixed with the name of the Transfer Family user that uploaded the file.
Set the value of DestinationFileLocation to ${Transfer:UploadDate} to copy uploaded files to an Amazon S3 bucket that is prefixed with the date of the upload.
The system resolves UploadDate to a date format of YYYY-MM-DD, based on the date the file is uploaded in UTC.
A flag that indicates whether to overwrite an existing file of the same name.\n The default is FALSE.
If the workflow is processing a file that has the same name as an existing file, the behavior is as follows:
\nIf OverwriteExisting is TRUE, the existing file is replaced with the file being processed.
If OverwriteExisting is FALSE, nothing happens, and the workflow processing stops.
A flag that indicates whether to overwrite an existing file of the same name. The default is FALSE.
If the workflow is processing a file that has the same name as an existing file, the behavior is as follows:
If OverwriteExisting is TRUE, the existing file is replaced with the file being processed.
If OverwriteExisting is FALSE, nothing happens, and the workflow processing stops.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file\n for the workflow.
\nTo use the previous file as the input, enter ${previous.file}.\n In this case, this workflow step uses the output file from the previous workflow step as input.\n This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
To use the previous file as the input, enter ${previous.file}. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Used by administrators to choose which groups in the directory should have access to\n upload and download files over the enabled protocols using Transfer Family. For example, a\n Microsoft Active Directory might contain 50,000 users, but only a small fraction might need\n the ability to transfer files to the server. An administrator can use\n CreateAccess to limit the access to the correct set of users who need this\n ability.
Used by administrators to choose which groups in the directory should have access to upload and download files over the enabled protocols using Transfer Family. For example, a Microsoft Active Directory might contain 50,000 users, but only a small fraction might need the ability to transfer files to the server. An administrator can use CreateAccess to limit the access to the correct set of users who need this ability.
The landing directory (folder) for a user when they log in to the server using the client.
\nA HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The landing directory (folder) for a user when they log in to the server using the client.
A HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings, using the HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you provide an absolute path using the HomeDirectory parameter. You cannot have both HomeDirectory and HomeDirectoryMappings in your template.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should\n be visible to your user and how you want to make them visible. You must specify the\n Entry and Target pair, where Entry shows how the path\n is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you\n only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) \n role provides access to paths in Target. This value\n can be set only when HomeDirectoryType is set to\n LOGICAL.
The following is an Entry and Target pair example.
\n [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]\n
In most cases, you can use this value instead of the session policy to lock down your\n user to the designated home directory (\"chroot\"). To do this, you can set\n Entry to / and set Target to the\n HomeDirectory parameter value.
The following is an Entry and Target pair example for chroot.
\n [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]\n
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.
The following is an Entry and Target pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.
The following is an Entry and Target pair example for chroot.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's\n access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName},\n ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
This policy applies only when the domain of ServerId is Amazon S3. Amazon EFS does not use session policies.
For session policies, Transfer Family stores the policy as a JSON blob, instead\n of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass\n it in the Policy argument.
For an example of a session policy, see Example\n session policy.
\nFor more information, see AssumeRole in the Security Token Service API\n Reference.
\nA session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
This policy applies only when the domain of ServerId is Amazon S3. Amazon EFS does not use session policies.
For session policies, Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.
For an example of a session policy, see Example session policy.
For more information, see AssumeRole in the Security Token Service API Reference.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
", "smithy.api#required": {} } }, @@ -648,7 +648,7 @@ "ExternalId": { "target": "com.amazonaws.transfer#ExternalId", "traits": { - "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory.\n The users of the group that you associate have access to your Amazon S3 or Amazon EFS\n resources over the enabled protocols using Transfer Family. If you know the group name,\n you can view the SID values by running the following command using Windows PowerShell.
\n\n Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid\n
In that command, replace YourGroupName with the name of your Active Directory group.
\nThe regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces.\n You can also include underscores or any of the following characters: =,.@:/-
", + "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory. The users of the group that you associate have access to your Amazon S3 or Amazon EFS resources over the enabled protocols using Transfer Family. If you know the group name, you can view the SID values by running the following command using Windows PowerShell.
Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid
In that command, replace YourGroupName with the name of your Active Directory group.
The regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@:/-
", "smithy.api#required": {} } } @@ -670,7 +670,7 @@ "ExternalId": { "target": "com.amazonaws.transfer#ExternalId", "traits": { - "smithy.api#documentation": "The external identifier of the group whose users have access to your Amazon S3 or Amazon\n EFS resources over the enabled protocols using Transfer Family.
", + "smithy.api#documentation": "The external identifier of the group whose users have access to your Amazon S3 or Amazon EFS resources over the enabled protocols using Transfer Family.
", "smithy.api#required": {} } } @@ -714,7 +714,7 @@ "iam:PassRole" ] }, - "smithy.api#documentation": "Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership,\n between an Transfer Family server and an AS2 process. The agreement defines the file and message\n transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family\n combines a server, local profile, partner profile, certificate, and other\n attributes.
\nThe partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId.
Specify either\n BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail.
Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership, between a Transfer Family server and an AS2 process. The agreement defines the file and message transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family combines a server, local profile, partner profile, certificate, and other attributes.
The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId.
Specify either BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail.
A system-assigned unique identifier for a server instance. This is the specific server\n that the agreement uses.
", + "smithy.api#documentation": "A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses.
", "smithy.api#required": {} } }, @@ -750,20 +750,20 @@ "BaseDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { - "smithy.api#documentation": "The landing directory (folder) for files transferred by using the AS2 protocol.
\nA BaseDirectory example is\n /amzn-s3-demo-bucket/home/mydirectory.
The landing directory (folder) for files transferred by using the AS2 protocol.
A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory.
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role,\n provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
\n\n For AS2 connectors\n
\nWith AS2, you can send files by calling StartFileTransfer and specifying the\n file paths in the request parameter, SendFilePaths. We use the file’s parent\n directory (for example, for --send-file-paths /bucket/dir/file.txt, parent\n directory is /bucket/dir/) to temporarily store a processed AS2 message file,\n store the MDN when we receive them from the partner, and write a final JSON file containing\n relevant metadata of the transmission. So, the AccessRole needs to provide read\n and write access to the parent directory of the file location used in the\n StartFileTransfer request. Additionally, you need to provide read and write\n access to the parent directory of the files that you intend to send with\n StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the\n secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using\n a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also\n needs the kms:Decrypt permission for that key.
\n For SFTP connectors\n
\nMake sure that the access role provides\n read and write access to the parent directory of the file location\n that's used in the StartFileTransfer request.\n Additionally, make sure that the role provides\n secretsmanager:GetSecretValue permission to Secrets Manager.
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
For AS2 connectors
With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.
For SFTP connectors
Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.
The status of the agreement. The agreement can be either ACTIVE or\n INACTIVE.
The status of the agreement. The agreement can be either ACTIVE or INACTIVE.
\n Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n
\n\n ENABLED: the filename provided by your trading partner is preserved when the file is saved.
\n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations.
Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it.
ENABLED: the filename provided by your trading partner is preserved when the file is saved.
DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations.
\n Determines whether or not unsigned messages from your trading partners will be accepted.\n
\n\n ENABLED: Transfer Family rejects unsigned messages from your trading partner.
\n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
Determines whether or not unsigned messages from your trading partners will be accepted.
ENABLED: Transfer Family rejects unsigned messages from your trading partner.
DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
\nMDN files
\nPayload files
\nStatus files
\nTemporary files
\nA CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
MDN files
Payload files
Status files
Temporary files
The unique identifier for the agreement. Use this ID for deleting, or updating an\n agreement, as well as in any other API calls that require that you specify the agreement\n ID.
", + "smithy.api#documentation": "The unique identifier for the agreement. Use this ID for deleting, or updating an agreement, as well as in any other API calls that require that you specify the agreement ID.
", "smithy.api#required": {} } } @@ -845,7 +845,7 @@ "iam:PassRole" ] }, - "smithy.api#documentation": "Creates the connector, which captures the parameters for a connection for the\n AS2 or SFTP protocol. For AS2, the connector is required for sending files to an externally hosted AS2 server. For SFTP, the connector is required when sending files to an SFTP server or receiving files from an SFTP server.\n For more details about connectors, see Configure AS2 connectors and Create SFTP connectors.
\nYou must specify exactly one configuration object: either for AS2 (As2Config) or SFTP (SftpConfig).
Creates the connector, which captures the parameters for a connection for the AS2 or SFTP protocol. For AS2, the connector is required for sending files to an externally hosted AS2 server. For SFTP, the connector is required when sending files to an SFTP server or receiving files from an SFTP server. For more details about connectors, see Configure AS2 connectors and Create SFTP connectors.
You must specify exactly one configuration object: either for AS2 (As2Config) or SFTP (SftpConfig).
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role,\n provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
\n\n For AS2 connectors\n
\nWith AS2, you can send files by calling StartFileTransfer and specifying the\n file paths in the request parameter, SendFilePaths. We use the file’s parent\n directory (for example, for --send-file-paths /bucket/dir/file.txt, parent\n directory is /bucket/dir/) to temporarily store a processed AS2 message file,\n store the MDN when we receive them from the partner, and write a final JSON file containing\n relevant metadata of the transmission. So, the AccessRole needs to provide read\n and write access to the parent directory of the file location used in the\n StartFileTransfer request. Additionally, you need to provide read and write\n access to the parent directory of the files that you intend to send with\n StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the\n secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using\n a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also\n needs the kms:Decrypt permission for that key.
\n For SFTP connectors\n
\nMake sure that the access role provides\n read and write access to the parent directory of the file location\n that's used in the StartFileTransfer request.\n Additionally, make sure that the role provides\n secretsmanager:GetSecretValue permission to Secrets Manager.
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
For AS2 connectors
With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.
For SFTP connectors
Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn\n on CloudWatch logging for Amazon S3 events. When set, you can view connector\n activity in your CloudWatch logs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.
" } }, "Tags": { @@ -955,14 +955,14 @@ "As2Id": { "target": "com.amazonaws.transfer#As2Id", "traits": { - "smithy.api#documentation": "The As2Id is the AS2-name, as defined in the \n RFC 4130. For inbound transfers, this is the AS2-From header for the AS2 messages\n sent from the partner. For outbound connectors, this is the AS2-To header for the\n AS2 messages sent to the partner using the StartFileTransfer API operation. This ID cannot include spaces.
The As2Id is the AS2-name, as defined in the RFC 4130. For inbound transfers, this is the AS2-From header for the AS2 messages sent from the partner. For outbound connectors, this is the AS2-To header for the AS2 messages sent to the partner using the StartFileTransfer API operation. This ID cannot include spaces.
Determines the type of profile to create:
\nSpecify LOCAL to create a local profile. A local profile represents the AS2-enabled Transfer Family server organization or party.
Specify PARTNER to create a partner profile. A partner profile represents a remote organization, external to Transfer Family.
Determines the type of profile to create:
Specify LOCAL to create a local profile. A local profile represents the AS2-enabled Transfer Family server organization or party.
Specify PARTNER to create a partner profile. A partner profile represents a remote organization, external to Transfer Family.
Instantiates an auto-scaling virtual server based on the selected file transfer protocol\n in Amazon Web Services. When you make updates to your file transfer protocol-enabled server or when you work\n with users, use the service-generated ServerId property that is assigned to the\n newly created server.
Instantiates an auto-scaling virtual server based on the selected file transfer protocol in Amazon Web Services. When you make updates to your file transfer protocol-enabled server or when you work with users, use the service-generated ServerId property that is assigned to the newly created server.
The Amazon Resource Name (ARN) of the Certificate Manager (ACM) certificate. Required\n when Protocols is set to FTPS.
To request a new public certificate, see Request a public certificate\n in the Certificate Manager User Guide.
\nTo import an existing certificate into ACM, see Importing certificates into ACM\n in the Certificate Manager User Guide.
\nTo request a private certificate to use FTPS through private IP addresses, see Request a\n private certificate in the Certificate Manager User\n Guide.
\nCertificates with the following cryptographic algorithms and key sizes are\n supported:
\n2048-bit RSA (RSA_2048)
\n4096-bit RSA (RSA_4096)
\nElliptic Prime Curve 256 bit (EC_prime256v1)
\nElliptic Prime Curve 384 bit (EC_secp384r1)
\nElliptic Prime Curve 521 bit (EC_secp521r1)
\nThe certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP\n address specified and information about the issuer.
\nThe Amazon Resource Name (ARN) of the Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.
To request a new public certificate, see Request a public certificate in the Certificate Manager User Guide.
To import an existing certificate into ACM, see Importing certificates into ACM in the Certificate Manager User Guide.
To request a private certificate to use FTPS through private IP addresses, see Request a private certificate in the Certificate Manager User Guide.
Certificates with the following cryptographic algorithms and key sizes are supported:
2048-bit RSA (RSA_2048)
4096-bit RSA (RSA_4096)
Elliptic Prime Curve 256 bit (EC_prime256v1)
Elliptic Prime Curve 384 bit (EC_secp384r1)
Elliptic Prime Curve 521 bit (EC_secp521r1)
The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer.
The domain of the storage system that is used for file transfers. There are two domains\n available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The\n default value is S3.
\nAfter the server is created, the domain cannot be changed.
\nThe domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.
After the server is created, the domain cannot be changed.
The virtual private cloud (VPC) endpoint settings that are configured for your server.\n When you host your endpoint within your VPC, you can make your endpoint accessible only to resources\n within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over\n the internet. Your VPC's default security groups are automatically assigned to your\n endpoint.
" + "smithy.api#documentation": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.
" } }, "EndpointType": { "target": "com.amazonaws.transfer#EndpointType", "traits": { - "smithy.api#documentation": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC)\n or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and \n resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.
\n After May 19, 2021, you won't be able to create a server using\n EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already\n done so before May 19, 2021. If you have already created servers with\n EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021,\n you will not be affected. After this date, use\n EndpointType=VPC.
For more information, see\n https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.
\nIt is recommended that you use VPC as the EndpointType. With\n this endpoint type, you have the option to directly associate up to three Elastic IPv4\n addresses (BYO IP included) with your server's endpoint and use VPC security groups to\n restrict traffic by the client's public IP address. This is not possible with\n EndpointType set to VPC_ENDPOINT.
The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.
After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.
For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.
It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.
The RSA, ECDSA, or ED25519 private key to use for your SFTP-enabled server. You can add multiple host keys, in case you want\n to rotate keys, or have a set of active keys that use different algorithms.
\nUse the following command to generate an RSA 2048 bit key with no passphrase:
\n\n ssh-keygen -t rsa -b 2048 -N \"\" -m PEM -f my-new-server-key.
Use a minimum value of 2048 for the -b option. You can create a stronger key by using 3072 or 4096.
Use the following command to generate an ECDSA 256 bit key with no passphrase:
\n\n ssh-keygen -t ecdsa -b 256 -N \"\" -m PEM -f my-new-server-key.
Valid values for the -b option for ECDSA are 256, 384, and 521.
Use the following command to generate an ED25519 key with no passphrase:
\n\n ssh-keygen -t ed25519 -N \"\" -f my-new-server-key.
For all of these commands, you can replace my-new-server-key with a string of your choice.
\nIf you aren't planning to migrate existing users from an existing SFTP-enabled\n server to a new server, don't update the host key. Accidentally changing a\n server's host key can be disruptive.
\nFor more information, see Manage host keys for your SFTP-enabled server in the Transfer Family User Guide.
" + "smithy.api#documentation": "The RSA, ECDSA, or ED25519 private key to use for your SFTP-enabled server. You can add multiple host keys, in case you want to rotate keys, or have a set of active keys that use different algorithms.
Use the following command to generate an RSA 2048 bit key with no passphrase:
ssh-keygen -t rsa -b 2048 -N \"\" -m PEM -f my-new-server-key.
Use a minimum value of 2048 for the -b option. You can create a stronger key by using 3072 or 4096.
Use the following command to generate an ECDSA 256 bit key with no passphrase:
ssh-keygen -t ecdsa -b 256 -N \"\" -m PEM -f my-new-server-key.
Valid values for the -b option for ECDSA are 256, 384, and 521.
Use the following command to generate an ED25519 key with no passphrase:
ssh-keygen -t ed25519 -N \"\" -f my-new-server-key.
For all of these commands, you can replace my-new-server-key with a string of your choice.
If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.
For more information, see Manage host keys for your SFTP-enabled server in the Transfer Family User Guide.
" } }, "IdentityProviderDetails": { "target": "com.amazonaws.transfer#IdentityProviderDetails", "traits": { - "smithy.api#documentation": "Required when IdentityProviderType is set to\n AWS_DIRECTORY_SERVICE, Amazon Web Services_LAMBDA or\n API_GATEWAY. Accepts an array containing all of the information required to use\n a directory in AWS_DIRECTORY_SERVICE or invoke a customer-supplied authentication\n API, including the API Gateway URL. Cannot be specified when IdentityProviderType\n is set to SERVICE_MANAGED.
Required when IdentityProviderType is set to AWS_DIRECTORY_SERVICE, AWS_LAMBDA or API_GATEWAY. Accepts an array containing all of the information required to use a directory in AWS_DIRECTORY_SERVICE or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when IdentityProviderType is set to SERVICE_MANAGED.
The mode of authentication for a server. The default value is\n SERVICE_MANAGED, which allows you to store and access user credentials within\n the Transfer Family service.
Use AWS_DIRECTORY_SERVICE to provide access to\n Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your\n on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to\n provide a Directory ID by using the IdentityProviderDetails parameter.
Use the API_GATEWAY value to integrate with an identity provider of your choosing. The\n API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call\n for authentication by using the IdentityProviderDetails parameter.
Use the AWS_LAMBDA value to directly use an Lambda function as your identity provider. \n If you choose this value, you must specify the ARN for the Lambda function in the Function parameter \n for the IdentityProviderDetails data type.
The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.
Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.
Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.
Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter for the IdentityProviderDetails data type.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn\n on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in\n your CloudWatch logs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in your CloudWatch logs.
" } }, "PostAuthenticationLoginBanner": { "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner", "traits": { - "smithy.api#documentation": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.
\nThe SFTP protocol does not support post-authentication display banners.
\nSpecifies a string to display when users connect to a server. This string is displayed after the user authenticates.
The SFTP protocol does not support post-authentication display banners.
Specifies a string to display when users connect to a server. This string is displayed before the user authenticates.\n For example, the following banner displays details about using the system:
\n\n This system is for the use of authorized users only. Individuals using this computer system without authority,\n or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by\n system personnel.\n
Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:
This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.
Specifies the file transfer protocol or protocols over which your file transfer protocol\n client can connect to your server's endpoint. The available protocols are:
\n\n SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over\n SSH
\n FTPS (File Transfer Protocol Secure): File transfer with TLS\n encryption
\n FTP (File Transfer Protocol): Unencrypted file transfer
\n AS2 (Applicability Statement 2): used for transporting structured business-to-business data
If you select FTPS, you must choose a certificate stored in Certificate Manager (ACM) \n which is used to identify your server when clients connect to it over\n FTPS.
If Protocol includes either FTP or FTPS, then the\n EndpointType must be VPC and the\n IdentityProviderType must be either AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes FTP, then\n AddressAllocationIds cannot be associated.
If Protocol is set only to SFTP, the EndpointType\n can be set to PUBLIC and the IdentityProviderType can be set any of the supported identity types: \n SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes AS2, then the\n EndpointType must be VPC, and domain must be Amazon S3.
Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:
SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH
FTPS (File Transfer Protocol Secure): File transfer with TLS encryption
FTP (File Transfer Protocol): Unencrypted file transfer
AS2 (Applicability Statement 2): used for transporting structured business-to-business data
If you select FTPS, you must choose a certificate stored in Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.
If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be either AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes FTP, then AddressAllocationIds cannot be associated.
If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to any of the supported identity types: SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes AS2, then the EndpointType must be VPC, and domain must be Amazon S3.
The protocol settings that are configured for your server.
\n\n To indicate passive mode (for FTP and FTPS protocols), use the PassiveIp parameter.\n Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n
To ignore the error that is generated when the client attempts to use the SETSTAT command on a file that you are \n uploading to an Amazon S3 bucket, use the SetStatOption parameter. To have the Transfer Family server ignore the \n SETSTAT command and upload files without needing to make any changes to your SFTP client, set the value to \n ENABLE_NO_OP. If you set the SetStatOption parameter to ENABLE_NO_OP, Transfer Family \n generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a SETSTAT \n call.
To determine whether your Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the \n TlsSessionResumptionMode parameter.
\n As2Transports indicates the transport method for the AS2 messages. Currently, only HTTP is supported.
The protocol settings that are configured for your server.
To indicate passive mode (for FTP and FTPS protocols), use the PassiveIp parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.
To ignore the error that is generated when the client attempts to use the SETSTAT command on a file that you are uploading to an Amazon S3 bucket, use the SetStatOption parameter. To have the Transfer Family server ignore the SETSTAT command and upload files without needing to make any changes to your SFTP client, set the value to ENABLE_NO_OP. If you set the SetStatOption parameter to ENABLE_NO_OP, Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a SETSTAT call.
To determine whether your Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the TlsSessionResumptionMode parameter.
As2Transports indicates the transport method for the AS2 messages. Currently, only HTTP is supported.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects\n while the file is still being uploaded.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.
Specifies the log groups to which your server logs are sent.
\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:
\n\n arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*\n
For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*\n
If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty\n value for this parameter in an update-server call. For example:
\n update-server --server-id s-1234567890abcdef0 --structured-log-destinations\n
Specifies the log groups to which your server logs are sent.
To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:
arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*
For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*
If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example:
update-server --server-id s-1234567890abcdef0 --structured-log-destinations
Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.
\nBy default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry\n Type to FILE if you want a mapping to have a file target.
Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.
By default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target.
Creates a user and associates them with an existing file transfer protocol-enabled server.\n You can only create and associate users with servers that have the\n IdentityProviderType set to SERVICE_MANAGED. Using parameters for\n CreateUser, you can specify the user name, set the home directory, store the\n user's public key, and assign the user's Identity and Access Management (IAM)\n role. You can also optionally add a session policy, and assign metadata with tags that can\n be used to group and search for users.
Creates a user and associates them with an existing file transfer protocol-enabled server. You can only create and associate users with servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's Identity and Access Management (IAM) role. You can also optionally add a session policy, and assign metadata with tags that can be used to group and search for users.
The landing directory (folder) for a user when they log in to the server using the client.
\nA HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The landing directory (folder) for a user when they log in to the server using the client.
A HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings, using the HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you provide an absolute path using the HomeDirectory parameter. You cannot have both HomeDirectory and HomeDirectoryMappings in your template.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should\n be visible to your user and how you want to make them visible. You must specify the\n Entry and Target pair, where Entry shows how the path\n is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you\n only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) \n role provides access to paths in Target. This value\n can be set only when HomeDirectoryType is set to\n LOGICAL.
The following is an Entry and Target pair example.
\n [ { \"Entry\": \"/directory1\", \"Target\":\n \"/bucket_name/home/mydirectory\" } ]\n
In most cases, you can use this value instead of the session policy to lock your user down\n to the designated home directory (\"chroot\"). To do this, you can set\n Entry to / and set Target to the value the user\n should see for their home directory when they log in.
The following is an Entry and Target pair example for chroot.
\n [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]\n
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.
The following is an Entry and Target pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the value the user should see for their home directory when they log in.
The following is an Entry and Target pair example for chroot.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's\n access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName},\n ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
This policy applies only when the domain of ServerId is Amazon S3. Amazon EFS does not use session policies.
For session policies, Transfer Family stores the policy as a JSON blob, instead\n of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass\n it in the Policy argument.
For an example of a session policy, see Example session\n policy.
\nFor more information, see AssumeRole in the Amazon Web Services\n Security Token Service API Reference.
\nA session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
This policy applies only when the domain of ServerId is Amazon S3. Amazon EFS does not use session policies.
For session policies, Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.
For an example of a session policy, see Example session policy.
For more information, see AssumeRole in the Amazon Web Services Security Token Service API Reference.
Specifies the full POSIX identity, including user ID (Uid), group ID\n (Gid), and any secondary groups IDs (SecondaryGids), that controls\n your users' access to your Amazon EFS file systems. The POSIX permissions that are set on\n files and directories in Amazon EFS determine the level of access your users get when\n transferring files into and out of your Amazon EFS file systems.
Specifies the full POSIX identity, including user ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), that controls your users' access to your Amazon EFS file systems. The POSIX permissions that are set on files and directories in Amazon EFS determine the level of access your users get when transferring files into and out of your Amazon EFS file systems.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
", "smithy.api#required": {} } }, "ServerId": { "target": "com.amazonaws.transfer#ServerId", "traits": { - "smithy.api#documentation": "A system-assigned unique identifier for a server instance. This is the specific server\n that you added your user to.
", + "smithy.api#documentation": "A system-assigned unique identifier for a server instance. This is the specific server that you added your user to.
", "smithy.api#required": {} } }, @@ -1276,19 +1276,19 @@ "target": "com.amazonaws.transfer#SshPublicKeyBody", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The public portion of the Secure Shell (SSH) key used to authenticate the user to the\n server.
\nThe three standard SSH public key format elements are <key type>,\n <body base64>, and an optional <comment>, with spaces\n between each element.
Transfer Family accepts RSA, ECDSA, and ED25519 keys.
\nFor RSA keys, the key type is ssh-rsa.
For ED25519 keys, the key type is ssh-ed25519.
For ECDSA keys, the key type is either ecdsa-sha2-nistp256,\n ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the\n size of the key you generated.
The public portion of the Secure Shell (SSH) key used to authenticate the user to the server.
The three standard SSH public key format elements are <key type>, <body base64>, and an optional <comment>, with spaces between each element.
Transfer Family accepts RSA, ECDSA, and ED25519 keys.
For RSA keys, the key type is ssh-rsa.
For ED25519 keys, the key type is ssh-ed25519.
For ECDSA keys, the key type is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated.
Key-value pairs that can be used to group and search for users. Tags are metadata attached\n to users for any purpose.
" + "smithy.api#documentation": "Key-value pairs that can be used to group and search for users. Tags are metadata attached to users for any purpose.
" } }, "UserName": { "target": "com.amazonaws.transfer#UserName", "traits": { - "smithy.api#documentation": "A unique string that identifies a user and is associated with a ServerId. This user name must be a minimum of 3 and a maximum of 100 characters\n long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen\n '-', period '.', and at sign '@'. The user name can't start\n with a hyphen, period, or at sign.
A unique string that identifies a user and is associated with a ServerId. This user name must be a minimum of 3 and a maximum of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't start with a hyphen, period, or at sign.
You can provide a structure that contains the details for the identity provider to use with your web app.
", + "smithy.api#documentation": "You can provide a structure that contains the details for the identity provider to use with your web app.
For more details about this parameter, see Configure your identity provider for Transfer Family web apps.
", "smithy.api#required": {} } }, "AccessEndpoint": { "target": "com.amazonaws.transfer#WebAppAccessEndpoint", "traits": { - "smithy.api#documentation": "The AccessEndpoint is the URL that you provide to your users for them to interact with the Transfer Family web app. You can specify a custom URL or use the default value.
The AccessEndpoint is the URL that you provide to your users for them to interact with the Transfer Family web app. You can specify a custom URL or use the default value.
Before you enter a custom URL for this parameter, follow the steps described in Update your access endpoint with a custom URL.
" } }, "WebAppUnits": { @@ -1386,6 +1386,12 @@ "traits": { "smithy.api#documentation": "Key-value pairs that can be used to group and search for web apps.
" } + }, + "WebAppEndpointPolicy": { + "target": "com.amazonaws.transfer#WebAppEndpointPolicy", + "traits": { + "smithy.api#documentation": " Setting for the type of endpoint policy for the web app. The default value is STANDARD.
If you are creating the web app in an Amazon Web Services GovCloud (US) Region, you can set this parameter to FIPS.
\n Allows you to create a workflow with specified steps and step details the workflow invokes after file transfer completes.\n After creating a workflow, you can associate the workflow created with any transfer servers by specifying the workflow-details field in CreateServer and UpdateServer operations.\n
Allows you to create a workflow with specified steps and step details the workflow invokes after file transfer completes. After creating a workflow, you can associate the workflow created with any transfer servers by specifying the workflow-details field in CreateServer and UpdateServer operations.
Specifies the details for the steps that are in the specified workflow.
\n\n The TYPE specifies which of the following actions is being taken for this step.\n
\n \n COPY\n - Copy the file to another location.
\n \n CUSTOM\n - Perform a custom step with an Lambda function target.
\n \n DECRYPT\n - Decrypt a file that was encrypted before it was uploaded.
\n \n DELETE\n - Delete the file.
\n \n TAG\n - Add a tag to the file.
\n Currently, copying and tagging are supported only on S3.\n
\nFor file location, you specify either the Amazon S3 bucket and key, or the Amazon EFS file system ID\n and path.
", + "smithy.api#documentation": "Specifies the details for the steps that are in the specified workflow.
The TYPE specifies which of the following actions is being taken for this step.
COPY - Copy the file to another location.
CUSTOM - Perform a custom step with a Lambda function target.
DECRYPT - Decrypt a file that was encrypted before it was uploaded.
DELETE - Delete the file.
TAG - Add a tag to the file.
Currently, copying and tagging are supported only on S3.
For file location, you specify either the Amazon S3 bucket and key, or the Amazon EFS file system ID and path.
", "smithy.api#required": {} } }, "OnExceptionSteps": { "target": "com.amazonaws.transfer#WorkflowSteps", "traits": { - "smithy.api#documentation": "Specifies the steps (actions) to take if errors are encountered during execution of the workflow.
\nFor custom steps, the Lambda function needs to send FAILURE to the call\n back API to kick off the exception steps. Additionally, if the Lambda does not send\n SUCCESS before it times out, the exception steps are executed.
Specifies the steps (actions) to take if errors are encountered during execution of the workflow.
For custom steps, the Lambda function needs to send FAILURE to the call back API to kick off the exception steps. Additionally, if the Lambda does not send SUCCESS before it times out, the exception steps are executed.
Key-value pairs that can be used to group and search for workflows. Tags are metadata attached\n to workflows for any purpose.
" + "smithy.api#documentation": "Key-value pairs that can be used to group and search for workflows. Tags are metadata attached to workflows for any purpose.
" } } }, @@ -1559,7 +1565,7 @@ "SourceFileLocation": { "target": "com.amazonaws.transfer#SourceFileLocation", "traits": { - "smithy.api#documentation": "Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file\n for the workflow.
\nTo use the previous file as the input, enter ${previous.file}.\n In this case, this workflow step uses the output file from the previous workflow step as input.\n This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
To use the previous file as the input, enter ${previous.file}. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file\n for the workflow.
\nTo use the previous file as the input, enter ${previous.file}.\n In this case, this workflow step uses the output file from the previous workflow step as input.\n This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
To use the previous file as the input, enter ${previous.file}. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
A flag that indicates whether to overwrite an existing file of the same name.\n The default is FALSE.
If the workflow is processing a file that has the same name as an existing file, the behavior is as follows:
\nIf OverwriteExisting is TRUE, the existing file is replaced with the file being processed.
If OverwriteExisting is FALSE, nothing happens, and the workflow processing stops.
A flag that indicates whether to overwrite an existing file of the same name. The default is FALSE.
If the workflow is processing a file that has the same name as an existing file, the behavior is as follows:
If OverwriteExisting is TRUE, the existing file is replaced with the file being processed.
If OverwriteExisting is FALSE, nothing happens, and the workflow processing stops.
Specifies the location for the file being decrypted. Use ${Transfer:UserName} or\n ${Transfer:UploadDate} in this field to parametrize the destination prefix by\n username or uploaded date.
Set the value of DestinationFileLocation to\n ${Transfer:UserName} to decrypt uploaded files to an Amazon S3 bucket\n that is prefixed with the name of the Transfer Family user that uploaded the\n file.
Set the value of DestinationFileLocation to ${Transfer:UploadDate} to decrypt uploaded files to \n an Amazon S3 bucket that is prefixed with the date of the upload.
The system resolves UploadDate to a date format of YYYY-MM-DD, based on the date the file\n is uploaded in UTC.
Specifies the location for the file being decrypted. Use ${Transfer:UserName} or ${Transfer:UploadDate} in this field to parametrize the destination prefix by username or uploaded date.
Set the value of DestinationFileLocation to ${Transfer:UserName} to decrypt uploaded files to an Amazon S3 bucket that is prefixed with the name of the Transfer Family user that uploaded the file.
Set the value of DestinationFileLocation to ${Transfer:UploadDate} to decrypt uploaded files to an Amazon S3 bucket that is prefixed with the date of the upload.
The system resolves UploadDate to a date format of YYYY-MM-DD, based on the date the file is uploaded in UTC.
Allows you to delete the access specified in the ServerID and\n ExternalID parameters.
Allows you to delete the access specified in the ServerID and ExternalID parameters.
A unique identifier that is required to identify specific groups within your directory.\n The users of the group that you associate have access to your Amazon S3 or Amazon EFS\n resources over the enabled protocols using Transfer Family. If you know the group name,\n you can view the SID values by running the following command using Windows PowerShell.
\n\n Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid\n
In that command, replace YourGroupName with the name of your Active Directory group.
\nThe regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces.\n You can also include underscores or any of the following characters: =,.@:/-
", + "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory. The users of the group that you associate have access to your Amazon S3 or Amazon EFS resources over the enabled protocols using Transfer Family. If you know the group name, you can view the SID values by running the following command using Windows PowerShell.
Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid
In that command, replace YourGroupName with the name of your Active Directory group.
The regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@:/-
", "smithy.api#required": {} } } @@ -1766,7 +1772,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes the certificate that's specified in the CertificateId\n parameter.
Deletes the certificate that's specified in the CertificateId parameter.
Deletes the file transfer protocol-enabled server that you specify.
\nNo response returns from this operation.
", + "smithy.api#documentation": "Deletes the file transfer protocol-enabled server that you specify.
No response returns from this operation.
", "smithy.api#idempotent": {} } }, @@ -2015,7 +2031,7 @@ "ServerId": { "target": "com.amazonaws.transfer#ServerId", "traits": { - "smithy.api#documentation": "A system-assigned unique identifier for a file transfer protocol-enabled server instance\n that has the user assigned to it.
", + "smithy.api#documentation": "A system-assigned unique identifier for a file transfer protocol-enabled server instance that has the user assigned to it.
", "smithy.api#required": {} } }, @@ -2050,7 +2066,7 @@ "SourceFileLocation": { "target": "com.amazonaws.transfer#SourceFileLocation", "traits": { - "smithy.api#documentation": "Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file\n for the workflow.
\nTo use the previous file as the input, enter ${previous.file}.\n In this case, this workflow step uses the output file from the previous workflow step as input.\n This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
To use the previous file as the input, enter ${previous.file}. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Deletes the user belonging to a file transfer protocol-enabled server you specify.
\nNo response returns from this operation.
\nWhen you delete a user from a server, the user's information is lost.
\nDeletes the user belonging to a file transfer protocol-enabled server you specify.
No response returns from this operation.
When you delete a user from a server, the user's information is lost.
A system-assigned unique identifier for a server instance that has the user assigned to\n it.
", + "smithy.api#documentation": "A system-assigned unique identifier for a server instance that has the user assigned to it.
", "smithy.api#required": {} } }, @@ -2296,7 +2312,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the access that is assigned to the specific file transfer protocol-enabled\n server, as identified by its ServerId property and its\n ExternalId.
The response from this call returns the properties of the access that is associated with\n the ServerId value that was specified.
Describes the access that is assigned to the specific file transfer protocol-enabled server, as identified by its ServerId property and its ExternalId.
The response from this call returns the properties of the access that is associated with the ServerId value that was specified.
A unique identifier that is required to identify specific groups within your directory.\n The users of the group that you associate have access to your Amazon S3 or Amazon EFS\n resources over the enabled protocols using Transfer Family. If you know the group name,\n you can view the SID values by running the following command using Windows PowerShell.
\n\n Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid\n
In that command, replace YourGroupName with the name of your Active Directory group.
\nThe regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces.\n You can also include underscores or any of the following characters: =,.@:/-
", + "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory. The users of the group that you associate have access to your Amazon S3 or Amazon EFS resources over the enabled protocols using Transfer Family. If you know the group name, you can view the SID values by running the following command using Windows PowerShell.
Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid
In that command, replace YourGroupName with the name of your Active Directory group.
The regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@:/-
", "smithy.api#required": {} } } @@ -2400,7 +2416,7 @@ "target": "com.amazonaws.transfer#DescribedAgreement", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The details for the specified agreement, returned as a DescribedAgreement\n object.
The details for the specified agreement, returned as a DescribedAgreement object.
Describes the connector that's identified by the ConnectorId.\n
Describes the connector that's identified by the ConnectorId.
You can use DescribeExecution to check the details of the execution of the specified workflow.
This API call only returns details for in-progress workflows.
\n\n If you provide an ID for an execution that is not in progress, or if the execution doesn't match the specified workflow ID, you receive a\n ResourceNotFound exception.
You can use DescribeExecution to check the details of the execution of the specified workflow.
This API call only returns details for in-progress workflows.
If you provide an ID for an execution that is not in progress, or if the execution doesn't match the specified workflow ID, you receive a ResourceNotFound exception.
Describes the security policy that is attached to your server or SFTP connector. The response contains a description of the security policy's properties. For more\n information about security policies, see Working with security\n policies for servers or Working with security\n policies for SFTP connectors.
", + "smithy.api#documentation": "Describes the security policy that is attached to your server or SFTP connector. The response contains a description of the security policy's properties. For more information about security policies, see Working with security policies for servers or Working with security policies for SFTP connectors.
", "smithy.api#readonly": {} } }, @@ -2804,7 +2820,7 @@ "transfer:DescribeServer" ] }, - "smithy.api#documentation": "Describes a file transfer protocol-enabled server that you specify by passing the\n ServerId parameter.
The response contains a description of a server's properties. When you set\n EndpointType to VPC, the response will contain the\n EndpointDetails.
Describes a file transfer protocol-enabled server that you specify by passing the ServerId parameter.
The response contains a description of a server's properties. When you set EndpointType to VPC, the response will contain the EndpointDetails.
An array containing the properties of a server with the ServerID you\n specified.
An array containing the properties of a server with the ServerID you specified.
Describes the user assigned to the specific file transfer protocol-enabled server, as\n identified by its ServerId property.
The response from this call returns the properties of the user associated with the\n ServerId value that was specified.
Describes the user assigned to the specific file transfer protocol-enabled server, as identified by its ServerId property.
The response from this call returns the properties of the user associated with the ServerId value that was specified.
The name of the user assigned to one or more servers. User names are part of the sign-in\n credentials to use the Transfer Family service and perform file transfer tasks.
", + "smithy.api#documentation": "The name of the user assigned to one or more servers. User names are part of the sign-in credentials to use the Transfer Family service and perform file transfer tasks.
", "smithy.api#required": {} } } @@ -2961,7 +2977,7 @@ "target": "com.amazonaws.transfer#DescribedUser", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "An array containing the properties of the Transfer Family user for the ServerID value\n that you specified.
An array containing the properties of the Transfer Family user for the ServerID value that you specified.
The landing directory (folder) for a user when they log in to the server using the client.
\nA HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The landing directory (folder) for a user when they log in to the server using the client.
A HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should\n be visible to your user and how you want to make them visible. You must specify the\n Entry and Target pair, where Entry shows how the path\n is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you\n only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) \n role provides access to paths in Target. This value\n can be set only when HomeDirectoryType is set to\n LOGICAL.
In most cases, you can use this value instead of the session policy to lock down the\n associated access to the designated home directory (\"chroot\"). To do this, you\n can set Entry to '/' and set Target to the\n HomeDirectory parameter value.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.
In most cases, you can use this value instead of the session policy to lock down the associated access to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings, using the HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you provide an absolute path using the HomeDirectory parameter. You cannot have both HomeDirectory and HomeDirectoryMappings in your template.
A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's\n access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName},\n ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
" } }, "ExternalId": { "target": "com.amazonaws.transfer#ExternalId", "traits": { - "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory.\n The users of the group that you associate have access to your Amazon S3 or Amazon EFS\n resources over the enabled protocols using Transfer Family. If you know the group name,\n you can view the SID values by running the following command using Windows PowerShell.
\n\n Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid\n
In that command, replace YourGroupName with the name of your Active Directory group.
\nThe regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces.\n You can also include underscores or any of the following characters: =,.@:/-
" + "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory. The users of the group that you associate have access to your Amazon S3 or Amazon EFS resources over the enabled protocols using Transfer Family. If you know the group name, you can view the SID values by running the following command using Windows PowerShell.
Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid
In that command, replace YourGroupName with the name of your Active Directory group.
The regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@:/-
" } } }, @@ -3246,13 +3262,13 @@ "Status": { "target": "com.amazonaws.transfer#AgreementStatusType", "traits": { - "smithy.api#documentation": "The current status of the agreement, either ACTIVE or\n INACTIVE.
The current status of the agreement, either ACTIVE or INACTIVE.
A system-assigned unique identifier for a server instance. This identifier indicates the\n specific server that the agreement uses.
" + "smithy.api#documentation": "A system-assigned unique identifier for a server instance. This identifier indicates the specific server that the agreement uses.
" } }, "LocalProfileId": { @@ -3270,13 +3286,13 @@ "BaseDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { - "smithy.api#documentation": "The landing directory (folder) for files that are transferred by using the AS2\n protocol.
" + "smithy.api#documentation": "The landing directory (folder) for files that are transferred by using the AS2 protocol.
" } }, "AccessRole": { "target": "com.amazonaws.transfer#Role", "traits": { - "smithy.api#documentation": "Connectors are used to send files using either the AS2 or SFTP protocol. For the access role,\n provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
\n\n For AS2 connectors\n
\nWith AS2, you can send files by calling StartFileTransfer and specifying the\n file paths in the request parameter, SendFilePaths. We use the file’s parent\n directory (for example, for --send-file-paths /bucket/dir/file.txt, parent\n directory is /bucket/dir/) to temporarily store a processed AS2 message file,\n store the MDN when we receive them from the partner, and write a final JSON file containing\n relevant metadata of the transmission. So, the AccessRole needs to provide read\n and write access to the parent directory of the file location used in the\n StartFileTransfer request. Additionally, you need to provide read and write\n access to the parent directory of the files that you intend to send with\n StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the\n secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using\n a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also\n needs the kms:Decrypt permission for that key.
\n For SFTP connectors\n
\nMake sure that the access role provides\n read and write access to the parent directory of the file location\n that's used in the StartFileTransfer request.\n Additionally, make sure that the role provides\n secretsmanager:GetSecretValue permission to Secrets Manager.
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
For AS2 connectors
With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.
For SFTP connectors
Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.
\n Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n
\n\n ENABLED: the filename provided by your trading partner is preserved when the file is saved.
\n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations.
Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it.
ENABLED: the filename provided by your trading partner is preserved when the file is saved.
DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations.
\n Determines whether or not unsigned messages from your trading partners will be accepted.\n
\n\n ENABLED: Transfer Family rejects unsigned messages from your trading partner.
\n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
Determines whether or not unsigned messages from your trading partners will be accepted.
ENABLED: Transfer Family rejects unsigned messages from your trading partner.
DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
\nMDN files
\nPayload files
\nStatus files
\nTemporary files
\nA CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
MDN files
Payload files
Status files
Temporary files
Specifies how this certificate is used. It can be used in the following ways:
\n\n SIGNING: For signing AS2 messages
\n ENCRYPTION: For encrypting AS2 messages
\n TLS: For securing AS2 communications sent over HTTPS
Specifies how this certificate is used. It can be used in the following ways:
SIGNING: For signing AS2 messages
ENCRYPTION: For encrypting AS2 messages
TLS: For securing AS2 communications sent over HTTPS
Currently, the only available status is ACTIVE: all other values are reserved for future use.
A certificate's status can be either ACTIVE or INACTIVE.
You can set ActiveDate and InactiveDate in the UpdateCertificate call. If you set values for these parameters, those values are used to determine whether the certificate has a status of ACTIVE or INACTIVE.
If you don't set values for ActiveDate and InactiveDate, we use the NotBefore and NotAfter date as specified on the X509 certificate to determine when a certificate is active and when it is inactive.
An optional date that specifies when the certificate becomes active.
" + "smithy.api#documentation": "An optional date that specifies when the certificate becomes active. If you do not specify a value, ActiveDate takes the same value as NotBeforeDate, which is specified by the CA.
An optional date that specifies when the certificate becomes inactive.
" + "smithy.api#documentation": "An optional date that specifies when the certificate becomes inactive. If you do not specify a value, InactiveDate takes the same value as NotAfterDate, which is specified by the CA.
The final date that the certificate is\n valid.
" + "smithy.api#documentation": "The final date that the certificate is valid.
" } }, "Type": { @@ -3433,13 +3449,13 @@ "AccessRole": { "target": "com.amazonaws.transfer#Role", "traits": { - "smithy.api#documentation": "Connectors are used to send files using either the AS2 or SFTP protocol. For the access role,\n provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
\n\n For AS2 connectors\n
\nWith AS2, you can send files by calling StartFileTransfer and specifying the\n file paths in the request parameter, SendFilePaths. We use the file’s parent\n directory (for example, for --send-file-paths /bucket/dir/file.txt, parent\n directory is /bucket/dir/) to temporarily store a processed AS2 message file,\n store the MDN when we receive them from the partner, and write a final JSON file containing\n relevant metadata of the transmission. So, the AccessRole needs to provide read\n and write access to the parent directory of the file location used in the\n StartFileTransfer request. Additionally, you need to provide read and write\n access to the parent directory of the files that you intend to send with\n StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the\n secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using\n a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also\n needs the kms:Decrypt permission for that key.
\n For SFTP connectors\n
\nMake sure that the access role provides\n read and write access to the parent directory of the file location\n that's used in the StartFileTransfer request.\n Additionally, make sure that the role provides\n secretsmanager:GetSecretValue permission to Secrets Manager.
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
For AS2 connectors
With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.
For SFTP connectors
Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn\n on CloudWatch logging for Amazon S3 events. When set, you can view connector\n activity in your CloudWatch logs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.
" } }, "Tags": { @@ -3468,7 +3484,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes the parameters for the connector, as identified by the\n ConnectorId.
Describes the parameters for the connector, as identified by the ConnectorId.
A structure that describes the Amazon S3 or EFS file location.\n This is the file location when the execution begins: if the file is being copied,\n this is the initial (as opposed to destination) file location.
" + "smithy.api#documentation": "A structure that describes the Amazon S3 or EFS file location. This is the file location when the execution begins: if the file is being copied, this is the initial (as opposed to destination) file location.
" } }, "ServiceMetadata": { @@ -3510,13 +3526,13 @@ "Status": { "target": "com.amazonaws.transfer#ExecutionStatus", "traits": { - "smithy.api#documentation": "The status is one of the execution. Can be in progress, completed, exception encountered, or handling the exception.\n
" + "smithy.api#documentation": "The status is one of the execution. Can be in progress, completed, exception encountered, or handling the exception.
" } }, "Results": { "target": "com.amazonaws.transfer#ExecutionResults", "traits": { - "smithy.api#documentation": "A structure that describes the execution results. This includes a list of the steps along with the details of each step,\n error type and message (if any), and the OnExceptionSteps structure.
A structure that describes the execution results. This includes a list of the steps along with the details of each step, error type and message (if any), and the OnExceptionSteps structure.
The encryption algorithm that is used for the host key. The Type parameter is specified by using one of the\n following values:
\n ssh-rsa\n
\n ssh-ed25519\n
\n ecdsa-sha2-nistp256\n
\n ecdsa-sha2-nistp384\n
\n ecdsa-sha2-nistp521\n
The encryption algorithm that is used for the host key. The Type parameter is specified by using one of the following values:
ssh-rsa
ssh-ed25519
ecdsa-sha2-nistp256
ecdsa-sha2-nistp384
ecdsa-sha2-nistp521
Indicates whether to list only LOCAL type profiles or only PARTNER type profiles. \n If not supplied in the request, the command lists all types of profiles.
Indicates whether to list only LOCAL type profiles or only PARTNER type profiles. If not supplied in the request, the command lists all types of profiles.
The As2Id is the AS2-name, as defined in the \n RFC 4130. For inbound transfers, this is the AS2-From header for the AS2 messages\n sent from the partner. For outbound connectors, this is the AS2-To header for the\n AS2 messages sent to the partner using the StartFileTransfer API operation. This ID cannot include spaces.
The As2Id is the AS2-name, as defined in the RFC 4130. For inbound transfers, this is the AS2-From header for the AS2 messages sent from the partner. For outbound connectors, this is the AS2-To header for the AS2 messages sent to the partner using the StartFileTransfer API operation. This ID cannot include spaces.
Specifies whether this policy enables Federal Information Processing Standards (FIPS).\n This parameter applies to both server and connector security policies.
" + "smithy.api#documentation": "Specifies whether this policy enables Federal Information Processing Standards (FIPS). This parameter applies to both server and connector security policies.
" } }, "SecurityPolicyName": { @@ -3665,31 +3681,31 @@ "SshCiphers": { "target": "com.amazonaws.transfer#SecurityPolicyOptions", "traits": { - "smithy.api#documentation": "Lists the enabled Secure Shell (SSH) cipher encryption algorithms in the security policy\n that is attached to the server or connector. This parameter applies to both server and\n connector security policies.
" + "smithy.api#documentation": "Lists the enabled Secure Shell (SSH) cipher encryption algorithms in the security policy that is attached to the server or connector. This parameter applies to both server and connector security policies.
" } }, "SshKexs": { "target": "com.amazonaws.transfer#SecurityPolicyOptions", "traits": { - "smithy.api#documentation": "Lists the enabled SSH key exchange (KEX) encryption algorithms in the security policy that\n is attached to the server or connector. This parameter applies to both server and connector\n security policies.
" + "smithy.api#documentation": "Lists the enabled SSH key exchange (KEX) encryption algorithms in the security policy that is attached to the server or connector. This parameter applies to both server and connector security policies.
" } }, "SshMacs": { "target": "com.amazonaws.transfer#SecurityPolicyOptions", "traits": { - "smithy.api#documentation": "Lists the enabled SSH message authentication code (MAC) encryption algorithms in the\n security policy that is attached to the server or connector. This parameter applies to both\n server and connector security policies.
" + "smithy.api#documentation": "Lists the enabled SSH message authentication code (MAC) encryption algorithms in the security policy that is attached to the server or connector. This parameter applies to both server and connector security policies.
" } }, "TlsCiphers": { "target": "com.amazonaws.transfer#SecurityPolicyOptions", "traits": { - "smithy.api#documentation": "Lists the enabled Transport Layer Security (TLS) cipher encryption algorithms in the\n security policy that is attached to the server.
\nThis parameter only applies to security policies for servers.
\nLists the enabled Transport Layer Security (TLS) cipher encryption algorithms in the security policy that is attached to the server.
This parameter only applies to security policies for servers.
Lists the host key algorithms for the security policy.
\nThis parameter only applies to security policies for connectors.
\nLists the host key algorithms for the security policy.
This parameter only applies to security policies for connectors.
Describes the properties of a security policy that you specify. For more information\n about security policies, see Working with security\n policies for servers or Working with security\n policies for SFTP connectors.
" + "smithy.api#documentation": "Describes the properties of a security policy that you specify. For more information about security policies, see Working with security policies for servers or Working with security policies for SFTP connectors.
" } }, "com.amazonaws.transfer#DescribedServer": { @@ -3725,83 +3741,83 @@ "target": "com.amazonaws.transfer#Certificate", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "Specifies the ARN of the Amazon Web ServicesCertificate Manager (ACM) certificate. Required when\n Protocols is set to FTPS.
Specifies the ARN of the Amazon Web ServicesCertificate Manager (ACM) certificate. Required when Protocols is set to FTPS.
The protocol settings that are configured for your server.
\n\n To indicate passive mode (for FTP and FTPS protocols), use the PassiveIp parameter.\n Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n
To ignore the error that is generated when the client attempts to use the SETSTAT command on a file that you are \n uploading to an Amazon S3 bucket, use the SetStatOption parameter. To have the Transfer Family server ignore the \n SETSTAT command and upload files without needing to make any changes to your SFTP client, set the value to \n ENABLE_NO_OP. If you set the SetStatOption parameter to ENABLE_NO_OP, Transfer Family \n generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a SETSTAT \n call.
To determine whether your Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the \n TlsSessionResumptionMode parameter.
\n As2Transports indicates the transport method for the AS2 messages. Currently, only HTTP is supported.
The protocol settings that are configured for your server.
To indicate passive mode (for FTP and FTPS protocols), use the PassiveIp parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.
To ignore the error that is generated when the client attempts to use the SETSTAT command on a file that you are uploading to an Amazon S3 bucket, use the SetStatOption parameter. To have the Transfer Family server ignore the SETSTAT command and upload files without needing to make any changes to your SFTP client, set the value to ENABLE_NO_OP. If you set the SetStatOption parameter to ENABLE_NO_OP, Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a SETSTAT call.
To determine whether your Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the TlsSessionResumptionMode parameter.
As2Transports indicates the transport method for the AS2 messages. Currently, only HTTP is supported.
Specifies the domain of the storage system that is used for file transfers. There are two domains\n available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The\n default value is S3.
" + "smithy.api#documentation": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.
" } }, "EndpointDetails": { "target": "com.amazonaws.transfer#EndpointDetails", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "The virtual private cloud (VPC) endpoint settings that are configured for your server.\n When you host your endpoint within your VPC, you can make your endpoint accessible only to resources\n within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over\n the internet. Your VPC's default security groups are automatically assigned to your\n endpoint.
" + "smithy.api#documentation": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.
" } }, "EndpointType": { "target": "com.amazonaws.transfer#EndpointType", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "Defines the type of endpoint that your server is connected to. If your server is connected\n to a VPC endpoint, your server isn't accessible over the public internet.
" + "smithy.api#documentation": "Defines the type of endpoint that your server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.
" } }, "HostKeyFingerprint": { "target": "com.amazonaws.transfer#HostKeyFingerprint", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Specifies the Base64-encoded SHA256 fingerprint of the server's host key. This value\n is equivalent to the output of the ssh-keygen -l -f my-new-server-key\n command.
Specifies the Base64-encoded SHA256 fingerprint of the server's host key. This value is equivalent to the output of the ssh-keygen -l -f my-new-server-key command.
Specifies information to call a customer-supplied authentication API. This field is not\n populated when the IdentityProviderType of a server is\n AWS_DIRECTORY_SERVICE or SERVICE_MANAGED.
Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of a server is AWS_DIRECTORY_SERVICE or SERVICE_MANAGED.
The mode of authentication for a server. The default value is\n SERVICE_MANAGED, which allows you to store and access user credentials within\n the Transfer Family service.
Use AWS_DIRECTORY_SERVICE to provide access to\n Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your\n on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to\n provide a Directory ID by using the IdentityProviderDetails parameter.
Use the API_GATEWAY value to integrate with an identity provider of your choosing. The\n API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call\n for authentication by using the IdentityProviderDetails parameter.
Use the AWS_LAMBDA value to directly use an Lambda function as your identity provider. \n If you choose this value, you must specify the ARN for the Lambda function in the Function parameter \n for the IdentityProviderDetails data type.
The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.
Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.
Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.
Use the AWS_LAMBDA value to directly use an Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter for the IdentityProviderDetails data type.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn\n on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in\n your CloudWatch logs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in your CloudWatch logs.
" } }, "PostAuthenticationLoginBanner": { "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.
\nThe SFTP protocol does not support post-authentication display banners.
\nSpecifies a string to display when users connect to a server. This string is displayed after the user authenticates.
The SFTP protocol does not support post-authentication display banners.
Specifies a string to display when users connect to a server. This string is displayed before the user authenticates.\n For example, the following banner displays details about using the system:
\n\n This system is for the use of authorized users only. Individuals using this computer system without authority,\n or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by\n system personnel.\n
Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:
This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.
Specifies the file transfer protocol or protocols over which your file transfer protocol\n client can connect to your server's endpoint. The available protocols are:
\n\n SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over\n SSH
\n FTPS (File Transfer Protocol Secure): File transfer with TLS\n encryption
\n FTP (File Transfer Protocol): Unencrypted file transfer
\n AS2 (Applicability Statement 2): used for transporting structured business-to-business data
If you select FTPS, you must choose a certificate stored in Certificate Manager (ACM) \n which is used to identify your server when clients connect to it over\n FTPS.
If Protocol includes either FTP or FTPS, then the\n EndpointType must be VPC and the\n IdentityProviderType must be either AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes FTP, then\n AddressAllocationIds cannot be associated.
If Protocol is set only to SFTP, the EndpointType\n can be set to PUBLIC and the IdentityProviderType can be set any of the supported identity types: \n SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes AS2, then the\n EndpointType must be VPC, and domain must be Amazon S3.
Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:
SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH
FTPS (File Transfer Protocol Secure): File transfer with TLS encryption
FTP (File Transfer Protocol): Unencrypted file transfer
AS2 (Applicability Statement 2): used for transporting structured business-to-business data
If you select FTPS, you must choose a certificate stored in Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.
If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be either AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes FTP, then AddressAllocationIds cannot be associated.
If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set any of the supported identity types: SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes AS2, then the EndpointType must be VPC, and domain must be Amazon S3.
The condition of the server that was described. A value of\n ONLINE indicates that the server can accept jobs and transfer files. A\n State value of OFFLINE means that the server cannot perform file\n transfer operations.
The states of STARTING and STOPPING indicate that the server is\n in an intermediate state, either not fully able to respond, or not fully offline. The values\n of START_FAILED or STOP_FAILED can indicate an error\n condition.
The condition of the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.
The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.
Specifies the key-value pairs that you can use to search for and group servers that were\n assigned to the server that was described.
" + "smithy.api#documentation": "Specifies the key-value pairs that you can use to search for and group servers that were assigned to the server that was described.
" } }, "UserCount": { "target": "com.amazonaws.transfer#UserCount", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Specifies the number of users that are assigned to a server you specified with the\n ServerId.
Specifies the number of users that are assigned to a server you specified with the ServerId.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects\n while the file is still being uploaded.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.
Specifies the log groups to which your server logs are sent.
\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:
\n\n arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*\n
For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*\n
If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty\n value for this parameter in an update-server call. For example:
\n update-server --server-id s-1234567890abcdef0 --structured-log-destinations\n
Specifies the log groups to which your server logs are sent.
To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:
arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*
For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*
If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example:
update-server --server-id s-1234567890abcdef0 --structured-log-destinations
Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.
\nBy default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry\n Type to FILE if you want a mapping to have a file target.
Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.
By default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target.
The list of egress IP addresses of this server. These IP addresses are only relevant\n for servers that use the AS2 protocol. They are used for sending asynchronous MDNs.
\nThese IP addresses are assigned automatically when you create an AS2 server. Additionally,\n if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well.
" + "smithy.api#documentation": "The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs.
These IP addresses are assigned automatically when you create an AS2 server. Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well.
" } } }, "traits": { - "smithy.api#documentation": "Describes the properties of a file transfer protocol-enabled server that was\n specified.
" + "smithy.api#documentation": "Describes the properties of a file transfer protocol-enabled server that was specified.
" } }, "com.amazonaws.transfer#DescribedUser": { @@ -3879,7 +3895,7 @@ "traits": { "aws.cloudformation#cfnAdditionalIdentifier": {}, "aws.cloudformation#cfnMutability": "read", - "smithy.api#documentation": "Specifies the unique Amazon Resource Name (ARN) for the user that was requested to be\n described.
", + "smithy.api#documentation": "Specifies the unique Amazon Resource Name (ARN) for the user that was requested to be described.
", "smithy.api#required": {} } }, @@ -3887,62 +3903,62 @@ "target": "com.amazonaws.transfer#HomeDirectory", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "The landing directory (folder) for a user when they log in to the server using the client.
\nA HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The landing directory (folder) for a user when they log in to the server using the client.
A HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should\n be visible to your user and how you want to make them visible. You must specify the\n Entry and Target pair, where Entry shows how the path\n is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you\n only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) \n role provides access to paths in Target. This value\n can be set only when HomeDirectoryType is set to\n LOGICAL.
In most cases, you can use this value instead of the session policy to lock your user\n down to the designated home directory (\"chroot\"). To do this, you can set\n Entry to '/' and set Target to the HomeDirectory\n parameter value.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings, using the HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you provide an absolute path using the HomeDirectory parameter. You cannot have both HomeDirectory and HomeDirectoryMappings in your template.
A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's\n access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName},\n ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
Specifies the full POSIX identity, including user ID (Uid), group ID\n (Gid), and any secondary groups IDs (SecondaryGids), that controls\n your users' access to your Amazon Elastic File System (Amazon EFS) file systems. The POSIX\n permissions that are set on files and directories in your file system determine the level of\n access your users get when transferring files into and out of your Amazon EFS file\n systems.
Specifies the full POSIX identity, including user ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), that controls your users' access to your Amazon Elastic File System (Amazon EFS) file systems. The POSIX permissions that are set on files and directories in your file system determine the level of access your users get when transferring files into and out of your Amazon EFS file systems.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
" } }, "SshPublicKeys": { "target": "com.amazonaws.transfer#SshPublicKeys", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Specifies the public key portion of the Secure Shell (SSH) keys stored for the described\n user.
" + "smithy.api#documentation": "Specifies the public key portion of the Secure Shell (SSH) keys stored for the described user.
To delete the public key body, set its value to zero keys, as shown here:
SshPublicKeys: []
Specifies the key-value pairs for the user requested. Tag can be used to search for and\n group users for a variety of purposes.
" + "smithy.api#documentation": "Specifies the key-value pairs for the user requested. Tag can be used to search for and group users for a variety of purposes.
" } }, "UserName": { "target": "com.amazonaws.transfer#UserName", "traits": { - "smithy.api#documentation": "Specifies the name of the user that was requested to be described. User names are used for\n authentication purposes. This is the string that will be used by your user when they log in to\n your server.
" + "smithy.api#documentation": "Specifies the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your server.
" } } }, @@ -3996,6 +4012,12 @@ "traits": { "smithy.api#documentation": "Key-value pairs that can be used to group and search for web apps. Tags are metadata attached to web apps for any purpose.
" } + }, + "WebAppEndpointPolicy": { + "target": "com.amazonaws.transfer#WebAppEndpointPolicy", + "traits": { + "smithy.api#documentation": " Setting for the type of endpoint policy for the web app. The default value is STANDARD.
If your web app was created in an Amazon Web Services GovCloud (US) Region, the value of this parameter can be FIPS, which indicates the web app endpoint is FIPS-compliant.
Returns a icon file data string (in base64 encoding).
" + "smithy.api#documentation": "Returns an icon file data string (in base64 encoding).
" } } }, @@ -4181,7 +4203,7 @@ } }, "traits": { - "smithy.api#documentation": "Specifies the details for the file location for the file that's being used in the workflow. Only applicable if you are using Amazon Elastic File Systems\n (Amazon EFS) for storage.
\n\n
" + "smithy.api#documentation": "Specifies the details for the file location for the file that's being used in the workflow. Only applicable if you are using Amazon Elastic File Systems (Amazon EFS) for storage.
" } }, "com.amazonaws.transfer#EfsFileSystemId": { @@ -4256,36 +4278,36 @@ "AddressAllocationIds": { "target": "com.amazonaws.transfer#AddressAllocationIds", "traits": { - "smithy.api#documentation": "
A list of address allocation IDs that are required to attach an Elastic IP address to your\n server's endpoint.
\nAn address allocation ID corresponds to the allocation ID of an Elastic IP address. This\n value can be retrieved from the allocationId field from the Amazon EC2\n Address\n data type. One way to retrieve this value is by calling the EC2 DescribeAddresses API.
This parameter is optional. Set this parameter if you want to make your VPC endpoint\n public-facing. For details, see Create an internet-facing endpoint for your server.
\nThis property can only be set as follows:
\n\n EndpointType must be set to\n VPC\n
The Transfer Family server must be offline.
\nYou cannot set this parameter for Transfer Family servers that use the FTP protocol.
\nThe server must already have SubnetIds populated (SubnetIds and AddressAllocationIds cannot be updated simultaneously).
\n AddressAllocationIds can't contain duplicates, and must be equal in length to SubnetIds. For example,\n if you have three subnet IDs, you must also specify three address allocation IDs.
Call the UpdateServer API to set or change this parameter.
A list of address allocation IDs that are required to attach an Elastic IP address to your server's endpoint.
An address allocation ID corresponds to the allocation ID of an Elastic IP address. This value can be retrieved from the allocationId field from the Amazon EC2 Address data type. One way to retrieve this value is by calling the EC2 DescribeAddresses API.
This parameter is optional. Set this parameter if you want to make your VPC endpoint public-facing. For details, see Create an internet-facing endpoint for your server.
This property can only be set as follows:
EndpointType must be set to VPC
The Transfer Family server must be offline.
You cannot set this parameter for Transfer Family servers that use the FTP protocol.
The server must already have SubnetIds populated (SubnetIds and AddressAllocationIds cannot be updated simultaneously).
AddressAllocationIds can't contain duplicates, and must be equal in length to SubnetIds. For example, if you have three subnet IDs, you must also specify three address allocation IDs.
Call the UpdateServer API to set or change this parameter.
A list of subnet IDs that are required to host your server endpoint in your VPC.
\nThis property can only be set when EndpointType is set to\n VPC.
A list of subnet IDs that are required to host your server endpoint in your VPC.
This property can only be set when EndpointType is set to VPC.
The identifier of the VPC endpoint.
\nThis property can only be set when EndpointType is set to\n VPC_ENDPOINT.
For more information, see\n https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.
\nThe identifier of the VPC endpoint.
This property can only be set when EndpointType is set to VPC_ENDPOINT.
For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.
The VPC identifier of the VPC in which a server's endpoint will be hosted.
\nThis property can only be set when EndpointType is set to\n VPC.
The VPC identifier of the VPC in which a server's endpoint will be hosted.
This property can only be set when EndpointType is set to VPC.
A list of security groups IDs that are available to attach to your server's\n endpoint.
\nThis property can only be set when EndpointType is set to\n VPC.
You can edit the SecurityGroupIds property in the UpdateServer API only if you are changing the EndpointType from\n PUBLIC or VPC_ENDPOINT to VPC. To change security\n groups associated with your server's VPC endpoint after creation, use the Amazon EC2\n ModifyVpcEndpoint API.
A list of security group IDs that are available to attach to your server's endpoint.
This property can only be set when EndpointType is set to VPC.
You can edit the SecurityGroupIds property in the UpdateServer API only if you are changing the EndpointType from PUBLIC or VPC_ENDPOINT to VPC. To change security groups associated with your server's VPC endpoint after creation, use the Amazon EC2 ModifyVpcEndpoint API.
The virtual private cloud (VPC) endpoint settings that are configured for your file\n transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server\n and resources only within your VPC. To control incoming internet traffic, invoke the\n UpdateServer API and attach an Elastic IP address to your server's\n endpoint.
After May 19, 2021, you won't be able to create a server using\n EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already\n done so before May 19, 2021. If you have already created servers with\n EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021,\n you will not be affected. After this date, use\n EndpointType=VPC.
For more information, see\n https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.
\nIt is recommended that you use VPC as the EndpointType. With\n this endpoint type, you have the option to directly associate up to three Elastic IPv4\n addresses (BYO IP included) with your server's endpoint and use VPC security groups to\n restrict traffic by the client's public IP address. This is not possible with\n EndpointType set to VPC_ENDPOINT.
The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP address to your server's endpoint.
After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.
For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.
It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.
Specifies the error type.
\n\n ALREADY_EXISTS: occurs for a copy step, if the overwrite option is not selected and a file with the same name already exists in the target location.
\n BAD_REQUEST: a general bad request: for example, a step that attempts to\n tag an EFS file returns BAD_REQUEST, as only S3 files can be tagged.
\n CUSTOM_STEP_FAILED: occurs when the custom step provided a callback that indicates failure.
\n INTERNAL_SERVER_ERROR: a catch-all error that can occur for a variety of\n reasons.
\n NOT_FOUND: occurs when a requested entity, for example a source file for\n a copy step, does not exist.
\n PERMISSION_DENIED: occurs if your policy does not contain the correct\n permissions to complete one or more of the steps in the workflow.
\n TIMEOUT: occurs when the execution times out.
You can set the TimeoutSeconds for a custom step, anywhere from 1 second to 1800 seconds (30 minutes).
\n THROTTLED: occurs if you exceed the new execution refill rate of one\n workflow per second.
Specifies the error type.
ALREADY_EXISTS: occurs for a copy step, if the overwrite option is not selected and a file with the same name already exists in the target location.
BAD_REQUEST: a general bad request: for example, a step that attempts to tag an EFS file returns BAD_REQUEST, as only S3 files can be tagged.
CUSTOM_STEP_FAILED: occurs when the custom step provided a callback that indicates failure.
INTERNAL_SERVER_ERROR: a catch-all error that can occur for a variety of reasons.
NOT_FOUND: occurs when a requested entity, for example a source file for a copy step, does not exist.
PERMISSION_DENIED: occurs if your policy does not contain the correct permissions to complete one or more of the steps in the workflow.
TIMEOUT: occurs when the execution times out.
You can set the TimeoutSeconds for a custom step, anywhere from 1 second to 1800 seconds (30 minutes).
THROTTLED: occurs if you exceed the new execution refill rate of one workflow per second.
One of the available step types.
\n\n \n COPY\n - Copy the file to another location.
\n \n CUSTOM\n - Perform a custom step with an Lambda function target.
\n \n DECRYPT\n - Decrypt a file that was encrypted before it was uploaded.
\n \n DELETE\n - Delete the file.
\n \n TAG\n - Add a tag to the file.
One of the available step types.
COPY - Copy the file to another location.
CUSTOM - Perform a custom step with a Lambda function target.
DECRYPT - Decrypt a file that was encrypted before it was uploaded.
DELETE - Delete the file.
TAG - Add a tag to the file.
Specifies the details for an error, if it occurred during execution of the specified\n workflow step.
" + "smithy.api#documentation": "Specifies the details for an error, if it occurred during execution of the specified workflow step.
" } } }, @@ -4522,7 +4544,7 @@ "S3FileLocation": { "target": "com.amazonaws.transfer#S3FileLocation", "traits": { - "smithy.api#documentation": "Specifies the S3 details for the file being used, such as bucket, ETag, and so\n forth.
" + "smithy.api#documentation": "Specifies the S3 details for the file being used, such as bucket, ETag, and so forth.
" } }, "EfsFileLocation": { @@ -4601,12 +4623,12 @@ "Type": { "target": "com.amazonaws.transfer#MapType", "traits": { - "smithy.api#documentation": "Specifies the type of mapping. Set the type to FILE if you want the mapping to point to a file, or DIRECTORY for the directory to point to a directory.
By default, home directory mappings have a Type of DIRECTORY when you create a Transfer Family server. You would need to explicitly\n set Type to FILE if you want a mapping to have a file\n target.
Specifies the type of mapping. Set the type to FILE if you want the mapping to point to a file, or DIRECTORY for the directory to point to a directory.
By default, home directory mappings have a Type of DIRECTORY when you create a Transfer Family server. You would need to explicitly set Type to FILE if you want a mapping to have a file target.
Represents an object that contains entries and targets for\n HomeDirectoryMappings.
The following is an Entry and Target pair example for chroot.
\n [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]\n
Represents an object that contains entries and targets for HomeDirectoryMappings.
The following is an Entry and Target pair example for chroot.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
This parameter is only applicable if your IdentityProviderType is API_GATEWAY. Provides the type of InvocationRole used to authenticate the user\n account.
This parameter is only applicable if your IdentityProviderType is API_GATEWAY. Provides the type of InvocationRole used to authenticate the user account.
For SFTP-enabled servers, and for custom identity providers only, you\n can specify whether to authenticate using a password, SSH key pair, or both.
\n\n PASSWORD - users must provide their password to connect.
\n PUBLIC_KEY - users must provide their private key to connect.
\n PUBLIC_KEY_OR_PASSWORD - users can authenticate with either their password or their key. This is the default value.
\n PUBLIC_KEY_AND_PASSWORD - users must provide both their private key and their password to connect.\n The server checks the key first, and then if the key is valid, the system prompts for a password. \n If the private key provided does not match the public key that is stored, authentication fails.
For SFTP-enabled servers, and for custom identity providers only, you can specify whether to authenticate using a password, SSH key pair, or both.
PASSWORD - users must provide their password to connect.
PUBLIC_KEY - users must provide their private key to connect.
PUBLIC_KEY_OR_PASSWORD - users can authenticate with either their password or their key. This is the default value.
PUBLIC_KEY_AND_PASSWORD - users must provide both their private key and their password to connect. The server checks the key first, and then if the key is valid, the system prompts for a password. If the private key provided does not match the public key that is stored, authentication fails.
Returns information related to the type of user authentication that is in use for a file\n transfer protocol-enabled server's users. A server can have only one method of\n authentication.
" + "smithy.api#documentation": "Returns information related to the type of user authentication that is in use for a file transfer protocol-enabled server's users. A server can have only one method of authentication.
" } }, "com.amazonaws.transfer#IdentityProviderType": { @@ -4781,7 +4803,7 @@ } }, "traits": { - "smithy.api#documentation": "The mode of authentication for a server. The default value is\n SERVICE_MANAGED, which allows you to store and access user credentials within\n the Transfer Family service.
Use AWS_DIRECTORY_SERVICE to provide access to\n Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your\n on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to\n provide a Directory ID by using the IdentityProviderDetails parameter.
Use the API_GATEWAY value to integrate with an identity provider of your choosing. The\n API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call\n for authentication by using the IdentityProviderDetails parameter.
Use the AWS_LAMBDA value to directly use an Lambda function as your identity provider. \n If you choose this value, you must specify the ARN for the Lambda function in the Function parameter \n for the IdentityProviderDetails data type.
The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.
Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.
Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.
Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter for the IdentityProviderDetails data type.
Imports the signing and encryption certificates that you need to create local (AS2)\n profiles and partner\n profiles.
" + "smithy.api#documentation": "Imports the signing and encryption certificates that you need to create local (AS2) profiles and partner profiles.
You can import both the certificate and its chain in the Certificate parameter.
If you use the Certificate parameter to upload both the certificate and its chain, don't use the CertificateChain parameter.
Specifies how this certificate is used. It can be used in the following ways:
\n\n SIGNING: For signing AS2 messages
\n ENCRYPTION: For encrypting AS2 messages
\n TLS: For securing AS2 communications sent over HTTPS
Specifies how this certificate is used. It can be used in the following ways:
SIGNING: For signing AS2 messages
ENCRYPTION: For encrypting AS2 messages
TLS: For securing AS2 communications sent over HTTPS
For the CLI, provide a file path for a certificate in URI format. For example, --certificate file://encryption-cert.pem.\n Alternatively, you can provide the raw content.
For the SDK, specify the raw content of a certificate file. For example, --certificate \"`cat encryption-cert.pem`\".
For the CLI, provide a file path for a certificate in URI format. For example, --certificate file://encryption-cert.pem. Alternatively, you can provide the raw content.
For the SDK, specify the raw content of a certificate file. For example, --certificate \"`cat encryption-cert.pem`\".
You can provide both the certificate and its chain in this parameter, without needing to use the CertificateChain parameter. If you use this parameter for both the certificate and its chain, do not use the CertificateChain parameter.
An optional list of certificates that make up the chain for the certificate that's being\n imported.
" + "smithy.api#documentation": "An optional list of certificates that make up the chain for the certificate that's being imported.
" } }, "PrivateKey": { "target": "com.amazonaws.transfer#PrivateKeyType", "traits": { - "smithy.api#documentation": "For the CLI, provide a file path for a private key in URI format.For example, --private-key file://encryption-key.pem.\n Alternatively, you can provide the raw content of the private key file.
For the SDK, specify the raw content of a private key file. For example, --private-key \"`cat encryption-key.pem`\"\n
For the CLI, provide a file path for a private key in URI format. For example, --private-key file://encryption-key.pem. Alternatively, you can provide the raw content of the private key file.
For the SDK, specify the raw content of a private key file. For example, --private-key \"`cat encryption-key.pem`\"
An optional date that specifies when the certificate becomes active.
" + "smithy.api#documentation": "An optional date that specifies when the certificate becomes active. If you do not specify a value, ActiveDate takes the same value as NotBeforeDate, which is specified by the CA.
An optional date that specifies when the certificate becomes inactive.
" + "smithy.api#documentation": "An optional date that specifies when the certificate becomes inactive. If you do not specify a value, InactiveDate takes the same value as NotAfterDate, which is specified by the CA.
Adds a host key to the server that's specified by the ServerId\n parameter.
Adds a host key to the server that's specified by the ServerId parameter.
The private key portion of an SSH key pair.
\nTransfer Family accepts RSA, ECDSA, and ED25519 keys.
", + "smithy.api#documentation": "The private key portion of an SSH key pair.
Transfer Family accepts RSA, ECDSA, and ED25519 keys.
", "smithy.api#required": {} } }, @@ -5005,7 +5027,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds a Secure Shell (SSH) public key to a Transfer Family user identified by a\n UserName value assigned to the specific file transfer protocol-enabled server,\n identified by ServerId.
The response returns the UserName value, the ServerId value, and\n the name of the SshPublicKeyId.
Adds a Secure Shell (SSH) public key to a Transfer Family user identified by a UserName value assigned to the specific file transfer protocol-enabled server, identified by ServerId.
The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.
The public key portion of an SSH key pair.
\nTransfer Family accepts RSA, ECDSA, and ED25519 keys.
", + "smithy.api#documentation": "The public key portion of an SSH key pair.
Transfer Family accepts RSA, ECDSA, and ED25519 keys.
", "smithy.api#required": {} } }, @@ -5063,7 +5085,7 @@ } }, "traits": { - "smithy.api#documentation": "Identifies the user, the server they belong to, and the identifier of the SSH public key\n associated with that user. A user can have more than one key on each server that they are\n associated with.
", + "smithy.api#documentation": "Identifies the user, the server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.
", "smithy.api#output": {} } }, @@ -5079,7 +5101,7 @@ "EfsFileLocation": { "target": "com.amazonaws.transfer#EfsFileLocation", "traits": { - "smithy.api#documentation": "Specifies the details for the Amazon Elastic File System (Amazon EFS) file that's being\n decrypted.
" + "smithy.api#documentation": "Specifies the details for the Amazon Elastic File System (Amazon EFS) file that's being decrypted.
" } } }, @@ -5183,7 +5205,7 @@ "NextToken": { "target": "com.amazonaws.transfer#NextToken", "traits": { - "smithy.api#documentation": "When you can get additional results from the ListAccesses call, a\n NextToken parameter is returned in the output. You can then pass in a\n subsequent command to the NextToken parameter to continue listing additional\n accesses.
When you can get additional results from the ListAccesses call, a NextToken parameter is returned in the output. You can then pass in a subsequent command to the NextToken parameter to continue listing additional accesses.
When you can get additional results from the ListAccesses call, a\n NextToken parameter is returned in the output. You can then pass in a\n subsequent command to the NextToken parameter to continue listing additional\n accesses.
When you can get additional results from the ListAccesses call, a NextToken parameter is returned in the output. You can then pass in a subsequent command to the NextToken parameter to continue listing additional accesses.
Returns the accesses and their properties for the ServerId value that you\n specify.
Returns the accesses and their properties for the ServerId value that you specify.
Returns a list of the agreements for the server that's identified by the\n ServerId that you supply. If you want to limit the results to a certain number,\n supply a value for the MaxResults parameter. If you ran the command previously\n and received a value for NextToken, you can supply that value to continue listing\n agreements from where you left off.
Returns a list of the agreements for the server that's identified by the ServerId that you supply. If you want to limit the results to a certain number, supply a value for the MaxResults parameter. If you ran the command previously and received a value for NextToken, you can supply that value to continue listing agreements from where you left off.
When you can get additional results from the ListAgreements call, a\n NextToken parameter is returned in the output. You can then pass in a\n subsequent command to the NextToken parameter to continue listing additional\n agreements.
When you can get additional results from the ListAgreements call, a NextToken parameter is returned in the output. You can then pass in a subsequent command to the NextToken parameter to continue listing additional agreements.
Returns a token that you can use to call ListAgreements again and receive\n additional results, if there are any.
Returns a token that you can use to call ListAgreements again and receive additional results, if there are any.
Returns a list of the current certificates that have been imported into Transfer Family. If you want to\n limit the results to a certain number, supply a value for the MaxResults\n parameter. If you ran the command previously and received a value for the\n NextToken parameter, you can supply that value to continue listing certificates\n from where you left off.
Returns a list of the current certificates that have been imported into Transfer Family. If you want to limit the results to a certain number, supply a value for the MaxResults parameter. If you ran the command previously and received a value for the NextToken parameter, you can supply that value to continue listing certificates from where you left off.
When you can get additional results from the ListCertificates call, a\n NextToken parameter is returned in the output. You can then pass in a\n subsequent command to the NextToken parameter to continue listing additional\n certificates.
When you can get additional results from the ListCertificates call, a NextToken parameter is returned in the output. You can then pass in a subsequent command to the NextToken parameter to continue listing additional certificates.
Returns an array of the certificates that are specified in the\n ListCertificates call.
Returns an array of the certificates that are specified in the ListCertificates call.
When you can get additional results from the ListConnectors call, a\n NextToken parameter is returned in the output. You can then pass in a\n subsequent command to the NextToken parameter to continue listing additional\n connectors.
When you can get additional results from the ListConnectors call, a NextToken parameter is returned in the output. You can then pass in a subsequent command to the NextToken parameter to continue listing additional connectors.
Returns a token that you can use to call ListConnectors again and receive\n additional results, if there are any.
Returns a token that you can use to call ListConnectors again and receive additional results, if there are any.
Lists all in-progress executions for the specified workflow.
\nIf the specified workflow ID cannot be found, ListExecutions returns a \n ResourceNotFound exception.
Lists all in-progress executions for the specified workflow.
If the specified workflow ID cannot be found, ListExecutions returns a ResourceNotFound exception.
\n ListExecutions returns the NextToken parameter in the output.\n You can then pass the NextToken parameter in a subsequent command to\n continue listing additional executions.
\n This is useful for pagination, for instance.\n If you have 100 executions for a workflow, you might only want to list first 10. If so, call the API by specifying the max-results:\n
\n aws transfer list-executions --max-results 10\n
\n This returns details for the first 10 executions, as well as the pointer (NextToken) to the eleventh execution.\n You can now call the API again, supplying the NextToken value you received:\n
\n aws transfer list-executions --max-results 10 --next-token $somePointerReturnedFromPreviousListResult\n
\n This call returns the next 10 executions, the 11th through the 20th. You can then repeat the call until the details\n for all 100 executions have been returned.\n
" + "smithy.api#documentation": " ListExecutions returns the NextToken parameter in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional executions.
This is useful for pagination, for instance. If you have 100 executions for a workflow, you might only want to list the first 10. If so, call the API by specifying the max-results:
aws transfer list-executions --max-results 10
This returns details for the first 10 executions, as well as the pointer (NextToken) to the eleventh execution. You can now call the API again, supplying the NextToken value you received:
aws transfer list-executions --max-results 10 --next-token $somePointerReturnedFromPreviousListResult
This call returns the next 10 executions, the 11th through the 20th. You can then repeat the call until the details for all 100 executions have been returned.
" } }, "WorkflowId": { @@ -5533,7 +5555,7 @@ "NextToken": { "target": "com.amazonaws.transfer#NextToken", "traits": { - "smithy.api#documentation": "\n ListExecutions returns the NextToken parameter in the output.\n You can then pass the NextToken parameter in a subsequent command to\n continue listing additional executions.
ListExecutions returns the NextToken parameter in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional executions.
\n Returns real-time updates and detailed information on the status of each individual file being transferred in a specific file transfer operation. \n You specify the file transfer by providing its ConnectorId and its TransferId.
File transfer results are available up to 7 days after an operation has been requested.
\n Returns real-time updates and detailed information on the status of each individual file being transferred in a specific file transfer operation. You specify the file transfer by providing its ConnectorId and its TransferId.
File transfer results are available up to 7 days after an operation has been requested.
The maximum number of files to return in a single page. Note that currently you can specify a maximum of 10 file paths in a single\n StartFileTransfer operation. Thus, the maximum\n number of file transfer results that can be returned in a single page is 10.\n
" + "smithy.api#documentation": "The maximum number of files to return in a single page. Note that currently you can specify a maximum of 10 file paths in a single StartFileTransfer operation. Thus, the maximum number of file transfer results that can be returned in a single page is 10.
" } } }, @@ -5631,14 +5653,14 @@ "FileTransferResults": { "target": "com.amazonaws.transfer#ConnectorFileTransferResults", "traits": { - "smithy.api#documentation": "Returns the details for the files transferred in the transfer identified by the TransferId and ConnectorId specified.
\n FilePath: the filename and path to where the file was sent to or retrieved from.
\n StatusCode: current status for the transfer. The status returned is one of the following values:QUEUED,\n IN_PROGRESS, COMPLETED, or FAILED\n
\n FailureCode: for transfers that fail, this parameter contains a code indicating the reason. For example, RETRIEVE_FILE_NOT_FOUND\n
\n FailureMessage: for transfers that fail, this parameter describes the reason for the failure.
Returns the details for the files transferred in the transfer identified by the TransferId and ConnectorId specified.
FilePath: the filename and path to where the file was sent to or retrieved from.
StatusCode: current status for the transfer. The status returned is one of the following values:QUEUED, IN_PROGRESS, COMPLETED, or FAILED
FailureCode: for transfers that fail, this parameter contains a code indicating the reason. For example, RETRIEVE_FILE_NOT_FOUND
FailureMessage: for transfers that fail, this parameter describes the reason for the failure.
Returns a token that you can use to call ListFileTransferResults again and receive\n additional results, if there are any (against the same TransferId.
Returns a token that you can use to call ListFileTransferResults again and receive additional results, if there are any (against the same TransferId.
Returns a list of host keys for the server that's specified by the ServerId\n parameter.
Returns a list of host keys for the server that's specified by the ServerId parameter.
When there are additional results that were not returned, a NextToken\n parameter is returned. You can use that value for a subsequent call to\n ListHostKeys to continue listing results.
When there are additional results that were not returned, a NextToken parameter is returned. You can use that value for a subsequent call to ListHostKeys to continue listing results.
Returns a token that you can use to call ListHostKeys again and receive\n additional results, if there are any.
Returns a token that you can use to call ListHostKeys again and receive additional results, if there are any.
Returns a list of the profiles for your system. If you want to limit the results to a\n certain number, supply a value for the MaxResults parameter. If you ran the\n command previously and received a value for NextToken, you can supply that value\n to continue listing profiles from where you left off.
Returns a list of the profiles for your system. If you want to limit the results to a certain number, supply a value for the MaxResults parameter. If you ran the command previously and received a value for NextToken, you can supply that value to continue listing profiles from where you left off.
When there are additional results that were not returned, a NextToken\n parameter is returned. You can use that value for a subsequent call to\n ListProfiles to continue listing results.
When there are additional results that were not returned, a NextToken parameter is returned. You can use that value for a subsequent call to ListProfiles to continue listing results.
Indicates whether to list only LOCAL type profiles or only PARTNER type profiles. \n If not supplied in the request, the command lists all types of profiles.
Indicates whether to list only LOCAL type profiles or only PARTNER type profiles. If not supplied in the request, the command lists all types of profiles.
Returns a token that you can use to call ListProfiles again and receive\n additional results, if there are any.
Returns a token that you can use to call ListProfiles again and receive additional results, if there are any.
Lists the security policies that are attached to your servers and SFTP connectors. For more information\n about security policies, see Working with security\n policies for servers or Working with security\n policies for SFTP connectors.
", + "smithy.api#documentation": "Lists the security policies that are attached to your servers and SFTP connectors. For more information about security policies, see Working with security policies for servers or Working with security policies for SFTP connectors.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -5853,13 +5875,13 @@ "MaxResults": { "target": "com.amazonaws.transfer#MaxResults", "traits": { - "smithy.api#documentation": "Specifies the number of security policies to return as a response to the\n ListSecurityPolicies query.
Specifies the number of security policies to return as a response to the ListSecurityPolicies query.
When additional results are obtained from the ListSecurityPolicies command, a\n NextToken parameter is returned in the output. You can then pass the\n NextToken parameter in a subsequent command to continue listing additional\n security policies.
When additional results are obtained from the ListSecurityPolicies command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional security policies.
When you can get additional results from the ListSecurityPolicies operation,\n a NextToken parameter is returned in the output. In a following command, you can\n pass in the NextToken parameter to continue listing security policies.
When you can get additional results from the ListSecurityPolicies operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing security policies.
Lists the file transfer protocol-enabled servers that are associated with your Amazon Web Services\n account.
", + "smithy.api#documentation": "Lists the file transfer protocol-enabled servers that are associated with your Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -5932,13 +5954,13 @@ "MaxResults": { "target": "com.amazonaws.transfer#MaxResults", "traits": { - "smithy.api#documentation": "Specifies the number of servers to return as a response to the ListServers\n query.
Specifies the number of servers to return as a response to the ListServers query.
When additional results are obtained from the ListServers command, a\n NextToken parameter is returned in the output. You can then pass the\n NextToken parameter in a subsequent command to continue listing additional\n servers.
When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional servers.
When you can get additional results from the ListServers operation, a\n NextToken parameter is returned in the output. In a following command, you can\n pass in the NextToken parameter to continue listing additional servers.
When you can get additional results from the ListServers operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing additional servers.
Lists all of the tags associated with the Amazon Resource Name (ARN) that you specify. The\n resource can be a user, server, or role.
", + "smithy.api#documentation": "Lists all of the tags associated with the Amazon Resource Name (ARN) that you specify. The resource can be a user, server, or role.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -6006,20 +6028,20 @@ "Arn": { "target": "com.amazonaws.transfer#Arn", "traits": { - "smithy.api#documentation": "Requests the tags associated with a particular Amazon Resource Name (ARN). An ARN is an\n identifier for a specific Amazon Web Services resource, such as a server, user, or role.
", + "smithy.api#documentation": "Requests the tags associated with a particular Amazon Resource Name (ARN). An ARN is an identifier for a specific Amazon Web Services resource, such as a server, user, or role.
", "smithy.api#required": {} } }, "MaxResults": { "target": "com.amazonaws.transfer#MaxResults", "traits": { - "smithy.api#documentation": "Specifies the number of tags to return as a response to the\n ListTagsForResource request.
Specifies the number of tags to return as a response to the ListTagsForResource request.
When you request additional results from the ListTagsForResource operation, a\n NextToken parameter is returned in the input. You can then pass in a subsequent\n command to the NextToken parameter to continue listing additional tags.
When you request additional results from the ListTagsForResource operation, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional tags.
When you can get additional results from the ListTagsForResource call, a\n NextToken parameter is returned in the output. You can then pass in a\n subsequent command to the NextToken parameter to continue listing additional\n tags.
When you can get additional results from the ListTagsForResource call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional tags.
Key-value pairs that are assigned to a resource, usually for the purpose of grouping and\n searching for items. Tags are metadata that you define.
" + "smithy.api#documentation": "Key-value pairs that are assigned to a resource, usually for the purpose of grouping and searching for items. Tags are metadata that you define.
" } } }, @@ -6084,7 +6106,7 @@ "transfer:ListUsers" ] }, - "smithy.api#documentation": "Lists the users for a file transfer protocol-enabled server that you specify by passing\n the ServerId parameter.
Lists the users for a file transfer protocol-enabled server that you specify by passing the ServerId parameter.
Specifies the number of users to return as a response to the ListUsers\n request.
Specifies the number of users to return as a response to the ListUsers request.
If there are additional results from the ListUsers call, a\n NextToken parameter is returned in the output. You can then pass \n the NextToken to a subsequent ListUsers command, to continue listing additional\n users.
If there are additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass the NextToken to a subsequent ListUsers command, to continue listing additional users.
When you can get additional results from the ListUsers call, a\n NextToken parameter is returned in the output. You can then pass in a\n subsequent command to the NextToken parameter to continue listing additional\n users.
When you can get additional results from the ListUsers call, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional users.
Returns the Transfer Family users and their properties for the ServerId value that\n you specify.
Returns the Transfer Family users and their properties for the ServerId value that you specify.
Returns the NextToken parameter in the output.\n You can then pass the NextToken parameter in a subsequent command to\n continue listing additional web apps.
Returns the NextToken parameter in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional web apps.
Provide this value for the NextToken parameter in a subsequent command to\n continue listing additional web apps.
Provide this value for the NextToken parameter in a subsequent command to continue listing additional web apps.
\n ListWorkflows returns the NextToken parameter in the output.\n You can then pass the NextToken parameter in a subsequent command to\n continue listing additional workflows.
ListWorkflows returns the NextToken parameter in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional workflows.
\n ListWorkflows returns the NextToken parameter in the output.\n You can then pass the NextToken parameter in a subsequent command to\n continue listing additional workflows.
ListWorkflows returns the NextToken parameter in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional workflows.
The landing directory (folder) for a user when they log in to the server using the client.
\nA HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The landing directory (folder) for a user when they log in to the server using the client.
A HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings, using the HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you provide an absolute path using the HomeDirectory parameter. You cannot have both HomeDirectory and HomeDirectoryMappings in your template.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
" } }, "ExternalId": { "target": "com.amazonaws.transfer#ExternalId", "traits": { - "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory.\n The users of the group that you associate have access to your Amazon S3 or Amazon EFS\n resources over the enabled protocols using Transfer Family. If you know the group name,\n you can view the SID values by running the following command using Windows PowerShell.
\n\n Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid\n
In that command, replace YourGroupName with the name of your Active Directory group.
\nThe regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces.\n You can also include underscores or any of the following characters: =,.@:/-
" + "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory. The users of the group that you associate have access to your Amazon S3 or Amazon EFS resources over the enabled protocols using Transfer Family. If you know the group name, you can view the SID values by running the following command using Windows PowerShell.
Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid
In that command, replace YourGroupName with the name of your Active Directory group.
The regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@:/-
" } } }, @@ -6362,7 +6384,7 @@ "Description": { "target": "com.amazonaws.transfer#Description", "traits": { - "smithy.api#documentation": "The current description for the agreement. You can change it by calling the\n UpdateAgreement operation and providing a new description.
The current description for the agreement. You can change it by calling the UpdateAgreement operation and providing a new description.
Specifies how this certificate is used. It can be used in the following ways:
\n\n SIGNING: For signing AS2 messages
\n ENCRYPTION: For encrypting AS2 messages
\n TLS: For securing AS2 communications sent over HTTPS
Specifies how this certificate is used. It can be used in the following ways:
SIGNING: For signing AS2 messages
ENCRYPTION: For encrypting AS2 messages
TLS: For securing AS2 communications sent over HTTPS
The certificate can be either ACTIVE, PENDING_ROTATION, or\n INACTIVE. PENDING_ROTATION means that this certificate will\n replace the current certificate when it expires.
The certificate can be either ACTIVE, PENDING_ROTATION, or INACTIVE. PENDING_ROTATION means that this certificate will replace the current certificate when it expires.
An optional date that specifies when the certificate becomes active.
" + "smithy.api#documentation": "An optional date that specifies when the certificate becomes active. If you do not specify a value, ActiveDate takes the same value as NotBeforeDate, which is specified by the CA.
An optional date that specifies when the certificate becomes inactive.
" + "smithy.api#documentation": "An optional date that specifies when the certificate becomes inactive. If you do not specify a value, InactiveDate takes the same value as NotAfterDate, which is specified by the CA.
The type for the certificate. If a private key has been specified for the certificate, its\n type is CERTIFICATE_WITH_PRIVATE_KEY. If there is no private key, the type is\n CERTIFICATE.
The type for the certificate. If a private key has been specified for the certificate, its type is CERTIFICATE_WITH_PRIVATE_KEY. If there is no private key, the type is CERTIFICATE.
A structure that describes the Amazon S3 or EFS file location.\n This is the file location when the execution begins: if the file is being copied,\n this is the initial (as opposed to destination) file location.
" + "smithy.api#documentation": "A structure that describes the Amazon S3 or EFS file location. This is the file location when the execution begins: if the file is being copied, this is the initial (as opposed to destination) file location.
" } }, "ServiceMetadata": { @@ -6563,7 +6585,7 @@ "Type": { "target": "com.amazonaws.transfer#HostKeyType", "traits": { - "smithy.api#documentation": "The encryption algorithm that is used for the host key. The Type parameter is specified by using one of the\n following values:
\n ssh-rsa\n
\n ssh-ed25519\n
\n ecdsa-sha2-nistp256\n
\n ecdsa-sha2-nistp384\n
\n ecdsa-sha2-nistp521\n
The encryption algorithm that is used for the host key. The Type parameter is specified by using one of the following values:
ssh-rsa
ssh-ed25519
ecdsa-sha2-nistp256
ecdsa-sha2-nistp384
ecdsa-sha2-nistp521
The As2Id is the AS2-name, as defined in the \n RFC 4130. For inbound transfers, this is the AS2-From header for the AS2 messages\n sent from the partner. For outbound connectors, this is the AS2-To header for the\n AS2 messages sent to the partner using the StartFileTransfer API operation. This ID cannot include spaces.
The As2Id is the AS2-name, as defined in the RFC 4130. For inbound transfers, this is the AS2-From header for the AS2 messages sent from the partner. For outbound connectors, this is the AS2-To header for the AS2 messages sent to the partner using the StartFileTransfer API operation. This ID cannot include spaces.
Indicates whether to list only LOCAL type profiles or only PARTNER type profiles. \n If not supplied in the request, the command lists all types of profiles.
Indicates whether to list only LOCAL type profiles or only PARTNER type profiles. If not supplied in the request, the command lists all types of profiles.
Specifies the domain of the storage system that is used for file transfers. There are two domains\n available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The\n default value is S3.
" + "smithy.api#documentation": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.
" } }, "IdentityProviderType": { "target": "com.amazonaws.transfer#IdentityProviderType", "traits": { - "smithy.api#documentation": "The mode of authentication for a server. The default value is\n SERVICE_MANAGED, which allows you to store and access user credentials within\n the Transfer Family service.
Use AWS_DIRECTORY_SERVICE to provide access to\n Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your\n on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to\n provide a Directory ID by using the IdentityProviderDetails parameter.
Use the API_GATEWAY value to integrate with an identity provider of your choosing. The\n API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call\n for authentication by using the IdentityProviderDetails parameter.
Use the AWS_LAMBDA value to directly use an Lambda function as your identity provider. \n If you choose this value, you must specify the ARN for the Lambda function in the Function parameter \n for the IdentityProviderDetails data type.
The mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the Transfer Family service.
Use AWS_DIRECTORY_SERVICE to provide access to Active Directory groups in Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in Amazon Web Services using AD Connector. This option also requires you to provide a Directory ID by using the IdentityProviderDetails parameter.
Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the IdentityProviderDetails parameter.
Use the AWS_LAMBDA value to directly use a Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the Function parameter for the IdentityProviderDetails data type.
Specifies the type of VPC endpoint that your server is connected to. If your server is\n connected to a VPC endpoint, your server isn't accessible over the public internet.
" + "smithy.api#documentation": "Specifies the type of VPC endpoint that your server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.
" } }, "LoggingRole": { "target": "com.amazonaws.transfer#Role", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn\n on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in\n your CloudWatch logs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in your CloudWatch logs.
" } }, "ServerId": { @@ -6664,13 +6686,13 @@ "State": { "target": "com.amazonaws.transfer#State", "traits": { - "smithy.api#documentation": "The condition of the server that was described. A value of\n ONLINE indicates that the server can accept jobs and transfer files. A\n State value of OFFLINE means that the server cannot perform file\n transfer operations.
The states of STARTING and STOPPING indicate that the server is\n in an intermediate state, either not fully able to respond, or not fully offline. The values\n of START_FAILED or STOP_FAILED can indicate an error\n condition.
The condition of the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.
The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.
Specifies the number of users that are assigned to a server you specified with the\n ServerId.
Specifies the number of users that are assigned to a server you specified with the ServerId.
Provides the unique Amazon Resource Name (ARN) for the user that you want to learn\n about.
", + "smithy.api#documentation": "Provides the unique Amazon Resource Name (ARN) for the user that you want to learn about.
", "smithy.api#required": {} } }, "HomeDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { - "smithy.api#documentation": "The landing directory (folder) for a user when they log in to the server using the client.
\nA HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The landing directory (folder) for a user when they log in to the server using the client.
A HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings, using the HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you provide an absolute path using the HomeDirectory parameter. You cannot have both HomeDirectory and HomeDirectoryMappings in your template.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.
\nThe IAM role that controls your users' access to your Amazon S3 bucket for servers with Domain=S3, or your EFS file system for servers with Domain=EFS. \n
The policies attached to this role determine the level of access you want to provide your users when \n transferring files into and out of your S3 buckets or EFS file systems.
\nThe Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
The IAM role that controls your users' access to your Amazon S3 bucket for servers with Domain=S3, or your EFS file system for servers with Domain=EFS.
The policies attached to this role determine the level of access you want to provide your users when transferring files into and out of your S3 buckets or EFS file systems.
Specifies the name of the user whose ARN was specified. User names are used for\n authentication purposes.
" + "smithy.api#documentation": "Specifies the name of the user whose ARN was specified. User names are used for authentication purposes.
" } } }, @@ -6798,7 +6820,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains the identifier, text description, and Amazon Resource Name (ARN) for the\n workflow.
" + "smithy.api#documentation": "Contains the identifier, text description, and Amazon Resource Name (ARN) for the workflow.
" } }, "com.amazonaws.transfer#ListedWorkflows": { @@ -6833,7 +6855,7 @@ "LoggingRole": { "target": "com.amazonaws.transfer#Role", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn\n on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in\n your CloudWatch logs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in your CloudWatch logs.
" } }, "LogGroupName": { @@ -6884,6 +6906,16 @@ } } }, + "com.amazonaws.transfer#MaxConcurrentConnections": { + "type": "integer", + "traits": { + "smithy.api#default": 1, + "smithy.api#documentation": "The number of concurrent connections that the connector will create to the remote server.", + "smithy.api#range": { + "min": 1 + } + } + }, "com.amazonaws.transfer#MaxItems": { "type": "integer", "traits": { @@ -6973,6 +7005,16 @@ "smithy.api#pattern": "^[\\p{Print}\\p{Blank}]+$" } }, + "com.amazonaws.transfer#MoveId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": "^[0-9a-zA-Z./-]+$" + } + }, "com.amazonaws.transfer#NextToken": { "type": "string", "traits": { @@ -7095,7 +7137,7 @@ } }, "traits": { - "smithy.api#documentation": "The full POSIX identity, including user ID (Uid), group ID\n (Gid), and any secondary groups IDs (SecondaryGids), that controls\n your users' access to your Amazon EFS file systems. The POSIX permissions that are set on\n files and directories in your file system determine the level of access your users get when\n transferring files into and out of your Amazon EFS file systems.
The full POSIX identity, including user ID (Uid), group ID (Gid), and any secondary groups IDs (SecondaryGids), that controls your users' access to your Amazon EFS file systems. The POSIX permissions that are set on files and directories in your file system determine the level of access your users get when transferring files into and out of your Amazon EFS file systems.
\n Indicates passive mode, for FTP and FTPS protocols.\n Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer.\n For example:\n
\n\n aws transfer update-server --protocol-details PassiveIp=0.0.0.0\n
Replace 0.0.0.0 in the example above with the actual IP address you want to use.
\n If you change the PassiveIp value, you must stop and then restart your Transfer Family server for the change to take effect. For details on using passive mode (PASV) in a NAT environment, see Configuring your FTPS server behind a firewall or NAT with Transfer Family.\n
\n Special values\n
\nThe AUTO and 0.0.0.0 are special values for the PassiveIp parameter. The value PassiveIp=AUTO\n is assigned by default to FTP and FTPS type servers. In this case, the server automatically responds with one of the endpoint IPs within the PASV response.\n PassiveIp=0.0.0.0 has a more unique application for its usage. For example, if you have a High Availability (HA) Network Load Balancer (NLB) environment,\n where you have 3 subnets, you can only specify a single IP address using the PassiveIp parameter. This reduces the effectiveness of having High Availability.\n In this case, you can specify PassiveIp=0.0.0.0. This tells the client to use the same IP address as the Control connection and utilize all AZs for their\n connections. Note, however, that not all FTP clients support the PassiveIp=0.0.0.0 response. FileZilla and WinSCP do support it. If you are using other\n clients, check to see if your client supports the PassiveIp=0.0.0.0 response.
Indicates passive mode, for FTP and FTPS protocols. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer. For example:
aws transfer update-server --protocol-details PassiveIp=0.0.0.0
Replace 0.0.0.0 in the example above with the actual IP address you want to use.
If you change the PassiveIp value, you must stop and then restart your Transfer Family server for the change to take effect. For details on using passive mode (PASV) in a NAT environment, see Configuring your FTPS server behind a firewall or NAT with Transfer Family.
Special values
The AUTO and 0.0.0.0 are special values for the PassiveIp parameter. The value PassiveIp=AUTO is assigned by default to FTP and FTPS type servers. In this case, the server automatically responds with one of the endpoint IPs within the PASV response. PassiveIp=0.0.0.0 has a more unique application for its usage. For example, if you have a High Availability (HA) Network Load Balancer (NLB) environment, where you have 3 subnets, you can only specify a single IP address using the PassiveIp parameter. This reduces the effectiveness of having High Availability. In this case, you can specify PassiveIp=0.0.0.0. This tells the client to use the same IP address as the Control connection and utilize all AZs for their connections. Note, however, that not all FTP clients support the PassiveIp=0.0.0.0 response. FileZilla and WinSCP do support it. If you are using other clients, check to see if your client supports the PassiveIp=0.0.0.0 response.
A property used with Transfer Family servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret\n key between the control and data connection for an FTPS session. TlsSessionResumptionMode determines whether or not the server resumes recent,\n negotiated sessions through a unique session ID. This property is available during CreateServer and UpdateServer calls.\n If a TlsSessionResumptionMode value is not specified during CreateServer, it is set to ENFORCED by default.
\n DISABLED: the server does not process TLS session resumption client requests and creates a new TLS session for each request.
\n ENABLED: the server processes and accepts clients that are performing TLS session resumption.\n The server doesn't reject client data connections that do not perform the TLS session resumption client processing.
\n ENFORCED: the server processes and accepts clients that are performing TLS session resumption.\n The server rejects client data connections that do not perform the TLS session resumption client processing.\n Before you set the value to ENFORCED, test your clients.
Not all FTPS clients perform TLS session resumption. So, if you choose to enforce\n TLS session resumption, you prevent any connections from FTPS clients that don't perform\n the protocol negotiation. To determine whether or not you can use the\n ENFORCED value, you need to test your clients.
A property used with Transfer Family servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. TlsSessionResumptionMode determines whether or not the server resumes recent, negotiated sessions through a unique session ID. This property is available during CreateServer and UpdateServer calls. If a TlsSessionResumptionMode value is not specified during CreateServer, it is set to ENFORCED by default.
DISABLED: the server does not process TLS session resumption client requests and creates a new TLS session for each request.
ENABLED: the server processes and accepts clients that are performing TLS session resumption. The server doesn't reject client data connections that do not perform the TLS session resumption client processing.
ENFORCED: the server processes and accepts clients that are performing TLS session resumption. The server rejects client data connections that do not perform the TLS session resumption client processing. Before you set the value to ENFORCED, test your clients.
Not all FTPS clients perform TLS session resumption. So, if you choose to enforce TLS session resumption, you prevent any connections from FTPS clients that don't perform the protocol negotiation. To determine whether or not you can use the ENFORCED value, you need to test your clients.
Use the SetStatOption to ignore the error that is generated when the client attempts to use SETSTAT on a file you are uploading to an S3 bucket.
Some SFTP file transfer clients can attempt to change the attributes of remote files, including timestamp and permissions, using commands, such as SETSTAT when uploading the file.\n However, these commands are not compatible with object storage systems, such as Amazon S3. Due to this incompatibility, file uploads from these clients can result in errors even when \n the file is otherwise successfully uploaded.
Set the value to ENABLE_NO_OP to have the Transfer Family server ignore the SETSTAT command, and upload files without needing to make any changes to your SFTP client.\n While the SetStatOption\n ENABLE_NO_OP setting ignores the error, it does generate a log entry in Amazon CloudWatch Logs, so you can determine when the client is making a SETSTAT call.
If you want to preserve the original timestamp for your file, and modify other file attributes using SETSTAT, you can use Amazon EFS as backend storage with Transfer Family.
Use the SetStatOption to ignore the error that is generated when the client attempts to use SETSTAT on a file you are uploading to an S3 bucket.
Some SFTP file transfer clients can attempt to change the attributes of remote files, including timestamp and permissions, using commands, such as SETSTAT when uploading the file. However, these commands are not compatible with object storage systems, such as Amazon S3. Due to this incompatibility, file uploads from these clients can result in errors even when the file is otherwise successfully uploaded.
Set the value to ENABLE_NO_OP to have the Transfer Family server ignore the SETSTAT command, and upload files without needing to make any changes to your SFTP client. While the SetStatOption ENABLE_NO_OP setting ignores the error, it does generate a log entry in Amazon CloudWatch Logs, so you can determine when the client is making a SETSTAT call.
If you want to preserve the original timestamp for your file, and modify other file attributes using SETSTAT, you can use Amazon EFS as backend storage with Transfer Family.
\n The protocol settings that are configured for your server.\n
" + "smithy.api#documentation": "The protocol settings that are configured for your server.
" } }, "com.amazonaws.transfer#Protocols": { @@ -7354,7 +7396,7 @@ } }, "traits": { - "smithy.api#documentation": "This exception is thrown when a resource is not found by the Amazon Web ServicesTransfer Family\n service.
", + "smithy.api#documentation": "This exception is thrown when a resource is not found by the Amazon Web ServicesTransfer Family service.
", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -7447,7 +7489,7 @@ } }, "traits": { - "smithy.api#documentation": "Specifies the customer input Amazon S3 file location. If it is used inside copyStepDetails.DestinationFileLocation, it should be the S3 copy destination.
\n You need to provide the bucket and key.\n The key can represent either a path or a file.\n This is determined by whether or not you end the key value with the forward slash (/) character.\n If the final character is \"/\", then your file is copied to the folder, and its name does not change.\n If, rather, the final character is alphanumeric, your uploaded file is renamed to the path value. In this case, if a file with that name already exists, it is overwritten.\n
\nFor example, if your path is shared-files/bob/, your uploaded files are copied to the shared-files/bob/, folder.\n If your path is shared-files/today, each uploaded file is copied to the shared-files folder and named today:\n each upload overwrites the previous version of the bob file.
Specifies the customer input Amazon S3 file location. If it is used inside copyStepDetails.DestinationFileLocation, it should be the S3 copy destination.
You need to provide the bucket and key. The key can represent either a path or a file. This is determined by whether or not you end the key value with the forward slash (/) character. If the final character is \"/\", then your file is copied to the folder, and its name does not change. If, rather, the final character is alphanumeric, your uploaded file is renamed to the path value. In this case, if a file with that name already exists, it is overwritten.
For example, if your path is shared-files/bob/, your uploaded files are copied to the shared-files/bob/ folder. If your path is shared-files/today, each uploaded file is copied to the shared-files folder and named today: each upload overwrites the previous version of the today file.
Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.
\nBy default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry\n Type to FILE if you want a mapping to have a file target.
Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.
By default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target.
Sends a callback for asynchronous custom steps.
\n\n The ExecutionId, WorkflowId, and Token are passed to the target resource during execution of a custom step of a workflow.\n You must include those with their callback as well as providing a status.\n
Sends a callback for asynchronous custom steps.
The ExecutionId, WorkflowId, and Token are passed to the target resource during execution of a custom step of a workflow. You must include those with their callback as well as providing a status.
The identifier for the secret (in Amazon Web Services Secrets Manager) that contains the SFTP user's private key, password, or both. The identifier must be the Amazon Resource Name (ARN) of the secret.
" + "smithy.api#documentation": "The identifier for the secret (in Amazon Web Services Secrets Manager) that contains the SFTP user's private key, password, or both. The identifier must be the Amazon Resource Name (ARN) of the secret.
Required when creating an SFTP connector
Optional when updating an existing SFTP connector
The public portion of the host key, or keys, that are used to identify the external server to which you are connecting.\n You can use the ssh-keyscan command against the SFTP server to retrieve the necessary key.
The three standard SSH public key format elements are <key type>,\n <body base64>, and an optional <comment>, with spaces\n between each element. Specify only the <key type> and <body\n base64>: do not enter the <comment> portion of the key.
For the trusted host key, Transfer Family accepts RSA and ECDSA keys.
\nFor RSA keys, the <key type> string is ssh-rsa.
For ECDSA keys, the <key type> string is either\n ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or\n ecdsa-sha2-nistp521, depending on the size of the key you generated.
Run this command to retrieve the SFTP server host key, where your SFTP server name is ftp.host.com.
\n ssh-keyscan ftp.host.com\n
This prints the public host key to standard output.
\n\n ftp.host.com ssh-rsa AAAAB3Nza...<long-string-for-public-key\n
Copy and paste this string into the TrustedHostKeys field for the create-connector command or into the Trusted host keys field in the console.
The public portion of the host key, or keys, that are used to identify the external server to which you are connecting. You can use the ssh-keyscan command against the SFTP server to retrieve the necessary key.
TrustedHostKeys is optional for CreateConnector. If not provided, you can use TestConnection to retrieve the server host key during the initial connection attempt, and subsequently update the connector with the observed host key.
The three standard SSH public key format elements are <key type>, <body base64>, and an optional <comment>, with spaces between each element. Specify only the <key type> and <body base64>: do not enter the <comment> portion of the key.
For the trusted host key, Transfer Family accepts RSA and ECDSA keys.
For RSA keys, the <key type> string is ssh-rsa.
For ECDSA keys, the <key type> string is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated.
Run this command to retrieve the SFTP server host key, where your SFTP server name is ftp.host.com.
ssh-keyscan ftp.host.com
This prints the public host key to standard output.
ftp.host.com ssh-rsa AAAAB3Nza...<long-string-for-public-key>
Copy and paste this string into the TrustedHostKeys field for the create-connector command or into the Trusted host keys field in the console.
Specify the number of concurrent connections that your connector creates to the remote server. The default value is 5 (this is also the maximum value allowed).
This parameter specifies the number of active connections that your connector can establish with the remote server at the same time. Increasing this value can enhance connector performance when transferring large file batches by enabling parallel operations.
" } } }, "traits": { - "smithy.api#documentation": "Contains the details for an SFTP connector object. The connector object is used for transferring files to and from a\n partner's SFTP server.
\nBecause the SftpConnectorConfig data type is used for both creating and updating SFTP connectors, its parameters,\n TrustedHostKeys and UserSecretId are marked as not required. This is a bit misleading, as they are not required when\n you are updating an existing SFTP connector, but are required when you are creating a new SFTP connector.
Contains the details for an SFTP connector object. The connector object is used for transferring files to and from a partner's SFTP server.
" + } + }, + "com.amazonaws.transfer#SftpConnectorConnectionDetails": { + "type": "structure", + "members": { + "HostKey": { + "target": "com.amazonaws.transfer#SftpConnectorHostKey", + "traits": { + "smithy.api#documentation": "The SSH public key of the remote SFTP server. This is returned during the initial connection attempt when you call TestConnection. It allows you to retrieve the valid server host key to update the connector when you are unable to obtain it in advance.
Contains the details for an SFTP connector connection.
" + } + }, + "com.amazonaws.transfer#SftpConnectorHostKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } } }, "com.amazonaws.transfer#SftpConnectorTrustedHostKey": { @@ -7916,7 +7988,7 @@ }, "traits": { "smithy.api#length": { - "min": 1, + "min": 0, "max": 10 } } @@ -7989,20 +8061,20 @@ "SshPublicKeyBody": { "target": "com.amazonaws.transfer#SshPublicKeyBody", "traits": { - "smithy.api#documentation": "Specifies the content of the SSH public key as specified by the\n PublicKeyId.
Transfer Family accepts RSA, ECDSA, and ED25519 keys.
", + "smithy.api#documentation": "Specifies the content of the SSH public key as specified by the PublicKeyId.
Transfer Family accepts RSA, ECDSA, and ED25519 keys.
", "smithy.api#required": {} } }, "SshPublicKeyId": { "target": "com.amazonaws.transfer#SshPublicKeyId", "traits": { - "smithy.api#documentation": "Specifies the SshPublicKeyId parameter contains the identifier of the public\n key.
Specifies the SshPublicKeyId parameter contains the identifier of the public key.
Provides information about the public Secure Shell (SSH) key that is associated with a Transfer Family\n user for the specific file transfer protocol-enabled server (as identified by\n ServerId). The information returned includes the date the key was imported, the\n public key contents, and the public key ID. A user can store more than one SSH public key\n associated with their user name on a specific server.
Provides information about the public Secure Shell (SSH) key that is associated with a Transfer Family user for the specific file transfer protocol-enabled server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific server.
Retrieves a list of the contents of a directory from a remote SFTP server. You specify the\n connector ID, the output path, and the remote directory path. You can also specify the\n optional MaxItems value to control the maximum number of items that are listed\n from the remote directory. This API returns a list of all files and directories in the remote\n directory (up to the maximum value), but does not return files or folders in sub-directories.\n That is, it only returns a list of files and directories one-level deep.
After you receive the listing file, you can provide the files that you want to transfer to\n the RetrieveFilePaths parameter of the StartFileTransfer API\n call.
The naming convention for the output file is\n \n connector-ID-listing-ID.json. The\n output file contains the following information:
\n filePath: the complete path of a remote file, relative to the directory\n of the listing request for your SFTP connector on the remote server.
\n modifiedTimestamp: the last time the file was modified, in UTC time\n format. This field is optional. If the remote file attributes don't contain a timestamp,\n it is omitted from the file listing.
\n size: the size of the file, in bytes. This field is optional. If the\n remote file attributes don't contain a file size, it is omitted from the file\n listing.
\n path: the complete path of a remote directory, relative to the directory\n of the listing request for your SFTP connector on the remote server.
\n truncated: a flag indicating whether the list output contains all of the\n items contained in the remote directory or not. If your Truncated output\n value is true, you can increase the value provided in the optional max-items\n input attribute to be able to list more items (up to the maximum allowed list size of\n 10,000 items).
Retrieves a list of the contents of a directory from a remote SFTP server. You specify the connector ID, the output path, and the remote directory path. You can also specify the optional MaxItems value to control the maximum number of items that are listed from the remote directory. This API returns a list of all files and directories in the remote directory (up to the maximum value), but does not return files or folders in sub-directories. That is, it only returns a list of files and directories one-level deep.
After you receive the listing file, you can provide the files that you want to transfer to the RetrieveFilePaths parameter of the StartFileTransfer API call.
The naming convention for the output file is connector-ID-listing-ID.json. The output file contains the following information:
filePath: the complete path of a remote file, relative to the directory of the listing request for your SFTP connector on the remote server.
modifiedTimestamp: the last time the file was modified, in UTC time format. This field is optional. If the remote file attributes don't contain a timestamp, it is omitted from the file listing.
size: the size of the file, in bytes. This field is optional. If the remote file attributes don't contain a file size, it is omitted from the file listing.
path: the complete path of a remote directory, relative to the directory of the listing request for your SFTP connector on the remote server.
truncated: a flag indicating whether the list output contains all of the items contained in the remote directory or not. If your Truncated output value is true, you can increase the value provided in the optional max-items input attribute to be able to list more items (up to the maximum allowed list size of 10,000 items).
An optional parameter where you can specify the maximum number of file/directory names to\n retrieve. The default value is 1,000.
" + "smithy.api#documentation": "An optional parameter where you can specify the maximum number of file/directory names to retrieve. The default value is 1,000.
" } }, "OutputDirectoryPath": { @@ -8152,7 +8224,7 @@ } ], "traits": { - "smithy.api#documentation": "Begins a file transfer between local Amazon Web Services storage and a remote AS2 or SFTP server.
\nFor an AS2 connector, you specify the ConnectorId and one or more SendFilePaths to identify the files\n you want to transfer.
For an SFTP connector, the file transfer can be either outbound or inbound. In both\n cases, you specify the ConnectorId. Depending on the direction of the transfer,\n you also specify the following items:
If you are transferring file from a partner's SFTP server to Amazon Web Services\n storage, you specify one or more RetrieveFilePaths to identify the files\n you want to transfer, and a LocalDirectoryPath to specify the destination\n folder.
If you are transferring file to a partner's SFTP server from Amazon Web Services\n storage, you specify one or more SendFilePaths to identify the files you\n want to transfer, and a RemoteDirectoryPath to specify the destination\n folder.
Begins a file transfer between local Amazon Web Services storage and a remote AS2 or SFTP server.
For an AS2 connector, you specify the ConnectorId and one or more SendFilePaths to identify the files you want to transfer.
For an SFTP connector, the file transfer can be either outbound or inbound. In both cases, you specify the ConnectorId. Depending on the direction of the transfer, you also specify the following items:
If you are transferring files from a partner's SFTP server to Amazon Web Services storage, you specify one or more RetrieveFilePaths to identify the files you want to transfer, and a LocalDirectoryPath to specify the destination folder.
If you are transferring files to a partner's SFTP server from Amazon Web Services storage, you specify one or more SendFilePaths to identify the files you want to transfer, and a RemoteDirectoryPath to specify the destination folder.
One or more source paths for the Amazon S3 storage. Each string represents a source\n file path for one outbound file transfer. For example,\n \n amzn-s3-demo-bucket/myfile.txt\n .
Replace \n amzn-s3-demo-bucket\n with one of your actual buckets.
One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, amzn-s3-demo-bucket/myfile.txt.
Replace amzn-s3-demo-bucket with one of your actual buckets.
For an inbound transfer, the LocaDirectoryPath specifies the destination for one or more files\n that are transferred from the partner's SFTP server.
For an inbound transfer, the LocalDirectoryPath specifies the destination for one or more files that are transferred from the partner's SFTP server.
For an outbound transfer, the RemoteDirectoryPath specifies the destination\n for one or more files that are transferred to the partner's SFTP server. If you don't specify\n a RemoteDirectoryPath, the destination for transferred files is the SFTP user's\n home directory.
For an outbound transfer, the RemoteDirectoryPath specifies the destination for one or more files that are transferred to the partner's SFTP server. If you don't specify a RemoteDirectoryPath, the destination for transferred files is the SFTP user's home directory.
Deletes a file or directory on the remote SFTP server.
", + "smithy.api#http": { + "method": "POST", + "uri": "/startRemoteDelete" + } + } + }, + "com.amazonaws.transfer#StartRemoteDeleteRequest": { + "type": "structure", + "members": { + "ConnectorId": { + "target": "com.amazonaws.transfer#ConnectorId", + "traits": { + "smithy.api#documentation": "The unique identifier for the connector.
", + "smithy.api#required": {} + } + }, + "DeletePath": { + "target": "com.amazonaws.transfer#FilePath", + "traits": { + "smithy.api#documentation": "The absolute path of the file or directory to delete. You can only specify one path per call to this operation.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.transfer#StartRemoteDeleteResponse": { + "type": "structure", + "members": { + "DeleteId": { + "target": "com.amazonaws.transfer#DeleteId", + "traits": { + "smithy.api#documentation": "Returns a unique identifier for the delete operation.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.transfer#StartRemoteMove": { + "type": "operation", + "input": { + "target": "com.amazonaws.transfer#StartRemoteMoveRequest" + }, + "output": { + "target": "com.amazonaws.transfer#StartRemoteMoveResponse" + }, + "errors": [ + { + "target": "com.amazonaws.transfer#InternalServiceError" + }, + { + "target": "com.amazonaws.transfer#InvalidRequestException" + }, + { + "target": "com.amazonaws.transfer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.transfer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.transfer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "Moves or renames a file or directory on the remote SFTP server.
", + "smithy.api#http": { + "method": "POST", + "uri": "/startRemoteMove" + } + } + }, + "com.amazonaws.transfer#StartRemoteMoveRequest": { + "type": "structure", + "members": { + "ConnectorId": { + "target": "com.amazonaws.transfer#ConnectorId", + "traits": { + "smithy.api#documentation": "The unique identifier for the connector.
", + "smithy.api#required": {} + } + }, + "SourcePath": { + "target": "com.amazonaws.transfer#FilePath", + "traits": { + "smithy.api#documentation": "The absolute path of the file or directory to move or rename. You can only specify one path per call to this operation.
", + "smithy.api#required": {} + } + }, + "TargetPath": { + "target": "com.amazonaws.transfer#FilePath", + "traits": { + "smithy.api#documentation": "The absolute path for the target of the move/rename operation.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.transfer#StartRemoteMoveResponse": { + "type": "structure", + "members": { + "MoveId": { + "target": "com.amazonaws.transfer#MoveId", + "traits": { + "smithy.api#documentation": "Returns a unique identifier for the move/rename operation.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.transfer#StartServer": { "type": "operation", "input": { @@ -8235,7 +8454,7 @@ } ], "traits": { - "smithy.api#documentation": "Changes the state of a file transfer protocol-enabled server from OFFLINE to\n ONLINE. It has no impact on a server that is already ONLINE. An\n ONLINE server can accept and process file transfer jobs.
The state of STARTING indicates that the server is in an intermediate state,\n either not fully able to respond, or not fully online. The values of START_FAILED\n can indicate an error condition.
No response is returned from this call.
" + "smithy.api#documentation": "Changes the state of a file transfer protocol-enabled server from OFFLINE to ONLINE. It has no impact on a server that is already ONLINE. An ONLINE server can accept and process file transfer jobs.
The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.
No response is returned from this call.
" } }, "com.amazonaws.transfer#StartServerRequest": { @@ -8294,7 +8513,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes the condition of a file transfer protocol-enabled server with respect to its\n ability to perform file operations. There are six possible states: OFFLINE,\n ONLINE, STARTING, STOPPING,\n START_FAILED, and STOP_FAILED.
\n OFFLINE indicates that the server exists, but that it is not available for\n file operations. ONLINE indicates that the server is available to perform file\n operations. STARTING indicates that the server's was instantiated, but the\n server is not yet available to perform file operations. Under normal conditions, it can take a\n couple of minutes for the server to be completely operational. Both START_FAILED\n and STOP_FAILED are error conditions.
Describes the condition of a file transfer protocol-enabled server with respect to its ability to perform file operations. There are six possible states: OFFLINE, ONLINE, STARTING, STOPPING, START_FAILED, and STOP_FAILED.
OFFLINE indicates that the server exists, but that it is not available for file operations. ONLINE indicates that the server is available to perform file operations. STARTING indicates that the server's was instantiated, but the server is not yet available to perform file operations. Under normal conditions, it can take a couple of minutes for the server to be completely operational. Both START_FAILED and STOP_FAILED are error conditions.
Changes the state of a file transfer protocol-enabled server from ONLINE to\n OFFLINE. An OFFLINE server cannot accept and process file transfer\n jobs. Information tied to your server, such as server and user properties, are not affected by\n stopping your server.
Stopping the server does not reduce or impact your file transfer protocol endpoint\n billing; you must delete the server to stop being billed.
\nThe state of STOPPING indicates that the server is in an intermediate state,\n either not fully able to respond, or not fully offline. The values of STOP_FAILED\n can indicate an error condition.
No response is returned from this call.
" + "smithy.api#documentation": "Changes the state of a file transfer protocol-enabled server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server, such as server and user properties, are not affected by stopping your server.
Stopping the server does not reduce or impact your file transfer protocol endpoint billing; you must delete the server to stop being billed.
The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.
No response is returned from this call.
" } }, "com.amazonaws.transfer#StopServerRequest": { @@ -8399,7 +8618,7 @@ } }, "traits": { - "smithy.api#documentation": "Creates a key-value pair for a specific resource. Tags are metadata that you can use to\n search for and group a resource for various purposes. You can apply tags to servers, users,\n and roles. A tag key can take more than one value. For example, to group servers for\n accounting purposes, you might create a tag called Group and assign the values\n Research and Accounting to that group.
Creates a key-value pair for a specific resource. Tags are metadata that you can use to search for and group a resource for various purposes. You can apply tags to servers, users, and roles. A tag key can take more than one value. For example, to group servers for accounting purposes, you might create a tag called Group and assign the values Research and Accounting to that group.
Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN).\n Resources are users, servers, roles, and other entities.
\nThere is no response returned from this call.
" + "smithy.api#documentation": "Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
There is no response returned from this call.
" } }, "com.amazonaws.transfer#TagResourceRequest": { @@ -8455,14 +8674,14 @@ "Arn": { "target": "com.amazonaws.transfer#Arn", "traits": { - "smithy.api#documentation": "An Amazon Resource Name (ARN) for a specific Amazon Web Services resource, such as a server, user, or\n role.
", + "smithy.api#documentation": "An Amazon Resource Name (ARN) for a specific Amazon Web Services resource, such as a server, user, or role.
", "smithy.api#required": {} } }, "Tags": { "target": "com.amazonaws.transfer#Tags", "traits": { - "smithy.api#documentation": "Key-value pairs assigned to ARNs that you can use to group and search for resources by\n type. You can attach this metadata to resources (servers, users, workflows, and so on) for any purpose.
", + "smithy.api#documentation": "Key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (servers, users, workflows, and so on) for any purpose.
", "smithy.api#required": {} } } @@ -8489,12 +8708,12 @@ "SourceFileLocation": { "target": "com.amazonaws.transfer#SourceFileLocation", "traits": { - "smithy.api#documentation": "Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file\n for the workflow.
\nTo use the previous file as the input, enter ${previous.file}.\n In this case, this workflow step uses the output file from the previous workflow step as input.\n This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
To use the previous file as the input, enter ${previous.file}. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
To use the originally uploaded file location as input for this step, enter ${original.file}.
Each step type has its own StepDetails structure.
The key/value pairs used to tag a file during the execution of a workflow step.
" + "smithy.api#documentation": "Each step type has its own StepDetails structure.
The key/value pairs used to tag a file during the execution of a workflow step.
" } }, "com.amazonaws.transfer#TagValue": { @@ -8541,7 +8760,7 @@ } ], "traits": { - "smithy.api#documentation": "Tests whether your SFTP connector is set up successfully. We highly recommend that you call this\n operation to test your ability to transfer files between local Amazon Web Services storage and a trading partner's\n SFTP server.
" + "smithy.api#documentation": "Tests whether your SFTP connector is set up successfully. We highly recommend that you call this operation to test your ability to transfer files between local Amazon Web Services storage and a trading partner's SFTP server.
" } }, "com.amazonaws.transfer#TestConnectionRequest": { @@ -8577,7 +8796,13 @@ "StatusMessage": { "target": "com.amazonaws.transfer#Message", "traits": { - "smithy.api#documentation": "Returns Connection succeeded if the test is successful. Or, returns a descriptive error message \n if the test fails. The following list provides troubleshooting details, depending on the error message that you receive.
Verify that your secret name aligns with the one in\n Transfer Role permissions.
\nVerify the server URL in the connector\n configuration , and verify that the login credentials work successfully outside of the connector.
\nVerify that the secret exists and is formatted correctly.
\nVerify that the trusted host key in the connector\n configuration matches the ssh-keyscan output.
Returns Connection succeeded if the test is successful. Or, returns a descriptive error message if the test fails. The following list provides troubleshooting details, depending on the error message that you receive.
Verify that your secret name aligns with the one in Transfer Role permissions.
Verify the server URL in the connector configuration, and verify that the login credentials work successfully outside of the connector.
Verify that the secret exists and is formatted correctly.
Verify that the trusted host key in the connector configuration matches the ssh-keyscan output.
Structure that contains the SFTP connector host key.
" } } }, @@ -8608,7 +8833,7 @@ } ], "traits": { - "smithy.api#documentation": "If the IdentityProviderType of a file transfer protocol-enabled server is\n AWS_DIRECTORY_SERVICE or API_Gateway, tests whether your identity\n provider is set up successfully. We highly recommend that you call this operation to test your\n authentication method as soon as you create your server. By doing so, you can troubleshoot\n issues with the identity provider integration to ensure that your users can successfully use\n the service.
\n The ServerId and UserName parameters are required. The ServerProtocol, SourceIp, and UserPassword are all optional. \n
Note the following:
\n You cannot use TestIdentityProvider if the\n IdentityProviderType of your server is SERVICE_MANAGED.
\n TestIdentityProvider does not work with keys: it only accepts\n passwords.
\n TestIdentityProvider can test the password operation for a custom Identity Provider that handles keys and passwords.
\n If you provide any incorrect values for any parameters, the Response field is empty.\n
\n If you provide a server ID for a server that uses service-managed users, you get an error:\n
\n\n \n An error occurred (InvalidRequestException) when calling the TestIdentityProvider operation: s-server-ID not configured for external auth\n \n
\n If you enter a Server ID for the --server-id parameter that does not identify an actual Transfer server, you receive the following error:\n
\n An error occurred (ResourceNotFoundException) when calling the TestIdentityProvider operation: Unknown server.\n
It is possible your sever is in a different region. You can specify a region by adding the following: --region region-code,\n such as --region us-east-2 to specify a server in US East (Ohio).
If the IdentityProviderType of a file transfer protocol-enabled server is AWS_DIRECTORY_SERVICE or API_Gateway, tests whether your identity provider is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the identity provider integration to ensure that your users can successfully use the service.
The ServerId and UserName parameters are required. The ServerProtocol, SourceIp, and UserPassword are all optional.
Note the following:
You cannot use TestIdentityProvider if the IdentityProviderType of your server is SERVICE_MANAGED.
TestIdentityProvider does not work with keys: it only accepts passwords.
TestIdentityProvider can test the password operation for a custom Identity Provider that handles keys and passwords.
If you provide any incorrect values for any parameters, the Response field is empty.
If you provide a server ID for a server that uses service-managed users, you get an error:
An error occurred (InvalidRequestException) when calling the TestIdentityProvider operation: s-server-ID not configured for external auth
If you enter a Server ID for the --server-id parameter that does not identify an actual Transfer server, you receive the following error:
An error occurred (ResourceNotFoundException) when calling the TestIdentityProvider operation: Unknown server.
It is possible your server is in a different region. You can specify a region by adding the following: --region region-code, such as --region us-east-2 to specify a server in US East (Ohio).
A system-assigned identifier for a specific server. That server's user authentication\n method is tested with a user name and password.
", + "smithy.api#documentation": "A system-assigned identifier for a specific server. That server's user authentication method is tested with a user name and password.
", "smithy.api#required": {} } }, "ServerProtocol": { "target": "com.amazonaws.transfer#Protocol", "traits": { - "smithy.api#documentation": "The type of file transfer protocol to be tested.
\nThe available protocols are:
\nSecure Shell (SSH) File Transfer Protocol (SFTP)
\nFile Transfer Protocol Secure (FTPS)
\nFile Transfer Protocol (FTP)
\nApplicability Statement 2 (AS2)
\nThe type of file transfer protocol to be tested.
The available protocols are:
Secure Shell (SSH) File Transfer Protocol (SFTP)
File Transfer Protocol Secure (FTPS)
File Transfer Protocol (FTP)
Applicability Statement 2 (AS2)
A message that indicates whether the test was successful or not.
\nIf an empty string is returned, the most likely cause is that the authentication failed due to an incorrect username or password.
\nA message that indicates whether the test was successful or not.
If an empty string is returned, the most likely cause is that the authentication failed due to an incorrect username or password.
Transfer Family is a fully managed service that enables the transfer of files over the File\n Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File\n Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS.\n Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3.\n Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating\n with existing authentication systems, and providing DNS routing with Amazon Route 53 so\n nothing changes for your customers and partners, or their applications. With your data in\n Amazon S3, you can use it with Amazon Web Services services for processing, analytics, machine learning, and\n archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and\n set up.
", + "smithy.api#documentation": "Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up.
", "smithy.api#title": "AWS Transfer Family", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -9898,7 +10129,7 @@ } ], "traits": { - "smithy.api#documentation": "Detaches a key-value pair from a resource, as identified by its Amazon Resource Name\n (ARN). Resources are users, servers, roles, and other entities.
\nNo response is returned from this call.
" + "smithy.api#documentation": "Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
No response is returned from this call.
" } }, "com.amazonaws.transfer#UntagResourceRequest": { @@ -9907,14 +10138,14 @@ "Arn": { "target": "com.amazonaws.transfer#Arn", "traits": { - "smithy.api#documentation": "The value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is\n an identifier for a specific Amazon Web Services resource, such as a server, user, or role.
", + "smithy.api#documentation": "The value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific Amazon Web Services resource, such as a server, user, or role.
", "smithy.api#required": {} } }, "TagKeys": { "target": "com.amazonaws.transfer#TagKeys", "traits": { - "smithy.api#documentation": "TagKeys are key-value pairs assigned to ARNs that can be used to group and search for\n resources by type. This metadata can be attached to resources for any purpose.
", + "smithy.api#documentation": "TagKeys are key-value pairs assigned to ARNs that can be used to group and search for resources by type. This metadata can be attached to resources for any purpose.
", "smithy.api#required": {} } } @@ -9952,7 +10183,7 @@ } ], "traits": { - "smithy.api#documentation": "Allows you to update parameters for the access specified in the ServerID and\n ExternalID parameters.
Allows you to update parameters for the access specified in the ServerID and ExternalID parameters.
The landing directory (folder) for a user when they log in to the server using the client.
\nA HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The landing directory (folder) for a user when they log in to the server using the client.
A HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings, using the HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you provide an absolute path using the HomeDirectory parameter. You cannot have both HomeDirectory and HomeDirectoryMappings in your template.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should\n be visible to your user and how you want to make them visible. You must specify the\n Entry and Target pair, where Entry shows how the path\n is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you\n only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) \n role provides access to paths in Target. This value\n can be set only when HomeDirectoryType is set to\n LOGICAL.
The following is an Entry and Target pair example.
\n [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]\n
In most cases, you can use this value instead of the session policy to lock down your\n user to the designated home directory (\"chroot\"). To do this, you can set\n Entry to / and set Target to the\n HomeDirectory parameter value.
The following is an Entry and Target pair example for chroot.
\n [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]\n
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.
The following is an Entry and Target pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.
The following is an Entry and Target pair example for chroot.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's\n access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName},\n ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
This policy applies only when the domain of ServerId is Amazon S3. Amazon EFS does not use session policies.
For session policies, Transfer Family stores the policy as a JSON blob, instead\n of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass\n it in the Policy argument.
For an example of a session policy, see Example\n session policy.
\nFor more information, see AssumeRole in the Amazon Web ServicesSecurity Token Service API\n Reference.
\nA session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
This policy applies only when the domain of ServerId is Amazon S3. Amazon EFS does not use session policies.
For session policies, Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.
For an example of a session policy, see Example session policy.
For more information, see AssumeRole in the Amazon Web ServicesSecurity Token Service API Reference.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
" } }, "ServerId": { @@ -10001,7 +10232,7 @@ "ExternalId": { "target": "com.amazonaws.transfer#ExternalId", "traits": { - "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory.\n The users of the group that you associate have access to your Amazon S3 or Amazon EFS\n resources over the enabled protocols using Transfer Family. If you know the group name,\n you can view the SID values by running the following command using Windows PowerShell.
\n\n Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid\n
In that command, replace YourGroupName with the name of your Active Directory group.
\nThe regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces.\n You can also include underscores or any of the following characters: =,.@:/-
", + "smithy.api#documentation": "A unique identifier that is required to identify specific groups within your directory. The users of the group that you associate have access to your Amazon S3 or Amazon EFS resources over the enabled protocols using Transfer Family. If you know the group name, you can view the SID values by running the following command using Windows PowerShell.
Get-ADGroup -Filter {samAccountName -like \"YourGroupName*\"} -Properties * | Select SamAccountName,ObjectSid
In that command, replace YourGroupName with the name of your Active Directory group.
The regular expression used to validate this parameter is a string of characters consisting of uppercase and lowercase alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@:/-
", "smithy.api#required": {} } } @@ -10023,7 +10254,7 @@ "ExternalId": { "target": "com.amazonaws.transfer#ExternalId", "traits": { - "smithy.api#documentation": "The external identifier of the group whose users have access to your Amazon S3 or Amazon\n EFS resources over the enabled protocols using Amazon Web ServicesTransfer Family.
", + "smithy.api#documentation": "The external identifier of the group whose users have access to your Amazon S3 or Amazon EFS resources over the enabled protocols using Amazon Web ServicesTransfer Family.
", "smithy.api#required": {} } } @@ -10068,7 +10299,7 @@ "iam:PassRole" ] }, - "smithy.api#documentation": "Updates some of the parameters for an existing agreement. Provide the\n AgreementId and the ServerId for the agreement that you want to\n update, along with the new values for the parameters to update.
Specify either\n BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail.
If you update an agreement from using base directory to custom directories, the base directory is no longer used. Similarly, if you change from custom directories to a base directory, the custom directories are no longer used.
\nUpdates some of the parameters for an existing agreement. Provide the AgreementId and the ServerId for the agreement that you want to update, along with the new values for the parameters to update.
Specify either BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail.
If you update an agreement from using base directory to custom directories, the base directory is no longer used. Similarly, if you change from custom directories to a base directory, the custom directories are no longer used.
You can update the status for the agreement, either activating an inactive agreement or\n the reverse.
" + "smithy.api#documentation": "You can update the status for the agreement, either activating an inactive agreement or the reverse.
" } }, "LocalProfileId": { "target": "com.amazonaws.transfer#ProfileId", "traits": { - "smithy.api#documentation": "A unique identifier for the AS2 local profile.
\nTo change the local profile identifier, provide a new value\n here.
" + "smithy.api#documentation": "A unique identifier for the AS2 local profile.
To change the local profile identifier, provide a new value here.
" } }, "PartnerProfileId": { "target": "com.amazonaws.transfer#ProfileId", "traits": { - "smithy.api#documentation": "A unique identifier for the partner profile.\n To change the partner profile identifier, provide a new value here.
" + "smithy.api#documentation": "A unique identifier for the partner profile. To change the partner profile identifier, provide a new value here.
" } }, "BaseDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { - "smithy.api#documentation": "To change the landing directory (folder) for files that are transferred, provide the\n bucket folder that you want to use; for example,\n /amzn-s3-demo-bucket/home/mydirectory\n .
To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory .
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role,\n provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
\n\n For AS2 connectors\n
\nWith AS2, you can send files by calling StartFileTransfer and specifying the\n file paths in the request parameter, SendFilePaths. We use the file’s parent\n directory (for example, for --send-file-paths /bucket/dir/file.txt, parent\n directory is /bucket/dir/) to temporarily store a processed AS2 message file,\n store the MDN when we receive them from the partner, and write a final JSON file containing\n relevant metadata of the transmission. So, the AccessRole needs to provide read\n and write access to the parent directory of the file location used in the\n StartFileTransfer request. Additionally, you need to provide read and write\n access to the parent directory of the files that you intend to send with\n StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the\n secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using\n a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also\n needs the kms:Decrypt permission for that key.
\n For SFTP connectors\n
\nMake sure that the access role provides\n read and write access to the parent directory of the file location\n that's used in the StartFileTransfer request.\n Additionally, make sure that the role provides\n secretsmanager:GetSecretValue permission to Secrets Manager.
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
For AS2 connectors
With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.
For SFTP connectors
Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.
\n Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n
\n\n ENABLED: the filename provided by your trading parter is preserved when the file is saved.
\n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations.
Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it.
ENABLED: the filename provided by your trading partner is preserved when the file is saved.
DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations.
\n Determines whether or not unsigned messages from your trading partners will be accepted.\n
\n\n ENABLED: Transfer Family rejects unsigned messages from your trading partner.
\n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
Determines whether or not unsigned messages from your trading partners will be accepted.
ENABLED: Transfer Family rejects unsigned messages from your trading partner.
DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
\nMDN files
\nPayload files
\nStatus files
\nTemporary files
\nA CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files.
Failed files
MDN files
Payload files
Status files
Temporary files
An optional date that specifies when the certificate becomes active.
" + "smithy.api#documentation": "An optional date that specifies when the certificate becomes active. If you do not specify a value, ActiveDate takes the same value as NotBeforeDate, which is specified by the CA.
An optional date that specifies when the certificate becomes inactive.
" + "smithy.api#documentation": "An optional date that specifies when the certificate becomes inactive. If you do not specify a value, InactiveDate takes the same value as NotAfterDate, which is specified by the CA.
Updates some of the parameters for an existing connector. Provide the\n ConnectorId for the connector that you want to update, along with the new\n values for the parameters to update.
Updates some of the parameters for an existing connector. Provide the ConnectorId for the connector that you want to update, along with the new values for the parameters to update.
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role,\n provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
\n\n For AS2 connectors\n
\nWith AS2, you can send files by calling StartFileTransfer and specifying the\n file paths in the request parameter, SendFilePaths. We use the file’s parent\n directory (for example, for --send-file-paths /bucket/dir/file.txt, parent\n directory is /bucket/dir/) to temporarily store a processed AS2 message file,\n store the MDN when we receive them from the partner, and write a final JSON file containing\n relevant metadata of the transmission. So, the AccessRole needs to provide read\n and write access to the parent directory of the file location used in the\n StartFileTransfer request. Additionally, you need to provide read and write\n access to the parent directory of the files that you intend to send with\n StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the\n secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using\n a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also\n needs the kms:Decrypt permission for that key.
\n For SFTP connectors\n
\nMake sure that the access role provides\n read and write access to the parent directory of the file location\n that's used in the StartFileTransfer request.\n Additionally, make sure that the role provides\n secretsmanager:GetSecretValue permission to Secrets Manager.
Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.
For AS2 connectors
With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.
For SFTP connectors
Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn\n on CloudWatch logging for Amazon S3 events. When set, you can view connector\n activity in your CloudWatch logs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.
" } }, "SftpConfig": { @@ -10376,7 +10607,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the description for the host key that's specified by the ServerId and\n HostKeyId parameters.
Updates the description for the host key that's specified by the ServerId and HostKeyId parameters.
Updates some of the parameters for an existing profile. Provide the ProfileId\n for the profile that you want to update, along with the new values for the parameters to\n update.
Updates some of the parameters for an existing profile. Provide the ProfileId for the profile that you want to update, along with the new values for the parameters to update.
Updates the file transfer protocol-enabled server's properties after that server has\n been created.
\nThe UpdateServer call returns the ServerId of the server you\n updated.
Updates the file transfer protocol-enabled server's properties after that server has been created.
The UpdateServer call returns the ServerId of the server you updated.
The Amazon Resource Name (ARN) of the Amazon Web ServicesCertificate Manager (ACM) certificate. Required\n when Protocols is set to FTPS.
To request a new public certificate, see Request a public certificate\n in the Amazon Web ServicesCertificate Manager User Guide.
\nTo import an existing certificate into ACM, see Importing certificates into ACM\n in the Amazon Web ServicesCertificate Manager User Guide.
\nTo request a private certificate to use FTPS through private IP addresses, see Request a\n private certificate in the Amazon Web ServicesCertificate Manager User\n Guide.
\nCertificates with the following cryptographic algorithms and key sizes are\n supported:
\n2048-bit RSA (RSA_2048)
\n4096-bit RSA (RSA_4096)
\nElliptic Prime Curve 256 bit (EC_prime256v1)
\nElliptic Prime Curve 384 bit (EC_secp384r1)
\nElliptic Prime Curve 521 bit (EC_secp521r1)
\nThe certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP\n address specified and information about the issuer.
\nThe Amazon Resource Name (ARN) of the Amazon Web Services Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.
To request a new public certificate, see Request a public certificate in the Amazon Web Services Certificate Manager User Guide.
To import an existing certificate into ACM, see Importing certificates into ACM in the Amazon Web Services Certificate Manager User Guide.
To request a private certificate to use FTPS through private IP addresses, see Request a private certificate in the Amazon Web Services Certificate Manager User Guide.
Certificates with the following cryptographic algorithms and key sizes are supported:
2048-bit RSA (RSA_2048)
4096-bit RSA (RSA_4096)
Elliptic Prime Curve 256 bit (EC_prime256v1)
Elliptic Prime Curve 384 bit (EC_secp384r1)
Elliptic Prime Curve 521 bit (EC_secp521r1)
The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer.
The protocol settings that are configured for your server.
\n\n To indicate passive mode (for FTP and FTPS protocols), use the PassiveIp parameter.\n Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n
To ignore the error that is generated when the client attempts to use the SETSTAT command on a file that you are \n uploading to an Amazon S3 bucket, use the SetStatOption parameter. To have the Transfer Family server ignore the \n SETSTAT command and upload files without needing to make any changes to your SFTP client, set the value to \n ENABLE_NO_OP. If you set the SetStatOption parameter to ENABLE_NO_OP, Transfer Family \n generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a SETSTAT \n call.
To determine whether your Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the \n TlsSessionResumptionMode parameter.
\n As2Transports indicates the transport method for the AS2 messages. Currently, only HTTP is supported.
The protocol settings that are configured for your server.
To indicate passive mode (for FTP and FTPS protocols), use the PassiveIp parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.
To ignore the error that is generated when the client attempts to use the SETSTAT command on a file that you are uploading to an Amazon S3 bucket, use the SetStatOption parameter. To have the Transfer Family server ignore the SETSTAT command and upload files without needing to make any changes to your SFTP client, set the value to ENABLE_NO_OP. If you set the SetStatOption parameter to ENABLE_NO_OP, Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a SETSTAT call.
To determine whether your Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the TlsSessionResumptionMode parameter.
As2Transports indicates the transport method for the AS2 messages. Currently, only HTTP is supported.
The virtual private cloud (VPC) endpoint settings that are configured for your server.\n When you host your endpoint within your VPC, you can make your endpoint accessible only to resources\n within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over\n the internet. Your VPC's default security groups are automatically assigned to your\n endpoint.
" + "smithy.api#documentation": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.
" } }, "EndpointType": { "target": "com.amazonaws.transfer#EndpointType", "traits": { - "smithy.api#documentation": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC)\n or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and \n resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.
\n After May 19, 2021, you won't be able to create a server using\n EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already\n done so before May 19, 2021. If you have already created servers with\n EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021,\n you will not be affected. After this date, use\n EndpointType=VPC.
For more information, see\n https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.
\nIt is recommended that you use VPC as the EndpointType. With\n this endpoint type, you have the option to directly associate up to three Elastic IPv4\n addresses (BYO IP included) with your server's endpoint and use VPC security groups to\n restrict traffic by the client's public IP address. This is not possible with\n EndpointType set to VPC_ENDPOINT.
The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.
After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.
For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.
It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.
The RSA, ECDSA, or ED25519 private key to use for your SFTP-enabled server. You can add multiple host keys, in case you want\n to rotate keys, or have a set of active keys that use different algorithms.
\nUse the following command to generate an RSA 2048 bit key with no passphrase:
\n\n ssh-keygen -t rsa -b 2048 -N \"\" -m PEM -f my-new-server-key.
Use a minimum value of 2048 for the -b option. You can create a stronger key by using 3072 or 4096.
Use the following command to generate an ECDSA 256 bit key with no passphrase:
\n\n ssh-keygen -t ecdsa -b 256 -N \"\" -m PEM -f my-new-server-key.
Valid values for the -b option for ECDSA are 256, 384, and 521.
Use the following command to generate an ED25519 key with no passphrase:
\n\n ssh-keygen -t ed25519 -N \"\" -f my-new-server-key.
For all of these commands, you can replace my-new-server-key with a string of your choice.
\nIf you aren't planning to migrate existing users from an existing SFTP-enabled\n server to a new server, don't update the host key. Accidentally changing a\n server's host key can be disruptive.
\nFor more information, see Manage host keys for your SFTP-enabled server in the Transfer Family User Guide.
" + "smithy.api#documentation": "The RSA, ECDSA, or ED25519 private key to use for your SFTP-enabled server. You can add multiple host keys, in case you want to rotate keys, or have a set of active keys that use different algorithms.
Use the following command to generate an RSA 2048 bit key with no passphrase:
ssh-keygen -t rsa -b 2048 -N \"\" -m PEM -f my-new-server-key.
Use a minimum value of 2048 for the -b option. You can create a stronger key by using 3072 or 4096.
Use the following command to generate an ECDSA 256 bit key with no passphrase:
ssh-keygen -t ecdsa -b 256 -N \"\" -m PEM -f my-new-server-key.
Valid values for the -b option for ECDSA are 256, 384, and 521.
Use the following command to generate an ED25519 key with no passphrase:
ssh-keygen -t ed25519 -N \"\" -f my-new-server-key.
For all of these commands, you can replace my-new-server-key with a string of your choice.
If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.
For more information, see Manage host keys for your SFTP-enabled server in the Transfer Family User Guide.
" } }, "IdentityProviderDetails": { "target": "com.amazonaws.transfer#IdentityProviderDetails", "traits": { - "smithy.api#documentation": "An array containing all of the information required to call a customer's\n authentication API method.
" + "smithy.api#documentation": "An array containing all of the information required to call a customer's authentication API method.
" } }, "LoggingRole": { "target": "com.amazonaws.transfer#NullableRole", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn\n on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in\n your CloudWatch logs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in your CloudWatch logs.
" } }, "PostAuthenticationLoginBanner": { "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner", "traits": { - "smithy.api#documentation": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.
\nThe SFTP protocol does not support post-authentication display banners.
\nSpecifies a string to display when users connect to a server. This string is displayed after the user authenticates.
The SFTP protocol does not support post-authentication display banners.
Specifies a string to display when users connect to a server. This string is displayed before the user authenticates.\n For example, the following banner displays details about using the system:
\n\n This system is for the use of authorized users only. Individuals using this computer system without authority,\n or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by\n system personnel.\n
Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:
This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.
Specifies the file transfer protocol or protocols over which your file transfer protocol\n client can connect to your server's endpoint. The available protocols are:
\n\n SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over\n SSH
\n FTPS (File Transfer Protocol Secure): File transfer with TLS\n encryption
\n FTP (File Transfer Protocol): Unencrypted file transfer
\n AS2 (Applicability Statement 2): used for transporting structured business-to-business data
If you select FTPS, you must choose a certificate stored in Certificate Manager (ACM) \n which is used to identify your server when clients connect to it over\n FTPS.
If Protocol includes either FTP or FTPS, then the\n EndpointType must be VPC and the\n IdentityProviderType must be either AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes FTP, then\n AddressAllocationIds cannot be associated.
If Protocol is set only to SFTP, the EndpointType\n can be set to PUBLIC and the IdentityProviderType can be set any of the supported identity types: \n SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes AS2, then the\n EndpointType must be VPC, and domain must be Amazon S3.
Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:
SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH
FTPS (File Transfer Protocol Secure): File transfer with TLS encryption
FTP (File Transfer Protocol): Unencrypted file transfer
AS2 (Applicability Statement 2): used for transporting structured business-to-business data
If you select FTPS, you must choose a certificate stored in Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.
If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be either AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes FTP, then AddressAllocationIds cannot be associated.
If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to any of the supported identity types: SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
If Protocol includes AS2, then the EndpointType must be VPC, and domain must be Amazon S3.
A system-assigned unique identifier for a server instance that the Transfer Family user is\n assigned to.
", + "smithy.api#documentation": "A system-assigned unique identifier for a server instance that the Transfer Family user is assigned to.
", "smithy.api#required": {} } }, "WorkflowDetails": { "target": "com.amazonaws.transfer#WorkflowDetails", "traits": { - "smithy.api#documentation": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects\n while the file is still being uploaded.
To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.
\n aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'\n
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.
To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.
aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'
Specifies the log groups to which your server logs are sent.
\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:
\n\n arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*\n
For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*\n
If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty\n value for this parameter in an update-server call. For example:
\n update-server --server-id s-1234567890abcdef0 --structured-log-destinations\n
Specifies the log groups to which your server logs are sent.
To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:
arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*
For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*
If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example:
update-server --server-id s-1234567890abcdef0 --structured-log-destinations
Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.
\nBy default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry\n Type to FILE if you want a mapping to have a file target.
Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.
By default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target.
A system-assigned unique identifier for a server that the Transfer Family user is assigned\n to.
", + "smithy.api#documentation": "A system-assigned unique identifier for a server that the Transfer Family user is assigned to.
", "smithy.api#required": {} } } @@ -10718,7 +10949,7 @@ "transfer:UnTagResource" ] }, - "smithy.api#documentation": "Assigns new properties to a user. Parameters you pass modify any or all of the following:\n the home directory, role, and policy for the UserName and ServerId\n you specify.
The response returns the ServerId and the UserName for the\n updated user.
In the console, you can select Restricted when you create or update a\n user. This ensures that the user can't access anything outside of their home directory. The\n programmatic way to configure this behavior is to update the user. Set their\n HomeDirectoryType to LOGICAL, and specify\n HomeDirectoryMappings with Entry as root (/) and\n Target as their home directory.
For example, if the user's home directory is /test/admin-user, the following\n command updates the user so that their configuration in the console shows the\n Restricted flag as selected.
\n aws transfer update-user --server-id <server-id> --user-name admin-user --home-directory-type LOGICAL --home-directory-mappings \"[{\\\"Entry\\\":\\\"/\\\", \\\"Target\\\":\\\"/test/admin-user\\\"}]\"\n
Assigns new properties to a user. Parameters you pass modify any or all of the following: the home directory, role, and policy for the UserName and ServerId you specify.
The response returns the ServerId and the UserName for the updated user.
In the console, you can select Restricted when you create or update a user. This ensures that the user can't access anything outside of their home directory. The programmatic way to configure this behavior is to update the user. Set their HomeDirectoryType to LOGICAL, and specify HomeDirectoryMappings with Entry as root (/) and Target as their home directory.
For example, if the user's home directory is /test/admin-user, the following command updates the user so that their configuration in the console shows the Restricted flag as selected.
aws transfer update-user --server-id <server-id> --user-name admin-user --home-directory-type LOGICAL --home-directory-mappings \"[{\\\"Entry\\\":\\\"/\\\", \\\"Target\\\":\\\"/test/admin-user\\\"}]\"
The landing directory (folder) for a user when they log in to the server using the client.
\nA HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The landing directory (folder) for a user when they log in to the server using the client.
A HomeDirectory example is /bucket_name/home/mydirectory.
The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.
The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 or Amazon EFS paths visible to your users.
If HomeDirectoryType is LOGICAL, you must provide mappings, using the HomeDirectoryMappings parameter. If, on the other hand, HomeDirectoryType is PATH, you provide an absolute path using the HomeDirectory parameter. You cannot have both HomeDirectory and HomeDirectoryMappings in your template.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should\n be visible to your user and how you want to make them visible. You must specify the\n Entry and Target pair, where Entry shows how the path\n is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you\n only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) \n role provides access to paths in Target. This value\n can be set only when HomeDirectoryType is set to\n LOGICAL.
The following is an Entry and Target pair example.
\n [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]\n
In most cases, you can use this value instead of the session policy to lock down your\n user to the designated home directory (\"chroot\"). To do this, you can set\n Entry to '/' and set Target to the HomeDirectory\n parameter value.
The following is an Entry and Target pair example for chroot.
\n [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]\n
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.
The following is an Entry and Target pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.
The following is an Entry and Target pair example for chroot.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's\n access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName},\n ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
This policy applies only when the domain of ServerId is Amazon S3. Amazon EFS does not use session policies.
For session policies, Transfer Family stores the policy as a JSON blob, instead\n of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass\n it in the Policy argument.
For an example of a session policy, see Creating a session\n policy.
\nFor more information, see AssumeRole in the Amazon Web Services\n Security Token Service API Reference.
\nA session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.
This policy applies only when the domain of ServerId is Amazon S3. Amazon EFS does not use session policies.
For session policies, Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.
For an example of a session policy, see Creating a session policy.
For more information, see AssumeRole in the Amazon Web Services Security Token Service API Reference.
Specifies the full POSIX identity, including user ID (Uid), group ID\n (Gid), and any secondary groups IDs (SecondaryGids), that controls\n your users' access to your Amazon Elastic File Systems (Amazon EFS). The POSIX permissions\n that are set on files and directories in your file system determines the level of access your\n users get when transferring files into and out of your Amazon EFS file systems.
Specifies the full POSIX identity, including user ID (Uid), group ID (Gid), and any secondary group IDs (SecondaryGids), that controls your users' access to your Amazon Elastic File Systems (Amazon EFS). The POSIX permissions that are set on files and directories in your file system determine the level of access your users get when transferring files into and out of your Amazon EFS file systems.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
" } }, "ServerId": { "target": "com.amazonaws.transfer#ServerId", "traits": { - "smithy.api#documentation": "A system-assigned unique identifier for a Transfer Family server instance that the user is\n assigned to.
", + "smithy.api#documentation": "A system-assigned unique identifier for a Transfer Family server instance that the user is assigned to.
", "smithy.api#required": {} } }, "UserName": { "target": "com.amazonaws.transfer#UserName", "traits": { - "smithy.api#documentation": "A unique string that identifies a user and is associated with a server as specified by the\n ServerId. This user name must be a minimum of 3 and a maximum of 100 characters\n long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen\n '-', period '.', and at sign '@'. The user name can't start\n with a hyphen, period, or at sign.
A unique string that identifies a user and is associated with a server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't start with a hyphen, period, or at sign.
A system-assigned unique identifier for a Transfer Family server instance that the account is\n assigned to.
", + "smithy.api#documentation": "A system-assigned unique identifier for a Transfer Family server instance that the account is assigned to.
", "smithy.api#required": {} } }, "UserName": { "target": "com.amazonaws.transfer#UserName", "traits": { - "smithy.api#documentation": "The unique identifier for a user that is assigned to a server instance that was specified\n in the request.
", + "smithy.api#documentation": "The unique identifier for a user that is assigned to a server instance that was specified in the request.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "\n UpdateUserResponse returns the user name and identifier for the request to\n update a user's properties.
UpdateUserResponse returns the user name and identifier for the request to update a user's properties.
Specify icon file data string (in base64 encoding).
" + "smithy.api#documentation": "Specify an icon file data string (in base64 encoding).
" } } }, @@ -11180,6 +11411,23 @@ } } }, + "com.amazonaws.transfer#WebAppEndpointPolicy": { + "type": "enum", + "members": { + "FIPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FIPS" + } + }, + "STANDARD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STANDARD" + } + } + } + }, "com.amazonaws.transfer#WebAppFaviconFile": { "type": "blob", "traits": { @@ -11284,7 +11532,7 @@ "Provisioned": { "target": "com.amazonaws.transfer#WebAppUnitCount", "traits": { - "smithy.api#documentation": "An integer that represents the number of units for your desired number of concurrent connections, or the number of user sessions on your web app at the same time.
\nEach increment allows an additional 250 concurrent sessions: a value of 1 sets the number of concurrent sessions to 250; 2 sets a value of 500, and so on.
An integer that represents the number of units for your desired number of concurrent connections, or the number of user sessions on your web app at the same time.
Each increment allows an additional 250 concurrent sessions: a value of 1 sets the number of concurrent sessions to 250; 2 sets a value of 500, and so on.
Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can\n assume, so that all workflow steps can operate on the required resources
", + "smithy.api#documentation": "Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects\n while the file is still being uploaded.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.
A trigger that starts a workflow: the workflow begins to execute after a file is uploaded.
\nTo remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.
\n aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'\n
\n OnUpload can contain a maximum of one WorkflowDetail object.
A trigger that starts a workflow: the workflow begins to execute after a file is uploaded.
To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.
aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'
OnUpload can contain a maximum of one WorkflowDetail object.
A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server\n that executes whenever there is a partial upload.
\nA partial upload occurs when a file is open when the session disconnects.
\n\n OnPartialUpload can contain a maximum of one WorkflowDetail object.
A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload.
A partial upload occurs when a file is open when the session disconnects.
OnPartialUpload can contain a maximum of one WorkflowDetail object.
Container for the WorkflowDetail data type.\n It is used by actions that trigger a workflow to begin execution.
Container for the WorkflowDetail data type. It is used by actions that trigger a workflow to begin execution.
\n Currently, the following step types are supported.\n
\n\n \n COPY\n - Copy the file to another location.
\n \n CUSTOM\n - Perform a custom step with an Lambda function target.
\n \n DECRYPT\n - Decrypt a file that was encrypted before it was uploaded.
\n \n DELETE\n - Delete the file.
\n \n TAG\n - Add a tag to the file.
Currently, the following step types are supported.
COPY - Copy the file to another location.
CUSTOM - Perform a custom step with an Lambda function target.
DECRYPT - Decrypt a file that was encrypted before it was uploaded.
DELETE - Delete the file.
TAG - Add a tag to the file.
Details for a step that performs a file copy.
\n\n Consists of the following values:\n
\nA description
\nAn Amazon S3 location for the destination of the file copy.
\nA flag that indicates whether to overwrite an existing file of the same name. The default is\n FALSE.
Details for a step that performs a file copy.
Consists of the following values:
A description
An Amazon S3 location for the destination of the file copy.
A flag that indicates whether to overwrite an existing file of the same name. The default is FALSE.
Details for a step that invokes an Lambda function.
\nConsists of the Lambda function's name, target, and timeout (in seconds).
" + "smithy.api#documentation": "Details for a step that invokes an Lambda function.
Consists of the Lambda function's name, target, and timeout (in seconds).
" } }, "DeleteStepDetails": { @@ -11418,13 +11666,13 @@ "TagStepDetails": { "target": "com.amazonaws.transfer#TagStepDetails", "traits": { - "smithy.api#documentation": "Details for a step that creates one or more tags.
\nYou specify one or more tags. Each tag contains a key-value pair.
" + "smithy.api#documentation": "Details for a step that creates one or more tags.
You specify one or more tags. Each tag contains a key-value pair.
" } }, "DecryptStepDetails": { "target": "com.amazonaws.transfer#DecryptStepDetails", "traits": { - "smithy.api#documentation": "Details for a step that decrypts an encrypted file.
\nConsists of the following values:
\nA descriptive name
\nAn Amazon S3 or Amazon Elastic File System (Amazon EFS) location for the source file to\n decrypt.
\nAn S3 or Amazon EFS location for the destination of the file decryption.
\nA flag that indicates whether to overwrite an existing file of the same name. The default is\n FALSE.
The type of encryption that's used. Currently, only PGP encryption is supported.
\nDetails for a step that decrypts an encrypted file.
Consists of the following values:
A descriptive name
An Amazon S3 or Amazon Elastic File System (Amazon EFS) location for the source file to decrypt.
An S3 or Amazon EFS location for the destination of the file decryption.
A flag that indicates whether to overwrite an existing file of the same name. The default is FALSE.
The type of encryption that's used. Currently, only PGP encryption is supported.
Descriptive text that you can provide to help with identification \n of the current policy store.
" } + }, + "deletionProtection": { + "target": "com.amazonaws.verifiedpermissions#DeletionProtection", + "traits": { + "smithy.api#documentation": "Specifies whether the policy store can be deleted. If enabled, the policy store can't be deleted.
\nThe default state is DISABLED.
Descriptive text that you can provide to help with identification \n of the current policy store.
" } + }, + "deletionProtection": { + "target": "com.amazonaws.verifiedpermissions#DeletionProtection", + "traits": { + "smithy.api#documentation": "Specifies whether the policy store can be deleted. If enabled, the policy store can't be deleted.
\nThe default state is DISABLED.
The policy store can't be deleted because deletion protection is enabled. To delete this policy store, disable deletion protection.
", + "smithy.api#error": "client", + "smithy.api#httpError": 406 + } + }, "com.amazonaws.verifiedpermissions#IpAddr": { "type": "string", "traits": { @@ -5959,6 +6009,12 @@ "smithy.api#required": {} } }, + "deletionProtection": { + "target": "com.amazonaws.verifiedpermissions#DeletionProtection", + "traits": { + "smithy.api#documentation": "Specifies whether the policy store can be deleted. If enabled, the policy store can't be deleted.
\nWhen you call UpdatePolicyStore, this parameter is unchanged unless explicitly included in the call.
This is the latest version of the WAF API,\n released in November, 2019. The names of the entities that you use to access this API,\n like endpoints and namespaces, all have the versioning information added, like \"V2\" or\n \"v2\", to distinguish from the prior version. We recommend migrating your resources to\n this version, because it has a number of significant improvements.
\nIf you used WAF prior to this release, you can't use this WAFV2 API to access any\n WAF resources that you created before. WAF Classic support will end on September 30, 2025.
\nFor information about WAF, including how to migrate your WAF Classic resources to this version,\n see the WAF Developer Guide.
\nWAF is a web application firewall that lets you monitor the HTTP and HTTPS\n requests that are forwarded to a protected resource. Protected resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync\n GraphQL API, Amazon Cognito user pool, App Runner service, and Amazon Web Services Verified Access instance. WAF also lets you control access to your content,\n to protect the Amazon Web Services resource that WAF is monitoring. Based on conditions that\n you specify, such as the IP addresses that requests originate from or the values of query\n strings, the protected resource responds to requests with either the requested content, an HTTP 403 status code\n (Forbidden), or with a custom response.
\nThis API guide is for developers who need detailed information about WAF API actions,\n data types, and errors. For detailed information about WAF features and guidance for configuring and using \n WAF, see the WAF Developer\n Guide.
\nYou can make calls using the endpoints listed in WAF endpoints and quotas.
\nFor regional resources, you can use any of the endpoints in the list.\n A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.
\nFor Amazon CloudFront, you must use the API endpoint listed for\n US East (N. Virginia): us-east-1.
\nAlternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the\n programming language or platform that you're using. For more information, see Amazon Web Services SDKs.
", + "smithy.api#documentation": "This is the latest version of the WAF API,\n released in November, 2019. The names of the entities that you use to access this API,\n like endpoints and namespaces, all have the versioning information added, like \"V2\" or\n \"v2\", to distinguish from the prior version. We recommend migrating your resources to\n this version, because it has a number of significant improvements.
\nIf you used WAF prior to this release, you can't use this WAFV2 API to access any\n WAF resources that you created before. WAF Classic support will end on September 30, 2025.
\nFor information about WAF, including how to migrate your WAF Classic resources to this version,\n see the WAF Developer Guide.
\nWAF is a web application firewall that lets you monitor the HTTP and HTTPS\n requests that are forwarded to a protected resource. Protected resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync\n GraphQL API, Amazon Cognito user pool, App Runner service, Amplify application, and Amazon Web Services Verified Access instance. WAF also lets you control access to your content,\n to protect the Amazon Web Services resource that WAF is monitoring. Based on conditions that\n you specify, such as the IP addresses that requests originate from or the values of query\n strings, the protected resource responds to requests with either the requested content, an HTTP 403 status code\n (Forbidden), or with a custom response.
\nThis API guide is for developers who need detailed information about WAF API actions,\n data types, and errors. For detailed information about WAF features and guidance for configuring and using \n WAF, see the WAF Developer\n Guide.
\nYou can make calls using the endpoints listed in WAF endpoints and quotas.
\nFor regional resources, you can use any of the endpoints in the list.\n A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.
\nFor Amazon CloudFront and Amplify, you must use the API endpoint listed for\n US East (N. Virginia): us-east-1.
\nAlternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the\n programming language or platform that you're using. For more information, see Amazon Web Services SDKs.
", "smithy.api#title": "AWS WAFV2", "smithy.api#xmlNamespace": { "uri": "http://waf.amazonaws.com/doc/2019-07-29/" @@ -1773,7 +1773,7 @@ "ResourceArn": { "target": "com.amazonaws.wafv2#ResourceArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource to associate with the web ACL.
\nThe ARN must be in one of the following formats:
\nFor an Application Load Balancer: arn:partition:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id\n \n
For an Amazon API Gateway REST API: arn:partition:apigateway:region::/restapis/api-id/stages/stage-name\n \n
For an AppSync GraphQL API: arn:partition:appsync:region:account-id:apis/GraphQLApiId\n \n
For an Amazon Cognito user pool: arn:partition:cognito-idp:region:account-id:userpool/user-pool-id\n \n
For an App Runner service: arn:partition:apprunner:region:account-id:service/apprunner-service-name/apprunner-service-id\n \n
For an Amazon Web Services Verified Access instance: arn:partition:ec2:region:account-id:verified-access-instance/instance-id\n \n
The Amazon Resource Name (ARN) of the resource to associate with the web ACL.
\nThe ARN must be in one of the following formats:
\nFor an Application Load Balancer: arn:partition:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id\n \n
For an Amazon API Gateway REST API: arn:partition:apigateway:region::/restapis/api-id/stages/stage-name\n \n
For an AppSync GraphQL API: arn:partition:appsync:region:account-id:apis/GraphQLApiId\n \n
For an Amazon Cognito user pool: arn:partition:cognito-idp:region:account-id:userpool/user-pool-id\n \n
For an App Runner service: arn:partition:apprunner:region:account-id:service/apprunner-service-name/apprunner-service-id\n \n
For an Amazon Web Services Verified Access instance: arn:partition:ec2:region:account-id:verified-access-instance/instance-id\n \n
For an Amplify application: arn:partition:amplify:region:account-id:apps/app-id\n \n
What WAF should do if the body is larger than WAF can inspect.
\nWAF does not support inspecting the entire contents of the web request body if the body \n exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service \n only forwards the contents that are within the limit to WAF for inspection.
\nFor Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
\nFor CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and \n you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.
The options for oversize handling are the following:
\n\n CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.
\n MATCH - Treat the web request as matching the rule statement. WAF\n applies the rule action to the request.
\n NO_MATCH - Treat the web request as not matching the rule\n statement.
You can combine the MATCH or NO_MATCH\n settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.
Default: CONTINUE\n
What WAF should do if the body is larger than WAF can inspect.
\nWAF does not support inspecting the entire contents of the web request body if the body \n exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service \n only forwards the contents that are within the limit to WAF for inspection.
\nFor Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
\nFor CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and \n you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.
For Amplify, use the CloudFront limit.
\nThe options for oversize handling are the following:
\n\n CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.
\n MATCH - Treat the web request as matching the rule statement. WAF\n applies the rule action to the request.
\n NO_MATCH - Treat the web request as not matching the rule\n statement.
You can combine the MATCH or NO_MATCH\n settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.
Default: CONTINUE\n
Specifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nCreates a WebACL per the specifications provided.
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, and Amazon Web Services Verified Access instance.
" + "smithy.api#documentation": "Creates a WebACL per the specifications provided.
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, Amplify application, and Amazon Web Services Verified Access instance.
" } }, "com.amazonaws.wafv2#CreateWebACLRequest": { @@ -4251,7 +4251,7 @@ "Scope": { "target": "com.amazonaws.wafv2#Scope", "traits": { - "smithy.api#documentation": "Specifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nThe Amazon Resource Name (ARN) of the resource to disassociate from the web ACL.
\nThe ARN must be in one of the following formats:
\nFor an Application Load Balancer: arn:partition:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id\n \n
For an Amazon API Gateway REST API: arn:partition:apigateway:region::/restapis/api-id/stages/stage-name\n \n
For an AppSync GraphQL API: arn:partition:appsync:region:account-id:apis/GraphQLApiId\n \n
For an Amazon Cognito user pool: arn:partition:cognito-idp:region:account-id:userpool/user-pool-id\n \n
For an App Runner service: arn:partition:apprunner:region:account-id:service/apprunner-service-name/apprunner-service-id\n \n
For an Amazon Web Services Verified Access instance: arn:partition:ec2:region:account-id:verified-access-instance/instance-id\n \n
The Amazon Resource Name (ARN) of the resource to disassociate from the web ACL.
\nThe ARN must be in one of the following formats:
\nFor an Application Load Balancer: arn:partition:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id\n \n
For an Amazon API Gateway REST API: arn:partition:apigateway:region::/restapis/api-id/stages/stage-name\n \n
For an AppSync GraphQL API: arn:partition:appsync:region:account-id:apis/GraphQLApiId\n \n
For an Amazon Cognito user pool: arn:partition:cognito-idp:region:account-id:userpool/user-pool-id\n \n
For an App Runner service: arn:partition:apprunner:region:account-id:service/apprunner-service-name/apprunner-service-id\n \n
For an Amazon Web Services Verified Access instance: arn:partition:ec2:region:account-id:verified-access-instance/instance-id\n \n
For an Amplify application: arn:partition:amplify:region:account-id:apps/app-id\n \n
Inspect the request body as plain text. The request body immediately follows the request\n headers. This is the part of a request that contains any additional data that you want to\n send to your web server as the HTTP request body, such as data from a form.
\nWAF does not support inspecting the entire contents of the web request body if the body \n exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service \n only forwards the contents that are within the limit to WAF for inspection.
\nFor Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
\nFor CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and \n you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.
For information about how to handle oversized\n request bodies, see the Body object configuration.
Inspect the request body as plain text. The request body immediately follows the request\n headers. This is the part of a request that contains any additional data that you want to\n send to your web server as the HTTP request body, such as data from a form.
\nWAF does not support inspecting the entire contents of the web request body if the body \n exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service \n only forwards the contents that are within the limit to WAF for inspection.
\nFor Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
\nFor CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and \n you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.
For Amplify, use the CloudFront limit.
\nFor information about how to handle oversized\n request bodies, see the Body object configuration.
Inspect the request body as JSON. The request body immediately follows the request\n headers. This is the part of a request that contains any additional data that you want to\n send to your web server as the HTTP request body, such as data from a form.
\nWAF does not support inspecting the entire contents of the web request body if the body \n exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service \n only forwards the contents that are within the limit to WAF for inspection.
\nFor Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
\nFor CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and \n you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.
For information about how to handle oversized\n request bodies, see the JsonBody object configuration.
Inspect the request body as JSON. The request body immediately follows the request\n headers. This is the part of a request that contains any additional data that you want to\n send to your web server as the HTTP request body, such as data from a form.
\nWAF does not support inspecting the entire contents of the web request body if the body \n exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service \n only forwards the contents that are within the limit to WAF for inspection.
\nFor Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
\nFor CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and \n you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.
For Amplify, use the CloudFront limit.
\nFor information about how to handle oversized\n request bodies, see the JsonBody object configuration.
Specifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nThe Amazon Resource Name (ARN) of the resource whose web ACL you want to retrieve.
\nThe ARN must be in one of the following formats:
\nFor an Application Load Balancer: arn:partition:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id\n \n
For an Amazon API Gateway REST API: arn:partition:apigateway:region::/restapis/api-id/stages/stage-name\n \n
For an AppSync GraphQL API: arn:partition:appsync:region:account-id:apis/GraphQLApiId\n \n
For an Amazon Cognito user pool: arn:partition:cognito-idp:region:account-id:userpool/user-pool-id\n \n
For an App Runner service: arn:partition:apprunner:region:account-id:service/apprunner-service-name/apprunner-service-id\n \n
For an Amazon Web Services Verified Access instance: arn:partition:ec2:region:account-id:verified-access-instance/instance-id\n \n
The Amazon Resource Name (ARN) of the resource whose web ACL you want to retrieve.
\nThe ARN must be in one of the following formats:
\nFor an Application Load Balancer: arn:partition:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id\n \n
For an Amazon API Gateway REST API: arn:partition:apigateway:region::/restapis/api-id/stages/stage-name\n \n
For an AppSync GraphQL API: arn:partition:appsync:region:account-id:apis/GraphQLApiId\n \n
For an Amazon Cognito user pool: arn:partition:cognito-idp:region:account-id:userpool/user-pool-id\n \n
For an App Runner service: arn:partition:apprunner:region:account-id:service/apprunner-service-name/apprunner-service-id\n \n
For an Amazon Web Services Verified Access instance: arn:partition:ec2:region:account-id:verified-access-instance/instance-id\n \n
For an Amplify application: arn:partition:amplify:region:account-id:apps/app-id\n \n
The name of the web ACL. You cannot change the name of a web ACL after you create it.
", - "smithy.api#required": {} + "smithy.api#documentation": "The name of the web ACL. You cannot change the name of a web ACL after you create it.
" } }, "Scope": { "target": "com.amazonaws.wafv2#Scope", "traits": { - "smithy.api#documentation": "Specifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nThe unique identifier for the web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
", - "smithy.api#required": {} + "smithy.api#documentation": "The unique identifier for the web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
" + } + }, + "ARN": { + "target": "com.amazonaws.wafv2#ResourceArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the web ACL that you want to retrieve.
" } } }, @@ -7350,7 +7353,7 @@ "OversizeHandling": { "target": "com.amazonaws.wafv2#OversizeHandling", "traits": { - "smithy.api#documentation": "What WAF should do if the body is larger than WAF can inspect.
\nWAF does not support inspecting the entire contents of the web request body if the body \n exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service \n only forwards the contents that are within the limit to WAF for inspection.
\nFor Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
\nFor CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and \n you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.
The options for oversize handling are the following:
\n\n CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.
\n MATCH - Treat the web request as matching the rule statement. WAF\n applies the rule action to the request.
\n NO_MATCH - Treat the web request as not matching the rule\n statement.
You can combine the MATCH or NO_MATCH\n settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.
Default: CONTINUE\n
What WAF should do if the body is larger than WAF can inspect.
\nWAF does not support inspecting the entire contents of the web request body if the body \n exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service \n only forwards the contents that are within the limit to WAF for inspection.
\nFor Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).
\nFor CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and \n you can increase the limit for each resource type in the web ACL AssociationConfig, for additional processing fees.
For Amplify, use the CloudFront limit.
\nThe options for oversize handling are the following:
\n\n CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.
\n MATCH - Treat the web request as matching the rule statement. WAF\n applies the rule action to the request.
\n NO_MATCH - Treat the web request as not matching the rule\n statement.
You can combine the MATCH or NO_MATCH\n settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.
Default: CONTINUE\n
Specifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nUpdates the specified WebACL. While updating a web ACL, WAF provides\n continuous coverage to the resources that you have associated with the web ACL.
\nThis operation completely replaces the mutable specifications that you already have for the web ACL with the ones that you provide to this call.
\nTo modify a web ACL, do the following:
\nRetrieve it by calling GetWebACL\n
\nUpdate its settings as needed
\nProvide the complete web ACL specification to this call
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, and Amazon Web Services Verified Access instance.
\n\n Temporary inconsistencies during updates\n
\nWhen you create or change a web ACL or other WAF resources, the changes take a small amount of time to propagate to all areas where the resources are stored. The propagation time can be from a few seconds to a number of minutes.
\nThe following are examples of the temporary inconsistencies that you might notice during change propagation:
\nAfter you create a web ACL, if you try to associate it with a resource, you might get an exception indicating that the web ACL is unavailable.
\nAfter you add a rule group to a web ACL, the new rule group rules might be in effect in one area where the web ACL is used and not in another.
\nAfter you change a rule action setting, you might see the old action in some places and the new action in others.
\nAfter you add an IP address to an IP set that is in use in a blocking rule, the new address might be blocked in one area while still allowed in another.
\nUpdates the specified WebACL. While updating a web ACL, WAF provides\n continuous coverage to the resources that you have associated with the web ACL.
\nThis operation completely replaces the mutable specifications that you already have for the web ACL with the ones that you provide to this call.
\nTo modify a web ACL, do the following:
\nRetrieve it by calling GetWebACL\n
\nUpdate its settings as needed
\nProvide the complete web ACL specification to this call
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, Amplify application, and Amazon Web Services Verified Access instance.
\n\n Temporary inconsistencies during updates\n
\nWhen you create or change a web ACL or other WAF resources, the changes take a small amount of time to propagate to all areas where the resources are stored. The propagation time can be from a few seconds to a number of minutes.
\nThe following are examples of the temporary inconsistencies that you might notice during change propagation:
\nAfter you create a web ACL, if you try to associate it with a resource, you might get an exception indicating that the web ACL is unavailable.
\nAfter you add a rule group to a web ACL, the new rule group rules might be in effect in one area where the web ACL is used and not in another.
\nAfter you change a rule action setting, you might see the old action in some places and the new action in others.
\nAfter you add an IP address to an IP set that is in use in a blocking rule, the new address might be blocked in one area while still allowed in another.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution.
\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nSpecifies whether this is for a global resource type, such as a Amazon CloudFront distribution. For an Amplify application, use CLOUDFRONT.
To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:
\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.
API and SDKs - For all calls, use the Region endpoint us-east-1.
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, and Amazon Web Services Verified Access instance.
" + "smithy.api#documentation": "A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, Amplify application, and Amazon Web Services Verified Access instance.
" } }, "com.amazonaws.wafv2#WebACLSummaries": { diff --git a/codegen/sdk/aws-models/workspaces-thin-client.json b/codegen/sdk/aws-models/workspaces-thin-client.json index ab9019f793a..9fad71e496a 100644 --- a/codegen/sdk/aws-models/workspaces-thin-client.json +++ b/codegen/sdk/aws-models/workspaces-thin-client.json @@ -25,7 +25,8 @@ "com.amazonaws.workspacesthinclient#ActivationCode": { "type": "string", "traits": { - "smithy.api#pattern": "^[a-z]{2}[a-z0-9]{6}$" + "smithy.api#pattern": "^[a-z]{2}[a-z0-9]{6}$", + "smithy.api#sensitive": {} } }, "com.amazonaws.workspacesthinclient#ApplyTimeOf": { @@ -663,6 +664,10 @@ "tags": { "target": "com.amazonaws.workspacesthinclient#TagsMap", "traits": { + "smithy.api#deprecated": { + "message": "This field will be removed in future releases. Use ListTagsForResource API instead.", + "since": "2025-03-25" + }, "smithy.api#documentation": "The tag keys and optional values for the resource.
" } } @@ -993,6 +998,10 @@ "tags": { "target": "com.amazonaws.workspacesthinclient#TagsMap", "traits": { + "smithy.api#deprecated": { + "message": "This field will be removed in future releases. Use ListTagsForResource API instead.", + "since": "2025-03-25" + }, "smithy.api#documentation": "The tag keys and optional values for the resource.
" } }, @@ -1957,6 +1966,10 @@ "tags": { "target": "com.amazonaws.workspacesthinclient#TagsMap", "traits": { + "smithy.api#deprecated": { + "message": "This field will be removed in future releases. Use ListTagsForResource API instead.", + "since": "2025-03-25" + }, "smithy.api#documentation": "The tag keys and optional values for the resource.
" } } diff --git a/gradle.properties b/gradle.properties index fc824e58f11..80438fe89cd 100644 --- a/gradle.properties +++ b/gradle.properties @@ -6,7 +6,7 @@ kotlin.native.ignoreDisabledTargets=true org.gradle.jvmargs=-Xmx6g -XX:MaxMetaspaceSize=2G # sdk -sdkVersion=1.4.46-SNAPSHOT +sdkVersion=1.4.64-SNAPSHOT # dokka config (values specified at build-time as needed) smithyKotlinDocBaseUrl=https://sdk.amazonaws.com/kotlin/api/smithy-kotlin/api/$smithyKotlinRuntimeVersion/ diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index a99edd1327a..1b4a0466884 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -4,7 +4,7 @@ ksp-version = "2.1.0-1.0.29" # Keep in sync with kotlin-version dokka-version = "1.9.10" -aws-kotlin-repo-tools-version = "0.4.25-kn" +aws-kotlin-repo-tools-version = "0.4.26-kn" # libs coroutines-version = "1.9.0" @@ -12,8 +12,8 @@ atomicfu-version = "0.25.0" binary-compatibility-validator-version = "0.16.3" # smithy-kotlin codegen and runtime are versioned separately -smithy-kotlin-runtime-version = "1.4.11" -smithy-kotlin-codegen-version = "0.34.11" +smithy-kotlin-runtime-version = "1.4.12" +smithy-kotlin-codegen-version = "0.34.12" # codegen smithy-version = "1.53.0" diff --git a/settings.gradle.kts b/settings.gradle.kts index dc2418bfe1d..6dc165db0f0 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -57,7 +57,6 @@ include(":tests:codegen:event-stream") include(":tests:codegen:rules-engine") include(":tests:e2e-test-util") include(":tests:codegen:smoke-tests") -include(":tests:codegen:smoke-tests:services") include(":tests:codegen:checksums") // generated services @@ -73,13 +72,6 @@ file("services").listFiles().forEach { } } -// generated services by smoke tests test suite -file("tests/codegen/smoke-tests/services").listFiles().forEach { - if (it.isServiceDir) { - include(":tests:codegen:smoke-tests:services:${it.name}") - } -} - if ("dynamodb".isBootstrappedService) { include(":hll:dynamodb-mapper") 
include(":hll:dynamodb-mapper:dynamodb-mapper") diff --git a/tests/benchmarks/service-benchmarks/README.md b/tests/benchmarks/service-benchmarks/README.md index 9928835fc1d..22acb88288c 100644 --- a/tests/benchmarks/service-benchmarks/README.md +++ b/tests/benchmarks/service-benchmarks/README.md @@ -6,10 +6,7 @@ are complete ## Instructions -To run the benchmarks: -* `./gradlew :tests:benchmarks:service-benchmarks:bootstrapAll` - This ensures that all the required service clients are bootstrapped and ready to be built. **You only need to do this - once** in your workspace unless you clean up generated services or make a change to codegen. +Ensure all services, including `iam`, have been generated before proceeding with the benchmarks. To run the benchmarks: * `./gradlew build` This builds the whole SDK. * `./gradlew :tests:benchmarks:service-benchmarks:run` diff --git a/tests/codegen/build.gradle.kts b/tests/codegen/build.gradle.kts index 1b6c585df92..ce3df2cdb9b 100644 --- a/tests/codegen/build.gradle.kts +++ b/tests/codegen/build.gradle.kts @@ -19,12 +19,6 @@ subprojects { } } - /* - Don't apply the rest of the configuration to the code generated smoke test services! - Those use the KMP plugin not JVM. 
- */ - if (project.path.startsWith(":tests:codegen:smoke-tests:services")) return@subprojects - apply(plugin = libraries.plugins.aws.kotlin.repo.tools.smithybuild.get().pluginId) apply(plugin = libraries.plugins.kotlin.jvm.get().pluginId) diff --git a/tests/codegen/smoke-tests/build.gradle.kts b/tests/codegen/smoke-tests/build.gradle.kts index e6298681c91..0cb583f8e7e 100644 --- a/tests/codegen/smoke-tests/build.gradle.kts +++ b/tests/codegen/smoke-tests/build.gradle.kts @@ -3,43 +3,40 @@ * SPDX-License-Identifier: Apache-2.0 */ -import aws.sdk.kotlin.gradle.codegen.dsl.generateSmithyProjections import aws.sdk.kotlin.gradle.codegen.dsl.smithyKotlinPlugin import aws.sdk.kotlin.gradle.codegen.smithyKotlinProjectionPath +import aws.sdk.kotlin.gradle.codegen.smithyKotlinProjectionSrcDir import aws.sdk.kotlin.tests.codegen.CodegenTest import aws.sdk.kotlin.tests.codegen.Model description = "AWS SDK for Kotlin's smoke test codegen test suite" -dependencies { - testImplementation(gradleTestKit()) -} - val tests = listOf( CodegenTest("successService", Model("smoke-tests-success.smithy"), "smithy.kotlin.traits#SuccessService"), CodegenTest("failureService", Model("smoke-tests-failure.smithy"), "smithy.kotlin.traits#FailureService"), CodegenTest("exceptionService", Model("smoke-tests-exception.smithy"), "smithy.kotlin.traits#ExceptionService"), ) -configureProjections() -configureTasks() +smithyBuild { + val basePackage = "aws.sdk.kotlin.test.codegen.smoketest" + + projections { + tests.forEach { test -> + create(test.name) { + val modelPath = layout.projectDirectory.file(test.model.path + test.model.fileName).asFile.absolutePath + imports = listOf(modelPath) -fun configureProjections() { - smithyBuild { - this@Build_gradle.tests.forEach { test -> - projections.register(test.name) { - imports = listOf(layout.projectDirectory.file(test.model.path + test.model.fileName).asFile.absolutePath) smithyKotlinPlugin { serviceShapeId = test.serviceShapeId - packageName = 
"aws.sdk.kotlin.test.${test.name.lowercase()}" - packageVersion = "1.0" + packageName = "$basePackage.${test.name}" + packageVersion = project.version.toString() + sdkId = test.name.replaceFirstChar { it.uppercaseChar() } buildSettings { - generateFullProject = false generateDefaultBuildFiles = false - optInAnnotations = listOf( - "aws.smithy.kotlin.runtime.InternalApi", - "aws.sdk.kotlin.runtime.InternalSdkApi", - ) + generateFullProject = false + } + apiSettings { + visibility = "internal" } } } @@ -47,40 +44,12 @@ fun configureProjections() { } } -fun configureTasks() { - tasks.register("stageServices") { - dependsOn(tasks.generateSmithyProjections) - doLast { - this@Build_gradle.tests.forEach { test -> - val projectionPath = smithyBuild.smithyKotlinProjectionPath(test.name).get() - val destinationPath = layout.projectDirectory.asFile.absolutePath + "/services/${test.name}" - - copy { - from("$projectionPath/src") - into("$destinationPath/generated-src") - } - - copy { - from("$projectionPath/build.gradle.kts") - into(destinationPath) - } - } - } - } +kotlin.sourceSets.getByName("test") { + smithyBuild.projections.forEach { projection -> + // Add generated model to source set + kotlin.srcDir(smithyBuild.smithyKotlinProjectionSrcDir(projection.name)) - tasks.withType